Diffstat (limited to 'testpar')
-rw-r--r--  testpar/CMakeVFDTests.cmake   |   18
-rw-r--r--  testpar/t_2Gio.c              |  339
-rw-r--r--  testpar/t_bigio.c             |   41
-rw-r--r--  testpar/t_cache.c             |   74
-rw-r--r--  testpar/t_cache_image.c       |   42
-rw-r--r--  testpar/t_coll_chunk.c        |   16
-rw-r--r--  testpar/t_coll_md_read.c      |   93
-rw-r--r--  testpar/t_dset.c              |  343
-rw-r--r--  testpar/t_file.c              |   20
-rw-r--r--  testpar/t_filters_parallel.c  | 4584
-rw-r--r--  testpar/t_filters_parallel.h  |  117
-rw-r--r--  testpar/t_mdset.c             |    6
-rw-r--r--  testpar/t_mpi.c               |    4
-rw-r--r--  testpar/t_pflush1.c           |   12
-rw-r--r--  testpar/t_pflush2.c           |   12
-rw-r--r--  testpar/t_ph5basic.c          |    2
-rw-r--r--  testpar/t_pread.c             |   10
-rw-r--r--  testpar/t_prop.c              |    6
-rw-r--r--  testpar/t_shapesame.c         |   64
-rw-r--r--  testpar/t_span_tree.c         |   30
-rw-r--r--  testpar/testpflush.sh.in      |    2
-rw-r--r--  testpar/testphdf5.h           |    8
22 files changed, 3859 insertions, 1984 deletions
diff --git a/testpar/CMakeVFDTests.cmake b/testpar/CMakeVFDTests.cmake
index f50ca16..d6a4025 100644
--- a/testpar/CMakeVFDTests.cmake
+++ b/testpar/CMakeVFDTests.cmake
@@ -15,29 +15,13 @@
### T E S T I N G ###
##############################################################################
##############################################################################
-set (VFD_LIST
- sec2
- stdio
- core
- core_paged
- split
- multi
- family
-)
+H5_CREATE_VFD_DIR()
set (H5P_VFD_TESTS
t_pflush1
t_pflush2
)
-if (H5_HAVE_DIRECT)
- set (VFD_LIST ${VFD_LIST} direct)
-endif ()
-
-foreach (vfdtest ${VFD_LIST})
- file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/${vfdtest}")
-endforeach ()
-
macro (ADD_VFD_TEST vfdname resultcode)
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
foreach (h5_test ${H5P_VFD_TESTS})
diff --git a/testpar/t_2Gio.c b/testpar/t_2Gio.c
index d5b9038..911be2c 100644
--- a/testpar/t_2Gio.c
+++ b/testpar/t_2Gio.c
@@ -33,7 +33,7 @@
#include "mpi.h"
/* For this test, we don't want to inherit the RANK definition
- * from testphdf5.h. We'll define MAX_RANK to accomodate 3D arrays
+ * from testphdf5.h. We'll define MAX_RANK to accommodate 3D arrays
* and use that definition rather than RANK.
*/
#ifndef MAX_RANK
@@ -3047,7 +3047,7 @@ compress_readAll(void)
nerrors++;
}
-#if MPI_VERSION >= 3
+#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES
ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
VRFY((ret >= 0), "H5Dwrite succeeded");
#endif
@@ -3323,7 +3323,7 @@ none_selection_chunk(void)
* Simple independent I/O. This tests that the defaults are properly set.
*
* TEST_ACTUAL_IO_RESET:
- * Perfroms collective and then independent I/O wit hthe same dxpl to
+ * Performs collective and then independent I/O with the same dxpl to
* make sure the peroperty is correctly reset to the default on each use.
* Specifically, this test runs TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_MIX_DISAGREE
* (The most complex case that works on all builds) and then performs
@@ -3332,7 +3332,7 @@ none_selection_chunk(void)
* Note: DIRECT_MULTI_CHUNK_MIX and DIRECT_MULTI_CHUNK_MIX_DISAGREE
* is not needed as they are covered by DIRECT_CHUNK_MIX and
* MULTI_CHUNK_MIX_DISAGREE cases. _DIRECT_ cases are only for testing
- * path way to multi-chunk-io by H5FD_MPIO_CHUNK_MULTI_IO insted of num-threshold.
+ * pathway to multi-chunk-io by H5FD_MPIO_CHUNK_MULTI_IO instead of num-threshold.
*
* Modification:
* - Refctore to remove multi-chunk-without-opimization test and update for
@@ -3347,12 +3347,12 @@ none_selection_chunk(void)
static void
test_actual_io_mode(int selection_mode)
{
- H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_write = -1;
- H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_read = -1;
- H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_expected = -1;
- H5D_mpio_actual_io_mode_t actual_io_mode_write = -1;
- H5D_mpio_actual_io_mode_t actual_io_mode_read = -1;
- H5D_mpio_actual_io_mode_t actual_io_mode_expected = -1;
+ H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_write = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
+ H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_read = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
+ H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
+ H5D_mpio_actual_io_mode_t actual_io_mode_write = H5D_MPIO_NO_COLLECTIVE;
+ H5D_mpio_actual_io_mode_t actual_io_mode_read = H5D_MPIO_NO_COLLECTIVE;
+ H5D_mpio_actual_io_mode_t actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE;
const char * filename;
const char * test_name;
hbool_t direct_multi_chunk_io;
@@ -3520,7 +3520,7 @@ test_actual_io_mode(int selection_mode)
break;
/* RESET tests that the properties are properly reset to defaults each time I/O is
- * performed. To acheive this, we have RESET perform collective I/O (which would change
+ * performed. To achieve this, we have RESET perform collective I/O (which would change
* the values from the defaults) followed by independent I/O (which should report the
* default values). RESET doesn't need to have a unique selection, so we reuse
* MULTI_CHUMK_MIX_DISAGREE, which was chosen because it is a complex case that works
@@ -3536,7 +3536,7 @@ test_actual_io_mode(int selection_mode)
* assgigned collective I/O, while each other process gets independent I/O.
* Since the root process with only access the first chunk, it will report
* collective I/O. The subsequent processes will access the first chunk
- * collectively, and their other chunk indpendently, reporting mixed I/O.
+ * collectively, and their other chunk independently, reporting mixed I/O.
*/
if (mpi_rank == 0) {
@@ -3605,8 +3605,8 @@ test_actual_io_mode(int selection_mode)
default:
test_name = "Undefined Selection Mode";
- actual_chunk_opt_mode_expected = -1;
- actual_io_mode_expected = -1;
+ actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
+ actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE;
break;
}
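
The RESET behavior documented in this function reduces to querying two DXPL introspection properties after each transfer. A minimal sketch of that check, assuming `dxpl` is a transfer property list that was just used for independent I/O (not part of the diff above):

    #include <assert.h>
    #include "hdf5.h"

    /* Hedged sketch: after an independent transfer, both introspection
     * properties should report their defaults again. */
    static void
    check_dxpl_reset(hid_t dxpl)
    {
        H5D_mpio_actual_io_mode_t        io_mode;
        H5D_mpio_actual_chunk_opt_mode_t opt_mode;

        assert(H5Pget_mpio_actual_io_mode(dxpl, &io_mode) >= 0);
        assert(H5Pget_mpio_actual_chunk_opt_mode(dxpl, &opt_mode) >= 0);
        assert(io_mode == H5D_MPIO_NO_COLLECTIVE);
        assert(opt_mode == H5D_MPIO_NO_CHUNK_OPTIMIZATION);
    }
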
@@ -3651,7 +3651,7 @@ test_actual_io_mode(int selection_mode)
ret = H5Pset_dxpl_mpio_chunk_opt_num(dxpl_write, (unsigned)mpi_size * 2);
VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_num succeeded");
- /* set this to manipulate testing senario about allocating processes
+ /* set this to manipulate testing scenario about allocating processes
* to chunks */
ret = H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl_write, (unsigned)99);
VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_ratio succeeded");
@@ -3675,12 +3675,12 @@ test_actual_io_mode(int selection_mode)
H5Eprint2(H5E_DEFAULT, stdout);
VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
- /* Retreive Actual io valuess */
+ /* Retrieve Actual io values */
ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write);
- VRFY((ret >= 0), "retriving actual io mode suceeded");
+ VRFY((ret >= 0), "retrieving actual io mode succeeded");
ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write);
- VRFY((ret >= 0), "retriving actual chunk opt mode succeeded");
+ VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded");
/* Read */
ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer);
@@ -3688,12 +3688,12 @@ test_actual_io_mode(int selection_mode)
H5Eprint2(H5E_DEFAULT, stdout);
VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
- /* Retreive Actual io values */
+ /* Retrieve Actual io values */
ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read);
- VRFY((ret >= 0), "retriving actual io mode succeeded");
+ VRFY((ret >= 0), "retrieving actual io mode succeeded");
ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read);
- VRFY((ret >= 0), "retriving actual chunk opt mode succeeded");
+ VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded");
/* Check write vs read */
VRFY((actual_io_mode_read == actual_io_mode_write),
@@ -3714,7 +3714,7 @@ test_actual_io_mode(int selection_mode)
actual_io_mode_write);
}
- /* To test that the property is succesfully reset to the default, we perform some
+ /* To test that the property is successfully reset to the default, we perform some
* independent I/O after the collective I/O
*/
if (selection_mode == TEST_ACTUAL_IO_RESET) {
@@ -3731,9 +3731,9 @@ test_actual_io_mode(int selection_mode)
/* Check Properties */
ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write);
- VRFY((ret >= 0), "retriving actual io mode succeeded");
+ VRFY((ret >= 0), "retrieving actual io mode succeeded");
ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write);
- VRFY((ret >= 0), "retriving actual chunk opt mode succeeded");
+ VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded");
VRFY(actual_chunk_opt_mode_write == H5D_MPIO_NO_CHUNK_OPTIMIZATION,
"actual_chunk_opt_mode has correct value for reset write (independent)");
@@ -3746,9 +3746,9 @@ test_actual_io_mode(int selection_mode)
/* Check Properties */
ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read);
- VRFY((ret >= 0), "retriving actual io mode succeeded");
+ VRFY((ret >= 0), "retrieving actual io mode succeeded");
ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read);
- VRFY((ret >= 0), "retriving actual chunk opt mode succeeded");
+ VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded");
VRFY(actual_chunk_opt_mode_read == H5D_MPIO_NO_CHUNK_OPTIMIZATION,
"actual_chunk_opt_mode has correct value for reset read (independent)");
@@ -3794,11 +3794,11 @@ actual_io_mode_tests(void)
test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_IND);
test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_COL);
- /* The Multi Chunk Mixed test requires atleast three processes. */
+ /* The Multi Chunk Mixed test requires at least three processes. */
if (mpi_size > 2)
test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX);
else
- HDfprintf(stdout, "Multi Chunk Mixed test requires 3 proceses minimum\n");
+ HDfprintf(stdout, "Multi Chunk Mixed test requires 3 processes minimum\n");
test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE);
@@ -3842,7 +3842,7 @@ actual_io_mode_tests(void)
* Test for Data Type Conversion as the cause of breaking collective I/O.
*
* TEST_DATA_TRANSFORMS:
- * Test for Data Transfrom feature as the cause of breaking collective I/O.
+ * Test for Data Transform feature as the cause of breaking collective I/O.
*
* TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES:
* Test for NULL dataspace as the cause of breaking collective I/O.
@@ -3853,12 +3853,6 @@ actual_io_mode_tests(void)
* TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL:
* Test for Externl-File storage as the cause of breaking collective I/O.
*
- * TEST_FILTERS:
- * Test for using filter (checksum) as the cause of breaking collective I/O.
- * Note: TEST_FILTERS mode will not work until H5Dcreate and H5write is supported for mpio and filter
- * feature. Use test_no_collective_cause_mode_filter() function instead.
- *
- *
* Programmer: Jonathan Kim
* Date: Aug, 2012
*/
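
Each of the causes listed above surfaces as a bit in the local and global masks returned by H5Pget_mpio_no_collective_cause(). A minimal sketch of the query the test performs, separate from the diff:

    #include <stdio.h>
    #include <stdint.h>
    #include "hdf5.h"

    /* Hedged sketch: report why the last transfer fell back from collective
     * I/O. The masks combine bits such as H5D_MPIO_DATA_TRANSFORMS. */
    static void
    report_no_collective_cause(hid_t dxpl)
    {
        uint32_t local_cause  = 0;
        uint32_t global_cause = 0;

        if (H5Pget_mpio_no_collective_cause(dxpl, &local_cause, &global_cause) >= 0)
            printf("local cause: 0x%x, global cause: 0x%x\n",
                   (unsigned)local_cause, (unsigned)global_cause);
    }
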
@@ -3898,9 +3892,6 @@ test_no_collective_cause_mode(int selection_mode)
hid_t file_space = -1;
hsize_t chunk_dims[MAX_RANK];
herr_t ret;
-#ifdef LATER /* fletcher32 */
- H5Z_filter_t filter_info;
-#endif /* LATER */
/* set to global value as default */
int l_facc_type = facc_type;
char message[256];
@@ -3932,21 +3923,6 @@ test_no_collective_cause_mode(int selection_mode)
is_chunked = 0;
}
-#ifdef LATER /* fletcher32 */
- if (selection_mode & TEST_FILTERS) {
- ret = H5Zfilter_avail(H5Z_FILTER_FLETCHER32);
- VRFY((ret >= 0), "Fletcher32 filter is available.\n");
-
- ret = H5Zget_filter_info(H5Z_FILTER_FLETCHER32, &filter_info);
- VRFY(((filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) ||
- (filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED)),
- "Fletcher32 filter encoding and decoding available.\n");
-
- ret = H5Pset_fletcher32(dcpl);
- VRFY((ret >= 0), "set filter (flecher32) succeeded");
- }
-#endif /* LATER */
-
if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES) {
sid = H5Screate(H5S_NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
@@ -4004,7 +3980,7 @@ test_no_collective_cause_mode(int selection_mode)
}
if (selection_mode & TEST_DATA_TRANSFORMS) {
- test_name = "Broken Collective I/O - DATA Transfroms";
+ test_name = "Broken Collective I/O - DATA Transforms";
no_collective_cause_local_expected |= H5D_MPIO_DATA_TRANSFORMS;
no_collective_cause_global_expected |= H5D_MPIO_DATA_TRANSFORMS;
}
@@ -4022,14 +3998,6 @@ test_no_collective_cause_mode(int selection_mode)
no_collective_cause_global_expected |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET;
}
-#ifdef LATER /* fletcher32 */
- if (selection_mode & TEST_FILTERS) {
- test_name = "Broken Collective I/O - Filter is required";
- no_collective_cause_local_expected |= H5D_MPIO_FILTERS;
- no_collective_cause_global_expected |= H5D_MPIO_FILTERS;
- }
-#endif /* LATER */
-
if (selection_mode & TEST_COLLECTIVE) {
test_name = "Broken Collective I/O - Not Broken";
no_collective_cause_local_expected = H5D_MPIO_COLLECTIVE;
@@ -4102,7 +4070,7 @@ test_no_collective_cause_mode(int selection_mode)
/* Get the cause of broken collective I/O */
ret = H5Pget_mpio_no_collective_cause(dxpl_write, &no_collective_cause_local_write,
&no_collective_cause_global_write);
- VRFY((ret >= 0), "retriving no collective cause succeeded");
+ VRFY((ret >= 0), "retrieving no collective cause succeeded");
/*---------------------
* Test Read access
@@ -4122,7 +4090,7 @@ test_no_collective_cause_mode(int selection_mode)
/* Get the cause of broken collective I/O */
ret = H5Pget_mpio_no_collective_cause(dxpl_read, &no_collective_cause_local_read,
&no_collective_cause_global_read);
- VRFY((ret >= 0), "retriving no collective cause succeeded");
+ VRFY((ret >= 0), "retrieving no collective cause succeeded");
/* Check write vs read */
VRFY((no_collective_cause_local_read == no_collective_cause_local_write),
@@ -4166,240 +4134,6 @@ test_no_collective_cause_mode(int selection_mode)
return;
}
-#if 0
-/*
- * Function: test_no_collective_cause_mode_filter
- *
- * Purpose:
- * Test specific for using filter as a caus of broken collective I/O and
- * checks that the H5Pget_mpio_no_collective_cause properties in the DXPL
- * have the correct values.
- *
- * NOTE:
- * This is a temporary function.
- * test_no_collective_cause_mode(TEST_FILTERS) will replace this when
- * H5Dcreate and H5write support for mpio and filter feature.
- *
- * Input:
- * TEST_FILTERS_READ:
- * Test for using filter (checksum) as the cause of breaking collective I/O.
- *
- * Programmer: Jonathan Kim
- * Date: Aug, 2012
- */
-static void
-test_no_collective_cause_mode_filter(int selection_mode)
-{
- uint32_t no_collective_cause_local_read = 0;
- uint32_t no_collective_cause_local_expected = 0;
- uint32_t no_collective_cause_global_read = 0;
- uint32_t no_collective_cause_global_expected = 0;
-
- const char * filename;
- const char * test_name;
- hbool_t is_chunked=1;
- int mpi_size = -1;
- int mpi_rank = -1;
- int length;
- int * buffer;
- int i;
- MPI_Comm mpi_comm = MPI_COMM_NULL;
- MPI_Info mpi_info = MPI_INFO_NULL;
- hid_t fid = -1;
- hid_t sid = -1;
- hid_t dataset = -1;
- hid_t data_type = H5T_NATIVE_INT;
- hid_t fapl_write = -1;
- hid_t fapl_read = -1;
- hid_t dcpl = -1;
- hid_t dxpl = -1;
- hsize_t dims[MAX_RANK];
- hid_t mem_space = -1;
- hid_t file_space = -1;
- hsize_t chunk_dims[MAX_RANK];
- herr_t ret;
-#ifdef LATER /* fletcher32 */
- H5Z_filter_t filter_info;
-#endif /* LATER */
- char message[256];
-
- /* Set up MPI parameters */
- MPI_Comm_size(test_comm, &mpi_size);
- MPI_Comm_rank(test_comm, &mpi_rank);
-
- MPI_Barrier(test_comm);
-
- HDassert(mpi_size >= 1);
-
- mpi_comm = test_comm;
- mpi_info = MPI_INFO_NULL;
-
- /* Create the dataset creation plist */
- dcpl = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((dcpl >= 0), "dataset creation plist created successfully");
-
- if (selection_mode == TEST_FILTERS_READ ) {
-#ifdef LATER /* fletcher32 */
- ret = H5Zfilter_avail(H5Z_FILTER_FLETCHER32);
- VRFY ((ret >=0 ), "Fletcher32 filter is available.\n");
-
- ret = H5Zget_filter_info (H5Z_FILTER_FLETCHER32, (unsigned int *) &filter_info);
- VRFY ( ( (filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) || (filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED) ) , "Fletcher32 filter encoding and decoding available.\n");
-
- ret = H5Pset_fletcher32(dcpl);
- VRFY((ret >= 0),"set filter (flecher32) succeeded");
-#endif /* LATER */
- }
- else {
- VRFY(0, "Unexpected mode, only test for TEST_FILTERS_READ.");
- }
-
- /* Create the basic Space */
- dims[0] = dim0;
- dims[1] = dim1;
- sid = H5Screate_simple (MAX_RANK, dims, NULL);
- VRFY((sid >= 0), "H5Screate_simple succeeded");
-
-
- filename = (const char *)GetTestParameters();
- HDassert(filename != NULL);
-
- /* Setup the file access template */
- fapl_write = create_faccess_plist(mpi_comm, mpi_info, FACC_DEFAULT);
- VRFY((fapl_write >= 0), "create_faccess_plist() succeeded");
-
- fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_write);
- VRFY((fid >= 0), "H5Fcreate succeeded");
-
- /* If we are not testing contiguous datasets */
- if(is_chunked) {
- /* Set up chunk information. */
- chunk_dims[0] = dims[0]/mpi_size;
- chunk_dims[1] = dims[1];
- ret = H5Pset_chunk(dcpl, 2, chunk_dims);
- VRFY((ret >= 0),"chunk creation property list succeeded");
- }
-
-
- /* Create the dataset */
- dataset = H5Dcreate2(fid, DSET_NOCOLCAUSE, data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded");
-
-#ifdef LATER /* fletcher32 */
- /* Set expected cause */
- test_name = "Broken Collective I/O - Filter is required";
- no_collective_cause_local_expected = H5D_MPIO_FILTERS;
- no_collective_cause_global_expected = H5D_MPIO_FILTERS;
-#endif /* LATER */
-
- /* Get the file dataspace */
- file_space = H5Dget_space(dataset);
- VRFY((file_space >= 0), "H5Dget_space succeeded");
-
- /* Create the memory dataspace */
- mem_space = H5Screate_simple (MAX_RANK, dims, NULL);
- VRFY((mem_space >= 0), "mem_space created");
-
- /* Get the number of elements in the selection */
- length = dim0 * dim1;
-
- /* Allocate and initialize the buffer */
- buffer = (int *)HDmalloc(sizeof(int) * length);
- VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
- for(i = 0; i < length; i++)
- buffer[i] = i;
-
- /* Set up the dxpl for the write */
- dxpl = H5Pcreate(H5P_DATASET_XFER);
- VRFY((dxpl >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
-
- if (selection_mode == TEST_FILTERS_READ) {
- /* To test read in collective I/O mode , write in independent mode
- * because write fails with mpio + filter */
- ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- }
- else {
- /* To test write in collective I/O mode. */
- ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- }
-
-
- /* Write */
- ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl, buffer);
-
- if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
- VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
-
-
- /* Make a copy of the dxpl to test the read operation */
- dxpl = H5Pcopy(dxpl);
- VRFY((dxpl >= 0), "H5Pcopy succeeded");
-
- if (dataset)
- H5Dclose(dataset);
- if (fapl_write)
- H5Pclose(fapl_write);
- if (fid)
- H5Fclose(fid);
-
-
- /*---------------------
- * Test Read access
- *---------------------*/
-
- /* Setup the file access template */
- fapl_read = create_faccess_plist(mpi_comm, mpi_info, facc_type);
- VRFY((fapl_read >= 0), "create_faccess_plist() succeeded");
-
- fid = H5Fopen (filename, H5F_ACC_RDONLY, fapl_read);
- dataset = H5Dopen2 (fid, DSET_NOCOLCAUSE, H5P_DEFAULT);
-
- /* Set collective I/O properties in the dxpl. */
- ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
-
- /* Read */
- ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl, buffer);
-
- if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
- VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
-
- /* Get the cause of broken collective I/O */
- ret = H5Pget_mpio_no_collective_cause (dxpl, &no_collective_cause_local_read, &no_collective_cause_global_read);
- VRFY((ret >= 0), "retriving no collective cause succeeded" );
-
- /* Test values */
- HDmemset (message, 0, sizeof (message));
- HDsprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n",test_name);
- VRFY((no_collective_cause_local_read == (uint32_t)no_collective_cause_local_expected), message);
- HDmemset (message, 0, sizeof (message));
- HDsprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n",test_name);
- VRFY((no_collective_cause_global_read == (uint32_t)no_collective_cause_global_expected), message);
-
- /* Release some resources */
- if (sid)
- H5Sclose(sid);
- if (fapl_read)
- H5Pclose(fapl_read);
- if (dcpl)
- H5Pclose(dcpl);
- if (dxpl)
- H5Pclose(dxpl);
- if (dataset)
- H5Dclose(dataset);
- if (mem_space)
- H5Sclose(mem_space);
- if (file_space)
- H5Sclose(file_space);
- if (fid)
- H5Fclose(fid);
- HDfree(buffer);
- return;
-}
-#endif
-
/* Function: no_collective_cause_tests
*
* Purpose: Tests cases for broken collective IO.
@@ -4420,13 +4154,6 @@ no_collective_cause_tests(void)
test_no_collective_cause_mode(TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES);
test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT);
test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL);
-#ifdef LATER /* fletcher32 */
- /* TODO: use this instead of below TEST_FILTERS_READ when H5Dcreate and
- * H5Dwrite is ready for mpio + filter feature.
- */
- /* test_no_collective_cause_mode (TEST_FILTERS); */
- test_no_collective_cause_mode_filter(TEST_FILTERS_READ);
-#endif /* LATER */
/*
* Test combined causes
@@ -4547,7 +4274,7 @@ dataset_atomicity(void)
/* file locking allows only one file open (serial) for writing */
if (MAINPROCESS) {
fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT);
- VRFY((fid >= 0), "H5Fopen succeeed");
+ VRFY((fid >= 0), "H5Fopen succeeded");
}
/* should fail */
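
The dataset_atomicity test this hunk touches is built around the MPI atomicity switch. A minimal sketch of the public call involved, assuming file_id refers to a file opened with the MPI-IO driver:

    #include "hdf5.h"

    /* Hedged sketch: enable MPI atomic access so concurrent readers see
     * either all or none of another rank's write. */
    static herr_t
    enable_mpi_atomicity(hid_t file_id)
    {
        return H5Fset_mpi_atomicity(file_id, 1); /* 1 == TRUE */
    }
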
diff --git a/testpar/t_bigio.c b/testpar/t_bigio.c
index e5654f4..0a971c5 100644
--- a/testpar/t_bigio.c
+++ b/testpar/t_bigio.c
@@ -1107,13 +1107,15 @@ single_rank_independent_io(void)
HDprintf("\nSingle Rank Independent I/O\n");
if (MAIN_PROCESS) {
- hsize_t dims[] = {LARGE_DIM};
- hid_t file_id = -1;
- hid_t fapl_id = -1;
- hid_t dset_id = -1;
- hid_t fspace_id = -1;
- hid_t mspace_id = -1;
- void * data = NULL;
+ hsize_t dims[] = {LARGE_DIM};
+ hid_t file_id = -1;
+ hid_t fapl_id = -1;
+ hid_t dset_id = -1;
+ hid_t fspace_id = -1;
+ hid_t mspace_id = -1;
+ herr_t ret;
+ int * data = NULL;
+ uint64_t i;
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY_G((fapl_id >= 0), "H5P_FILE_ACCESS");
@@ -1135,6 +1137,10 @@ single_rank_independent_io(void)
data = malloc(LARGE_DIM * sizeof(int));
+ /* Initialize data */
+ for (i = 0; i < LARGE_DIM; i++)
+ data[i] = (int)(i % (uint64_t)DXFER_BIGCOUNT);
+
if (mpi_rank_g == 0)
H5Sselect_all(fspace_id);
else
@@ -1143,7 +1149,24 @@ single_rank_independent_io(void)
dims[0] = LARGE_DIM;
mspace_id = H5Screate_simple(1, dims, NULL);
VRFY_G((mspace_id >= 0), "H5Screate_simple mspace_id succeeded");
+
+ /* Write data */
- H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, H5P_DEFAULT, data);
+ ret = H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, H5P_DEFAULT, data);
+ VRFY_G((ret >= 0), "H5Dwrite succeeded");
+
+ /* Wipe buffer */
+ HDmemset(data, 0, LARGE_DIM * sizeof(int));
+
+ /* Read data back */
+ ret = H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, H5P_DEFAULT, data);
+ VRFY_G((ret >= 0), "H5Dread succeeded");
+
+ /* Verify data */
+ for (i = 0; i < LARGE_DIM; i++)
+ if (data[i] != (int)(i % (uint64_t)DXFER_BIGCOUNT)) {
+ HDfprintf(stderr, "verify failed\n");
+ exit(1);
+ }
free(data);
H5Sclose(mspace_id);
@@ -1331,7 +1354,7 @@ coll_chunk2(void)
*/
/* ------------------------------------------------------------------------
- * Descriptions for the selection: one singular selection accross many chunks
+ * Descriptions for the selection: one singular selection across many chunks
* Two dimensions, Num of chunks = 2* mpi_size
*
* dim1 = space_dim1*mpi_size
@@ -1840,7 +1863,7 @@ main(int argc, char **argv)
/* Set the bigio processing limit to be 'newsize' bytes */
hsize_t oldsize = H5_mpi_set_bigio_count(newsize);
- /* Having set the bigio handling to a size that is managable,
+ /* Having set the bigio handling to a size that is manageable,
* we'll set our 'bigcount' variable to be 2X that limit so
* that we try to ensure that our bigio handling is actually
* envoked and tested.
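
The sizing logic described in this comment is easy to sketch. H5_mpi_set_bigio_count() is an internal HDF5 helper exposed to the parallel tests, not public API, and the 1 MiB value here is an assumption for illustration:

    #include "hdf5.h"

    /* Hedged sketch: lower the library's bigio threshold, then pick a
     * count above it so the bigio (derived-datatype) path must be taken. */
    static hsize_t
    pick_bigcount(void)
    {
        hsize_t newsize = 1048576; /* assumed 1 MiB test threshold */
        hsize_t oldsize = H5_mpi_set_bigio_count(newsize);

        (void)oldsize; /* could be restored later with another call */
        return newsize * 2;
    }
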
diff --git a/testpar/t_cache.c b/testpar/t_cache.c
index 8696092..70ada01 100644
--- a/testpar/t_cache.c
+++ b/testpar/t_cache.c
@@ -126,7 +126,7 @@ int total_writes = 0;
*
* local_pinned: Boolean flag that is set to true iff the entry
* has been pinned in the local cache, but probably not all
- * caches. Such pins will typically not be consistant across
+ * caches. Such pins will typically not be consistent across
* processes, and thus cannot be marked as dirty unless they
* happen to overlap some collective operation.
*
@@ -205,7 +205,7 @@ struct datum data[NUM_DATA_ENTRIES];
* even divisor of NUM_DATA_ENTRIES. So far, all tests have been with
* powers of 10 that meet these criteria.
*
- * Further, this value must be consistant across all processes.
+ * Further, this value must be consistent across all processes.
*/
#define STD_VIRT_NUM_DATA_ENTRIES NUM_DATA_ENTRIES
@@ -252,7 +252,7 @@ int data_index[NUM_DATA_ENTRIES];
*
* Strangely, at least on Phoenix, the first solution runs faster by a
* rather large margin. However, I can imagine this changing with
- * different OS's and MPI implementatins.
+ * different OS's and MPI implementations.
*
* Thus I have left code supporting the second solution in place.
*
@@ -1282,7 +1282,7 @@ reset_server_counters(void)
* Function: server_main()
*
* Purpose: Main function for the server process. This process exists
- * to provide an independant view of the data array.
+ * to provide an independent view of the data array.
*
* The function handles request from the other processes in
* the test until the count of done messages received equals
@@ -3962,7 +3962,7 @@ setup_cache_for_test(hid_t *fid_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr
* Purpose: Verify that the indicated entries have been written exactly
* once each, and that the indicated total number of writes
* has been processed by the server process. Flag an error if
- * discrepency is noted. Finally reset the counters maintained
+ * discrepancy is noted. Finally reset the counters maintained
* by the server process.
*
* This function should only be called by the metadata cache
@@ -4050,7 +4050,7 @@ verify_writes(unsigned num_writes, haddr_t *written_entries_tbl)
/* final barrier to ensure that all processes think that the server
* counters have been reset before we leave the sync point. This
- * barrier is probaby not necessary at this point in time (5/9/10),
+ * barrier is probably not necessary at this point in time (5/9/10),
* but I can think of at least one likely change to the metadata write
* strategies that will require it -- hence its insertion now.
*/
@@ -4794,7 +4794,7 @@ server_smoke_check(void)
if (!server_main()) {
- /* some error occured in the server -- report failure */
+ /* some error occurred in the server -- report failure */
nerrors++;
if (verbose) {
HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, __func__);
@@ -5141,7 +5141,7 @@ smoke_check_1(int metadata_write_strategy)
if (!server_main()) {
- /* some error occured in the server -- report failure */
+ /* some error occurred in the server -- report failure */
nerrors++;
if (verbose) {
HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, __func__);
@@ -5305,7 +5305,7 @@ smoke_check_2(int metadata_write_strategy)
if (!server_main()) {
- /* some error occured in the server -- report failure */
+ /* some error occurred in the server -- report failure */
nerrors++;
if (verbose) {
HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, __func__);
@@ -5453,7 +5453,7 @@ smoke_check_2(int metadata_write_strategy)
*
* Purpose: Third smoke check for the parallel cache.
*
- * Use random reads to vary the loads on the diffferent
+ * Use random reads to vary the loads on the different
* processors. Also force different cache size adjustments.
*
* In this test, load process 0 heavily, and the other
@@ -5510,7 +5510,7 @@ smoke_check_3(int metadata_write_strategy)
if (!server_main()) {
- /* some error occured in the server -- report failure */
+ /* some error occurred in the server -- report failure */
nerrors++;
if (verbose) {
HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, __func__);
@@ -5738,7 +5738,7 @@ smoke_check_3(int metadata_write_strategy)
*
* Purpose: Fourth smoke check for the parallel cache.
*
- * Use random reads to vary the loads on the diffferent
+ * Use random reads to vary the loads on the different
* processors. Also force different cache size adjustments.
*
* In this test, load process 0 lightly, and the other
@@ -5795,7 +5795,7 @@ smoke_check_4(int metadata_write_strategy)
if (!server_main()) {
- /* some error occured in the server -- report failure */
+ /* some error occurred in the server -- report failure */
nerrors++;
if (verbose) {
HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, __func__);
@@ -6070,7 +6070,7 @@ smoke_check_5(int metadata_write_strategy)
if (!server_main()) {
- /* some error occured in the server -- report failure */
+ /* some error occurred in the server -- report failure */
nerrors++;
if (verbose) {
HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, __func__);
@@ -6355,7 +6355,7 @@ trace_file_check(int metadata_write_strategy)
if (!server_main()) {
- /* some error occured in the server -- report failure */
+ /* some error occurred in the server -- report failure */
nerrors++;
if (verbose)
HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, __func__);
@@ -6488,7 +6488,7 @@ trace_file_check(int metadata_write_strategy)
} /* end if */
if (nerrors == 0) {
- HDsprintf(trace_file_name, "t_cache_trace.txt.%d", (int)file_mpi_rank);
+ HDsnprintf(trace_file_name, sizeof(trace_file_name), "t_cache_trace.txt.%d", (int)file_mpi_rank);
if ((trace_file_ptr = HDfopen(trace_file_name, "r")) == NULL) {
@@ -6623,13 +6623,15 @@ trace_file_check(int metadata_write_strategy)
static hbool_t
smoke_check_6(int metadata_write_strategy)
{
- hbool_t success = TRUE;
- int i;
- int max_nerrors;
- hid_t fid = -1;
- H5F_t * file_ptr = NULL;
- H5C_t * cache_ptr = NULL;
- struct mssg_t mssg;
+ H5P_coll_md_read_flag_t md_reads_file_flag;
+ hbool_t md_reads_context_flag;
+ hbool_t success = TRUE;
+ int i;
+ int max_nerrors;
+ hid_t fid = -1;
+ H5F_t * file_ptr = NULL;
+ H5C_t * cache_ptr = NULL;
+ struct mssg_t mssg;
switch (metadata_write_strategy) {
@@ -6660,7 +6662,7 @@ smoke_check_6(int metadata_write_strategy)
if (!server_main()) {
- /* some error occured in the server -- report failure */
+ /* some error occurred in the server -- report failure */
nerrors++;
if (verbose) {
HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, __func__);
@@ -6685,7 +6687,9 @@ smoke_check_6(int metadata_write_strategy)
virt_num_data_entries = NUM_DATA_ENTRIES;
/* insert the first half collectively */
- H5CX_set_coll_metadata_read(TRUE);
+ md_reads_file_flag = H5P_USER_TRUE;
+ md_reads_context_flag = TRUE;
+ H5F_set_coll_metadata_reads(file_ptr, &md_reads_file_flag, &md_reads_context_flag);
for (i = 0; i < virt_num_data_entries / 2; i++) {
struct datum *entry_ptr;
entry_ptr = &(data[i]);
@@ -6704,9 +6708,13 @@ smoke_check_6(int metadata_write_strategy)
H5_CHECK_OVERFLOW(cache_ptr->max_cache_size, size_t, double);
HDassert((double)cache_ptr->max_cache_size * 0.8 > cache_ptr->coll_list_size);
}
+ /* Restore collective metadata reads state */
+ H5F_set_coll_metadata_reads(file_ptr, &md_reads_file_flag, &md_reads_context_flag);
/* insert the other half independently */
- H5CX_set_coll_metadata_read(FALSE);
+ md_reads_file_flag = H5P_USER_FALSE;
+ md_reads_context_flag = FALSE;
+ H5F_set_coll_metadata_reads(file_ptr, &md_reads_file_flag, &md_reads_context_flag);
for (i = virt_num_data_entries / 2; i < virt_num_data_entries; i++) {
struct datum *entry_ptr;
entry_ptr = &(data[i]);
@@ -6724,6 +6732,8 @@ smoke_check_6(int metadata_write_strategy)
/* Make sure coll entries do not cross the 80% threshold */
HDassert((double)cache_ptr->max_cache_size * 0.8 > cache_ptr->coll_list_size);
}
+ /* Restore collective metadata reads state */
+ H5F_set_coll_metadata_reads(file_ptr, &md_reads_file_flag, &md_reads_context_flag);
/* flush the file */
if (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) {
@@ -6734,7 +6744,9 @@ smoke_check_6(int metadata_write_strategy)
}
/* Protect the first half of the entries collectively */
- H5CX_set_coll_metadata_read(TRUE);
+ md_reads_file_flag = H5P_USER_TRUE;
+ md_reads_context_flag = TRUE;
+ H5F_set_coll_metadata_reads(file_ptr, &md_reads_file_flag, &md_reads_context_flag);
for (i = 0; i < (virt_num_data_entries / 2); i++) {
struct datum *entry_ptr;
entry_ptr = &(data[i]);
@@ -6752,9 +6764,13 @@ smoke_check_6(int metadata_write_strategy)
/* Make sure coll entries do not cross the 80% threshold */
HDassert((double)cache_ptr->max_cache_size * 0.8 > cache_ptr->coll_list_size);
}
+ /* Restore collective metadata reads state */
+ H5F_set_coll_metadata_reads(file_ptr, &md_reads_file_flag, &md_reads_context_flag);
/* protect the other half independently */
- H5CX_set_coll_metadata_read(FALSE);
+ md_reads_file_flag = H5P_USER_FALSE;
+ md_reads_context_flag = FALSE;
+ H5F_set_coll_metadata_reads(file_ptr, &md_reads_file_flag, &md_reads_context_flag);
for (i = virt_num_data_entries / 2; i < virt_num_data_entries; i++) {
struct datum *entry_ptr;
entry_ptr = &(data[i]);
@@ -6772,6 +6788,8 @@ smoke_check_6(int metadata_write_strategy)
/* Make sure coll entries do not cross the 80% threshold */
HDassert((double)cache_ptr->max_cache_size * 0.8 > cache_ptr->coll_list_size);
}
+ /* Restore collective metadata reads state */
+ H5F_set_coll_metadata_reads(file_ptr, &md_reads_file_flag, &md_reads_context_flag);
for (i = 0; i < (virt_num_data_entries); i++) {
unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
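
smoke_check_6 now toggles collective metadata reads through the internal H5F_set_coll_metadata_reads() hook; application code uses the public FAPL properties instead. A hedged sketch of that public path:

    #include "hdf5.h"
    #include "mpi.h"

    /* Hedged sketch: build a parallel file-access plist that requests
     * collective metadata reads and writes via the public API. */
    static hid_t
    make_coll_md_fapl(MPI_Comm comm, MPI_Info info)
    {
        hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);

        if (fapl < 0 || H5Pset_fapl_mpio(fapl, comm, info) < 0 ||
            H5Pset_all_coll_metadata_ops(fapl, 1) < 0 || /* collective reads */
            H5Pset_coll_metadata_write(fapl, 1) < 0) {   /* collective writes */
            if (fapl >= 0)
                H5Pclose(fapl);
            return H5I_INVALID_HID;
        }
        return fapl;
    }
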
diff --git a/testpar/t_cache_image.c b/testpar/t_cache_image.c
index 17fb992..4229a77 100644
--- a/testpar/t_cache_image.c
+++ b/testpar/t_cache_image.c
@@ -449,7 +449,7 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
}
/* set the dataset creation plist to specify that the raw data is
- * to be partioned into 10X10 element chunks.
+ * to be partitioned into 10X10 element chunks.
*/
if (pass) {
@@ -477,7 +477,7 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
/* create the dataset */
if (pass) {
- HDsprintf(dset_name, "/dset%03d", i);
+ HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", i);
dataset_ids[i] = H5Dcreate2(file_id, dset_name, H5T_STD_I32BE, dataspace_id, H5P_DEFAULT,
properties, H5P_DEFAULT);
@@ -766,7 +766,7 @@ delete_data_sets(hid_t file_id, int min_dset, int max_dset)
while ( ( pass ) && ( i <= max_dset ) )
{
- HDsprintf(dset_name, "/dset%03d", i);
+ HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", i);
if ( H5Ldelete(file_id, dset_name, H5P_DEFAULT) < 0) {
@@ -795,7 +795,7 @@ delete_data_sets(hid_t file_id, int min_dset, int max_dset)
* Set pass to FALSE and issue a suitable failure
* message if either the file contains a metadata cache image
* superblock extension and mdci_sbem_expected is TRUE, or
- * vise versa.
+ * vice versa.
*
* If mdci_sbem_expected is TRUE, also verify that the metadata
* cache has been advised of this.
@@ -807,7 +807,7 @@ delete_data_sets(hid_t file_id, int min_dset, int max_dset)
* FAPL entry when opening the file, and verify that the
* metadata cache is notified.
*
- * If config_fsm is TRUE, setup the persistant free space
+ * If config_fsm is TRUE, setup the persistent free space
* manager. Note that this flag may only be set if
* create_file is also TRUE.
*
@@ -884,7 +884,7 @@ open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, cons
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* create a file access propertly list. */
+ /* create a file access property list. */
if (pass) {
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -956,7 +956,7 @@ open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, cons
if (show_progress)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* setup the persistant free space manager if indicated */
+ /* setup the persistent free space manager if indicated */
if ((pass) && (config_fsm)) {
fcpl_id = H5Pcreate(H5P_FILE_CREATE);
@@ -1138,13 +1138,13 @@ open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, cons
if ((file_ptr->shared->page_buf) && ((!enable_page_buffer) || (l_facc_type == FACC_MPIO))) {
pass = FALSE;
- failure_mssg = "page buffer unexepectedly enabled.";
+ failure_mssg = "page buffer unexpectedly enabled.";
}
else if ((file_ptr->shared->page_buf != NULL) &&
((enable_page_buffer) || (l_facc_type != FACC_MPIO))) {
pass = FALSE;
- failure_mssg = "page buffer unexepectedly disabled.";
+ failure_mssg = "page buffer unexpectedly disabled.";
}
}
@@ -1334,7 +1334,7 @@ par_create_dataset(int dset_num, hid_t file_id, int mpi_rank, int mpi_size)
show_progress = (show_progress && (mpi_rank == 0));
verbose = (verbose && (mpi_rank == 0));
- HDsprintf(dset_name, "/dset%03d", dset_num);
+ HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", dset_num);
if (show_progress) {
HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
@@ -1360,7 +1360,7 @@ par_create_dataset(int dset_num, hid_t file_id, int mpi_rank, int mpi_size)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* set the dataset creation plist to specify that the raw data is
- * to be partioned into 1X10X10 element chunks.
+ * to be partitioned into 1X10X10 element chunks.
*/
if (pass) {
@@ -1707,7 +1707,7 @@ par_delete_dataset(int dset_num, hid_t file_id, int mpi_rank)
show_progress = (show_progress && (mpi_rank == 0));
- HDsprintf(dset_name, "/dset%03d", dset_num);
+ HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", dset_num);
if (show_progress) {
HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
@@ -1840,7 +1840,7 @@ par_verify_dataset(int dset_num, hid_t file_id, int mpi_rank)
show_progress = (show_progress && (mpi_rank == 0));
verbose = (verbose && (mpi_rank == 0));
- HDsprintf(dset_name, "/dset%03d", dset_num);
+ HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", dset_num);
if (show_progress) {
HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
@@ -2208,7 +2208,7 @@ serial_verify_dataset(int dset_num, hid_t file_id, int mpi_size)
hid_t dset_id = -1;
hid_t filespace_id = -1;
- HDsprintf(dset_name, "/dset%03d", dset_num);
+ HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", dset_num);
if (show_progress) {
HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
@@ -2460,7 +2460,7 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
/* open the dataset */
if (pass) {
- HDsprintf(dset_name, "/dset%03d", i);
+ HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", i);
dataset_ids[i] = H5Dopen2(file_id, dset_name, H5P_DEFAULT);
if (dataset_ids[i] < 0) {
@@ -2796,7 +2796,7 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
/* Verify that all other processes receive the cache image block
* from process 0.
*
- * Since we have alread verified that only process 0 has read the
+ * Since we have already verified that only process 0 has read the
* image, it is sufficient to verify that the image was loaded on
* all processes.
*/
@@ -3067,7 +3067,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
/* Verify that all other processes receive the cache image block
* from process 0.
*
- * Since we have alread verified that only process 0 has read the
+ * Since we have already verified that only process 0 has read the
* image, it is sufficient to verify that the image was loaded on
* all processes.
*/
@@ -3393,7 +3393,7 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
/* Verify that all other processes receive the cache image block
* from process 0.
*
- * Since we have alread verified that only process 0 has read the
+ * Since we have already verified that only process 0 has read the
* image, it is sufficient to verify that the image was loaded on
* all processes.
*/
@@ -3488,7 +3488,7 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
/* Verify that all other processes receive the cache image block
* from process 0.
*
- * Since we have alread verified that only process 0 has read the
+ * Since we have already verified that only process 0 has read the
* image, it is sufficient to verify that the image was loaded on
* all processes.
*/
@@ -3533,9 +3533,9 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* 13) Get the size of the file. Verify that it is less
- * than 20 KB. Without deletions and persistant free
+ * than 20 KB. Without deletions and persistent free
* space managers, size size is about 30 MB, so this
- * is sufficient to verify that the persistant free
+ * is sufficient to verify that the persistent free
* space managers are more or less doing their job.
*
* Note that this test will have to change if we use
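
The HDsprintf-to-HDsnprintf conversions throughout this file share one pattern: bound the write by the destination size. A standalone illustration with plain snprintf (outside the HD wrapper macros):

    #include <stdio.h>

    int
    main(void)
    {
        char dset_name[64];

        /* At most sizeof(dset_name) bytes are written, including the
         * terminating NUL, so the format can never overflow the buffer. */
        snprintf(dset_name, sizeof(dset_name), "/dset%03d", 42);
        puts(dset_name); /* prints "/dset042" */
        return 0;
    }
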
diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c
index 651a392..104460a 100644
--- a/testpar/t_coll_chunk.c
+++ b/testpar/t_coll_chunk.c
@@ -155,7 +155,7 @@ coll_chunk2(void)
*/
/* ------------------------------------------------------------------------
- * Descriptions for the selection: one singular selection accross many chunks
+ * Descriptions for the selection: one singular selection across many chunks
* Two dimensions, Num of chunks = 2* mpi_size
*
* dim1 = SPACE_DIM1*mpi_size
@@ -209,7 +209,7 @@ coll_chunk3(void)
*/
/* ------------------------------------------------------------------------
- * Descriptions for the selection: one singular selection accross many chunks
+ * Descriptions for the selection: one singular selection across many chunks
* Two dimensions, Num of chunks = 2* mpi_size
*
* dim1 = SPACE_DIM1*mpi_size
@@ -261,7 +261,7 @@ coll_chunk4(void)
*/
/* ------------------------------------------------------------------------
- * Descriptions for the selection: one singular selection accross many chunks
+ * Descriptions for the selection: one singular selection across many chunks
* Two dimensions, Num of chunks = 2* mpi_size
*
* dim1 = SPACE_DIM1*mpi_size
@@ -315,7 +315,7 @@ coll_chunk5(void)
*/
/* ------------------------------------------------------------------------
- * Descriptions for the selection: one singular selection accross many chunks
+ * Descriptions for the selection: one singular selection across many chunks
* Two dimensions, Num of chunks = 2* mpi_size
*
* dim1 = SPACE_DIM1*mpi_size
@@ -367,7 +367,7 @@ coll_chunk6(void)
*/
/* ------------------------------------------------------------------------
- * Descriptions for the selection: one singular selection accross many chunks
+ * Descriptions for the selection: one singular selection across many chunks
* Two dimensions, Num of chunks = 2* mpi_size
*
* dim1 = SPACE_DIM1*mpi_size
@@ -419,7 +419,7 @@ coll_chunk7(void)
*/
/* ------------------------------------------------------------------------
- * Descriptions for the selection: one singular selection accross many chunks
+ * Descriptions for the selection: one singular selection across many chunks
* Two dimensions, Num of chunks = 2* mpi_size
*
* dim1 = SPACE_DIM1*mpi_size
@@ -471,7 +471,7 @@ coll_chunk8(void)
*/
/* ------------------------------------------------------------------------
- * Descriptions for the selection: one singular selection accross many chunks
+ * Descriptions for the selection: one singular selection across many chunks
* Two dimensions, Num of chunks = 2* mpi_size
*
* dim1 = SPACE_DIM1*mpi_size
@@ -523,7 +523,7 @@ coll_chunk9(void)
*/
/* ------------------------------------------------------------------------
- * Descriptions for the selection: one singular selection accross many chunks
+ * Descriptions for the selection: one singular selection across many chunks
* Two dimensions, Num of chunks = 2* mpi_size
*
* dim1 = SPACE_DIM1*mpi_size
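
The coll_chunk* comments above all describe hyperslab selections over a two-dimensional chunked dataset split among ranks. A generic sketch of such a selection, with dimensions assumed for illustration:

    #include "hdf5.h"

    /* Hedged sketch: each rank selects one contiguous block of rows,
     * the general shape of the coll_chunk* selections described above. */
    static herr_t
    select_rank_block(hid_t fspace, int mpi_rank, hsize_t rows_per_rank, hsize_t ncols)
    {
        hsize_t start[2] = {(hsize_t)mpi_rank * rows_per_rank, 0};
        hsize_t count[2] = {rows_per_rank, ncols};

        return H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, NULL, count, NULL);
    }
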
diff --git a/testpar/t_coll_md_read.c b/testpar/t_coll_md_read.c
index fd62eb6..cabdea0 100644
--- a/testpar/t_coll_md_read.c
+++ b/testpar/t_coll_md_read.c
@@ -34,10 +34,9 @@
#define MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS 2
-#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DATASET_NAME "linked_chunk_io_sort_chunk_issue"
-#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_Y_DIM_SCALE 20000
-#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE 1
-#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS 1
+#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM 10000
+#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DATASET_NAME "linked_chunk_io_sort_chunk_issue"
+#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS 1
/*
* A test for issue HDFFV-10501. A parallel hang was reported which occurred
@@ -339,21 +338,34 @@ test_multi_chunk_io_addrmap_issue(void)
* collective metadata reads being made only by process 0 in H5D__sort_chunk().
*
* NOTE: Due to the way that the threshold value which pertains to this test
- * is currently calculated within HDF5, there are several conditions that this
- * test must maintain. Refer to the function H5D__sort_chunk in H5Dmpio.c for
- * a better idea of why.
+ * is currently calculated within HDF5, the following two conditions must be
+ * true to trigger the issue:
*
- * Condition 1: We need to make sure that the test always selects every single
- * chunk in the dataset. It is fine if the selection is split up among multiple
- * ranks, but their combined selection must cover the whole dataset.
+ * Condition 1: A certain threshold ratio must be met in order to have HDF5
+ * obtain all chunk addresses collectively inside H5D__sort_chunk(). This is
+ * given by the following:
*
- * Condition 2: The number of chunks in the dataset divided by the number of MPI
- * ranks must exceed or equal 10000. In other words, each MPI rank must be
- * responsible for 10000 or more unique chunks.
+ * (sum_chunk * 100) / (dataset_nchunks * mpi_size) >= 30%
*
- * Condition 3: This test will currently only be reliably reproducable for 2 or 3
- * MPI ranks. The threshold value calculated reduces to a constant 100 / mpi_size,
- * and is compared against a default value of 30%.
+ * where:
+ * * `sum_chunk` is the combined sum of the number of chunks selected in
+ * the dataset by all ranks (chunks selected by more than one rank count
+ * individually toward the sum for each rank selecting that chunk)
+ * * `dataset_nchunks` is the number of chunks in the dataset (selected
+ * or not)
+ * * `mpi_size` is the size of the MPI Communicator
+ *
+ * Condition 2: `sum_chunk` divided by `mpi_size` must exceed or equal a certain
+ * threshold (as of this writing, 10000).
+ *
+ * To satisfy both these conditions, we #define a macro,
+ * LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM, which corresponds to the
+ * value of the H5D_ALL_CHUNK_ADDR_THRES_COL_NUM macro in H5Dmpio.c (the
+ * 10000 threshold from condition 2). We then create a dataset of that many
+ * chunks and have each MPI rank write to and read from a piece of every single
+ * chunk in the dataset. This ensures chunk utilization is the max possible
+ * and exceeds our 30% target ratio, while always exactly matching the numeric
+ * chunk threshold value of condition 2.
*
* Failure in this test may either cause a hang, or, due to how the MPI calls
* pertaining to this issue might mistakenly match up, may cause an MPI error
@@ -375,10 +387,9 @@ void
test_link_chunk_io_sort_chunk_issue(void)
{
const char *filename;
- hsize_t * dataset_dims = NULL;
- hsize_t max_dataset_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
- hsize_t sel_dims[1];
- hsize_t chunk_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS] = {LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS};
+ hsize_t dataset_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hsize_t sel_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hsize_t chunk_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
hsize_t start[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
hsize_t stride[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
hsize_t count[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
@@ -412,14 +423,13 @@ test_link_chunk_io_sort_chunk_issue(void)
file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
VRFY((file_id >= 0), "H5Fcreate succeeded");
- dataset_dims = HDmalloc(LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS * sizeof(*dataset_dims));
- VRFY((dataset_dims != NULL), "malloc succeeded");
-
- dataset_dims[0] = (hsize_t)LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE * (hsize_t)mpi_size *
- (hsize_t)LINK_CHUNK_IO_SORT_CHUNK_ISSUE_Y_DIM_SCALE;
- max_dataset_dims[0] = H5S_UNLIMITED;
+ /*
+ * Create a one-dimensional dataset of exactly LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM
+ * chunks, where every rank writes to a piece of every single chunk to keep utilization high.
+ */
+ dataset_dims[0] = (hsize_t)mpi_size * (hsize_t)LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM;
- fspace_id = H5Screate_simple(LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS, dataset_dims, max_dataset_dims);
+ fspace_id = H5Screate_simple(LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS, dataset_dims, NULL);
VRFY((fspace_id >= 0), "H5Screate_simple succeeded");
/*
@@ -428,6 +438,9 @@ test_link_chunk_io_sort_chunk_issue(void)
dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((dcpl_id >= 0), "H5Pcreate succeeded");
+ /* Chunk size is equal to MPI size since each rank writes to a piece of every chunk */
+ chunk_dims[0] = (hsize_t)mpi_size;
+
VRFY((H5Pset_chunk(dcpl_id, LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS, chunk_dims) >= 0),
"H5Pset_chunk succeeded");
@@ -437,23 +450,21 @@ test_link_chunk_io_sort_chunk_issue(void)
/*
* Setup hyperslab selection to split the dataset among the ranks.
- *
- * The ranks will write rows across the dataset.
*/
- stride[0] = LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE;
- count[0] = (dataset_dims[0] / LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE) / (hsize_t)mpi_size;
- start[0] = count[0] * (hsize_t)mpi_rank;
- block[0] = LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE;
+ start[0] = (hsize_t)mpi_rank;
+ stride[0] = (hsize_t)mpi_size;
+ count[0] = LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM;
+ block[0] = 1;
VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0),
"H5Sselect_hyperslab succeeded");
- sel_dims[0] = count[0] * (LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE);
+ sel_dims[0] = count[0];
mspace_id = H5Screate_simple(1, sel_dims, NULL);
VRFY((mspace_id >= 0), "H5Screate_simple succeeded");
- data = HDcalloc(1, count[0] * (LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE) * sizeof(int));
+ data = HDcalloc(1, count[0] * sizeof(int));
VRFY((data != NULL), "calloc succeeded");
dxpl_id = H5Pcreate(H5P_DATASET_XFER);
@@ -476,33 +487,25 @@ test_link_chunk_io_sort_chunk_issue(void)
VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_ONE_IO) >= 0),
"H5Pset_dxpl_mpio_chunk_opt succeeded");
- read_buf = HDmalloc(count[0] * (LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE) * sizeof(int));
+ read_buf = HDmalloc(count[0] * sizeof(int));
VRFY((read_buf != NULL), "malloc succeeded");
VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0),
"H5Sselect_hyperslab succeeded");
- sel_dims[0] = count[0] * (LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE);
+ sel_dims[0] = count[0];
VRFY((H5Sclose(mspace_id) >= 0), "H5Sclose succeeded");
mspace_id = H5Screate_simple(1, sel_dims, NULL);
VRFY((mspace_id >= 0), "H5Screate_simple succeeded");
- read_buf = HDrealloc(read_buf, count[0] * (LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE) * sizeof(int));
- VRFY((read_buf != NULL), "realloc succeeded");
-
/*
* Finally have each rank read their section of data back from the dataset.
*/
VRFY((H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, read_buf) >= 0),
"H5Dread succeeded");
- if (dataset_dims) {
- HDfree(dataset_dims);
- dataset_dims = NULL;
- }
-
if (data) {
HDfree(data);
data = NULL;
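
To see that the selection scheme described in the long comment above meets both threshold conditions, plug in the chosen sizes: with every rank selecting a piece of every chunk, sum_chunk equals dataset_nchunks * mpi_size. A worked check, with the rank count assumed:

    #include <assert.h>

    int
    main(void)
    {
        const unsigned long nchunks   = 10000UL; /* LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM */
        const unsigned long mpi_size  = 3UL;     /* assumed communicator size */
        const unsigned long sum_chunk = nchunks * mpi_size; /* every rank hits every chunk */

        /* Condition 1: utilization ratio is exactly 100%, well over 30%. */
        assert((sum_chunk * 100UL) / (nchunks * mpi_size) >= 30UL);

        /* Condition 2: selected chunks per rank exactly meets the 10000 threshold. */
        assert(sum_chunk / mpi_size >= 10000UL);
        return 0;
    }
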
diff --git a/testpar/t_dset.c b/testpar/t_dset.c
index bbd4b28..8616bef 100644
--- a/testpar/t_dset.c
+++ b/testpar/t_dset.c
@@ -2605,7 +2605,7 @@ compress_readAll(void)
nerrors++;
}
-#if MPI_VERSION >= 3
+#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES
ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
VRFY((ret >= 0), "H5Dwrite succeeded");
#endif
@@ -2881,7 +2881,7 @@ none_selection_chunk(void)
* Simple independent I/O. This tests that the defaults are properly set.
*
* TEST_ACTUAL_IO_RESET:
- * Perfroms collective and then independent I/O wit hthe same dxpl to
+ * Performs collective and then independent I/O with the same dxpl to
* make sure the peroperty is correctly reset to the default on each use.
* Specifically, this test runs TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_MIX_DISAGREE
* (The most complex case that works on all builds) and then performs
@@ -2890,7 +2890,7 @@ none_selection_chunk(void)
* Note: DIRECT_MULTI_CHUNK_MIX and DIRECT_MULTI_CHUNK_MIX_DISAGREE
* is not needed as they are covered by DIRECT_CHUNK_MIX and
* MULTI_CHUNK_MIX_DISAGREE cases. _DIRECT_ cases are only for testing
- * path way to multi-chunk-io by H5FD_MPIO_CHUNK_MULTI_IO insted of num-threshold.
+ * pathway to multi-chunk-io by H5FD_MPIO_CHUNK_MULTI_IO instead of num-threshold.
*
* Modification:
* - Refctore to remove multi-chunk-without-opimization test and update for
@@ -2905,12 +2905,12 @@ none_selection_chunk(void)
static void
test_actual_io_mode(int selection_mode)
{
- H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_write = -1;
- H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_read = -1;
- H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_expected = -1;
- H5D_mpio_actual_io_mode_t actual_io_mode_write = -1;
- H5D_mpio_actual_io_mode_t actual_io_mode_read = -1;
- H5D_mpio_actual_io_mode_t actual_io_mode_expected = -1;
+ H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_write = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
+ H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_read = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
+ H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
+ H5D_mpio_actual_io_mode_t actual_io_mode_write = H5D_MPIO_NO_COLLECTIVE;
+ H5D_mpio_actual_io_mode_t actual_io_mode_read = H5D_MPIO_NO_COLLECTIVE;
+ H5D_mpio_actual_io_mode_t actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE;
const char * filename;
const char * test_name;
hbool_t direct_multi_chunk_io;
@@ -3078,7 +3078,7 @@ test_actual_io_mode(int selection_mode)
break;
/* RESET tests that the properties are properly reset to defaults each time I/O is
- * performed. To acheive this, we have RESET perform collective I/O (which would change
+ * performed. To achieve this, we have RESET perform collective I/O (which would change
* the values from the defaults) followed by independent I/O (which should report the
* default values). RESET doesn't need to have a unique selection, so we reuse
* MULTI_CHUMK_MIX_DISAGREE, which was chosen because it is a complex case that works
@@ -3094,7 +3094,7 @@ test_actual_io_mode(int selection_mode)
* assgigned collective I/O, while each other process gets independent I/O.
* Since the root process with only access the first chunk, it will report
* collective I/O. The subsequent processes will access the first chunk
- * collectively, and their other chunk indpendently, reporting mixed I/O.
+ * collectively, and their other chunk independently, reporting mixed I/O.
*/
if (mpi_rank == 0) {
@@ -3163,8 +3163,8 @@ test_actual_io_mode(int selection_mode)
default:
test_name = "Undefined Selection Mode";
- actual_chunk_opt_mode_expected = -1;
- actual_io_mode_expected = -1;
+ actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
+ actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE;
break;
}
@@ -3209,7 +3209,7 @@ test_actual_io_mode(int selection_mode)
ret = H5Pset_dxpl_mpio_chunk_opt_num(dxpl_write, (unsigned)mpi_size * 2);
VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_num succeeded");
- /* set this to manipulate testing senario about allocating processes
+ /* set this to manipulate testing scenario about allocating processes
* to chunks */
ret = H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl_write, (unsigned)99);
VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_ratio succeeded");
@@ -3233,12 +3233,12 @@ test_actual_io_mode(int selection_mode)
H5Eprint2(H5E_DEFAULT, stdout);
VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
- /* Retreive Actual io valuess */
+ /* Retrieve Actual io values */
ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write);
- VRFY((ret >= 0), "retriving actual io mode suceeded");
+ VRFY((ret >= 0), "retrieving actual io mode succeeded");
ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write);
- VRFY((ret >= 0), "retriving actual chunk opt mode succeeded");
+ VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded");
/* Read */
ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer);
@@ -3246,12 +3246,12 @@ test_actual_io_mode(int selection_mode)
H5Eprint2(H5E_DEFAULT, stdout);
VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
- /* Retreive Actual io values */
+    /* Retrieve actual I/O values */
ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read);
- VRFY((ret >= 0), "retriving actual io mode succeeded");
+ VRFY((ret >= 0), "retrieving actual io mode succeeded");
ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read);
- VRFY((ret >= 0), "retriving actual chunk opt mode succeeded");
+ VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded");
/* Check write vs read */
VRFY((actual_io_mode_read == actual_io_mode_write),
@@ -3272,7 +3272,7 @@ test_actual_io_mode(int selection_mode)
actual_io_mode_write);
}
- /* To test that the property is succesfully reset to the default, we perform some
+ /* To test that the property is successfully reset to the default, we perform some
* independent I/O after the collective I/O
*/
if (selection_mode == TEST_ACTUAL_IO_RESET) {
@@ -3289,9 +3289,9 @@ test_actual_io_mode(int selection_mode)
/* Check Properties */
ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write);
- VRFY((ret >= 0), "retriving actual io mode succeeded");
+ VRFY((ret >= 0), "retrieving actual io mode succeeded");
ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write);
- VRFY((ret >= 0), "retriving actual chunk opt mode succeeded");
+ VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded");
VRFY(actual_chunk_opt_mode_write == H5D_MPIO_NO_CHUNK_OPTIMIZATION,
"actual_chunk_opt_mode has correct value for reset write (independent)");
@@ -3304,9 +3304,9 @@ test_actual_io_mode(int selection_mode)
/* Check Properties */
ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read);
- VRFY((ret >= 0), "retriving actual io mode succeeded");
+ VRFY((ret >= 0), "retrieving actual io mode succeeded");
ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read);
- VRFY((ret >= 0), "retriving actual chunk opt mode succeeded");
+ VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded");
VRFY(actual_chunk_opt_mode_read == H5D_MPIO_NO_CHUNK_OPTIMIZATION,
"actual_chunk_opt_mode has correct value for reset read (independent)");
@@ -3359,11 +3359,11 @@ actual_io_mode_tests(void)
test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_IND);
test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_COL);
- /* The Multi Chunk Mixed test requires atleast three processes. */
+ /* The Multi Chunk Mixed test requires at least three processes. */
if (mpi_size > 2)
test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX);
else
- HDfprintf(stdout, "Multi Chunk Mixed test requires 3 proceses minimum\n");
+ HDfprintf(stdout, "Multi Chunk Mixed test requires 3 processes minimum\n");
test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE);
@@ -3407,7 +3407,7 @@ actual_io_mode_tests(void)
* Test for Data Type Conversion as the cause of breaking collective I/O.
*
* TEST_DATA_TRANSFORMS:
- * Test for Data Transfrom feature as the cause of breaking collective I/O.
+ * Test for Data Transform feature as the cause of breaking collective I/O.
*
* TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES:
* Test for NULL dataspace as the cause of breaking collective I/O.
@@ -3418,12 +3418,6 @@ actual_io_mode_tests(void)
* TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL:
 * Test for External-File storage as the cause of breaking collective I/O.
*
- * TEST_FILTERS:
- * Test for using filter (checksum) as the cause of breaking collective I/O.
- * Note: TEST_FILTERS mode will not work until H5Dcreate and H5write is supported for mpio and filter
- * feature. Use test_no_collective_cause_mode_filter() function instead.
- *
- *
* Programmer: Jonathan Kim
* Date: Aug, 2012
*/
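/* A minimal sketch of how each cause is checked, assuming dxpl_write is the
 * transfer property list used for the write and the data-transform case is
 * the one under test: */
uint32_t local_cause = 0, global_cause = 0;

VRFY((H5Pget_mpio_no_collective_cause(dxpl_write, &local_cause, &global_cause) >= 0),
     "retrieving no collective cause succeeded");
VRFY((local_cause & H5D_MPIO_DATA_TRANSFORMS), "data transform reported as local cause");
VRFY((global_cause & H5D_MPIO_DATA_TRANSFORMS), "data transform reported as global cause");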
@@ -3465,9 +3459,6 @@ test_no_collective_cause_mode(int selection_mode)
hid_t file_space = -1;
hsize_t chunk_dims[RANK];
herr_t ret;
-#ifdef LATER /* fletcher32 */
- H5Z_filter_t filter_info;
-#endif /* LATER */
/* set to global value as default */
int l_facc_type = facc_type;
char message[256];
@@ -3499,21 +3490,6 @@ test_no_collective_cause_mode(int selection_mode)
is_chunked = 0;
}
-#ifdef LATER /* fletcher32 */
- if (selection_mode & TEST_FILTERS) {
- ret = H5Zfilter_avail(H5Z_FILTER_FLETCHER32);
- VRFY((ret >= 0), "Fletcher32 filter is available.\n");
-
- ret = H5Zget_filter_info(H5Z_FILTER_FLETCHER32, &filter_info);
- VRFY(((filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) ||
- (filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED)),
- "Fletcher32 filter encoding and decoding available.\n");
-
- ret = H5Pset_fletcher32(dcpl);
- VRFY((ret >= 0), "set filter (flecher32) succeeded");
- }
-#endif /* LATER */
-
if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES) {
sid = H5Screate(H5S_NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
@@ -3571,7 +3547,7 @@ test_no_collective_cause_mode(int selection_mode)
}
if (selection_mode & TEST_DATA_TRANSFORMS) {
- test_name = "Broken Collective I/O - DATA Transfroms";
+ test_name = "Broken Collective I/O - DATA Transforms";
no_collective_cause_local_expected |= H5D_MPIO_DATA_TRANSFORMS;
no_collective_cause_global_expected |= H5D_MPIO_DATA_TRANSFORMS;
}
@@ -3589,14 +3565,6 @@ test_no_collective_cause_mode(int selection_mode)
no_collective_cause_global_expected |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET;
}
-#ifdef LATER /* fletcher32 */
- if (selection_mode & TEST_FILTERS) {
- test_name = "Broken Collective I/O - Filter is required";
- no_collective_cause_local_expected |= H5D_MPIO_FILTERS;
- no_collective_cause_global_expected |= H5D_MPIO_FILTERS;
- }
-#endif /* LATER */
-
if (selection_mode & TEST_COLLECTIVE) {
test_name = "Broken Collective I/O - Not Broken";
no_collective_cause_local_expected = H5D_MPIO_COLLECTIVE;
@@ -3669,7 +3637,7 @@ test_no_collective_cause_mode(int selection_mode)
/* Get the cause of broken collective I/O */
ret = H5Pget_mpio_no_collective_cause(dxpl_write, &no_collective_cause_local_write,
&no_collective_cause_global_write);
- VRFY((ret >= 0), "retriving no collective cause succeeded");
+ VRFY((ret >= 0), "retrieving no collective cause succeeded");
/*---------------------
* Test Read access
@@ -3689,7 +3657,7 @@ test_no_collective_cause_mode(int selection_mode)
/* Get the cause of broken collective I/O */
ret = H5Pget_mpio_no_collective_cause(dxpl_read, &no_collective_cause_local_read,
&no_collective_cause_global_read);
- VRFY((ret >= 0), "retriving no collective cause succeeded");
+ VRFY((ret >= 0), "retrieving no collective cause succeeded");
/* Check write vs read */
VRFY((no_collective_cause_local_read == no_collective_cause_local_write),
@@ -3699,10 +3667,12 @@ test_no_collective_cause_mode(int selection_mode)
/* Test values */
HDmemset(message, 0, sizeof(message));
- HDsprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n", test_name);
+ HDsnprintf(message, sizeof(message),
+ "Local cause of Broken Collective I/O has the correct value for %s.\n", test_name);
VRFY((no_collective_cause_local_write == no_collective_cause_local_expected), message);
HDmemset(message, 0, sizeof(message));
- HDsprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n", test_name);
+ HDsnprintf(message, sizeof(message),
+ "Global cause of Broken Collective I/O has the correct value for %s.\n", test_name);
VRFY((no_collective_cause_global_write == no_collective_cause_global_expected), message);
/* Release some resources */
@@ -3733,240 +3703,6 @@ test_no_collective_cause_mode(int selection_mode)
return;
}
-/*
- * Function: test_no_collective_cause_mode_filter
- *
- * Purpose:
- * Test specific for using filter as a caus of broken collective I/O and
- * checks that the H5Pget_mpio_no_collective_cause properties in the DXPL
- * have the correct values.
- *
- * NOTE:
- * This is a temporary function.
- * test_no_collective_cause_mode(TEST_FILTERS) will replace this when
- * H5Dcreate and H5write support for mpio and filter feature.
- *
- * Input:
- * TEST_FILTERS_READ:
- * Test for using filter (checksum) as the cause of breaking collective I/O.
- *
- * Programmer: Jonathan Kim
- * Date: Aug, 2012
- */
-#ifdef LATER
-static void
-test_no_collective_cause_mode_filter(int selection_mode)
-{
- uint32_t no_collective_cause_local_read = 0;
- uint32_t no_collective_cause_local_expected = 0;
- uint32_t no_collective_cause_global_read = 0;
- uint32_t no_collective_cause_global_expected = 0;
-
- const char *filename;
- const char *test_name = "I/O";
- hbool_t is_chunked = 1;
- int mpi_size = -1;
- int mpi_rank = -1;
- int length;
- int * buffer;
- int i;
- MPI_Comm mpi_comm = MPI_COMM_NULL;
- MPI_Info mpi_info = MPI_INFO_NULL;
- hid_t fid = -1;
- hid_t sid = -1;
- hid_t dataset = -1;
- hid_t data_type = H5T_NATIVE_INT;
- hid_t fapl_write = -1;
- hid_t fapl_read = -1;
- hid_t dcpl = -1;
- hid_t dxpl = -1;
- hsize_t dims[RANK];
- hid_t mem_space = -1;
- hid_t file_space = -1;
- hsize_t chunk_dims[RANK];
- herr_t ret;
-#ifdef LATER /* fletcher32 */
- H5Z_filter_t filter_info;
-#endif /* LATER */
- char message[256];
-
- /* Set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- MPI_Barrier(MPI_COMM_WORLD);
-
- HDassert(mpi_size >= 1);
-
- mpi_comm = MPI_COMM_WORLD;
- mpi_info = MPI_INFO_NULL;
-
- /* Create the dataset creation plist */
- dcpl = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((dcpl >= 0), "dataset creation plist created successfully");
-
- if (selection_mode == TEST_FILTERS_READ) {
-#ifdef LATER /* fletcher32 */
- ret = H5Zfilter_avail(H5Z_FILTER_FLETCHER32);
- VRFY((ret >= 0), "Fletcher32 filter is available.\n");
-
- ret = H5Zget_filter_info(H5Z_FILTER_FLETCHER32, (unsigned int *)&filter_info);
- VRFY(((filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) ||
- (filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED)),
- "Fletcher32 filter encoding and decoding available.\n");
-
- ret = H5Pset_fletcher32(dcpl);
- VRFY((ret >= 0), "set filter (flecher32) succeeded");
-#endif /* LATER */
- }
- else {
- VRFY(0, "Unexpected mode, only test for TEST_FILTERS_READ.");
- }
-
- /* Create the basic Space */
- dims[0] = (hsize_t)dim0;
- dims[1] = (hsize_t)dim1;
- sid = H5Screate_simple(RANK, dims, NULL);
- VRFY((sid >= 0), "H5Screate_simple succeeded");
-
- filename = (const char *)GetTestParameters();
- HDassert(filename != NULL);
-
- /* Setup the file access template */
- fapl_write = create_faccess_plist(mpi_comm, mpi_info, FACC_DEFAULT);
- VRFY((fapl_write >= 0), "create_faccess_plist() succeeded");
-
- fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_write);
- VRFY((fid >= 0), "H5Fcreate succeeded");
-
- /* If we are not testing contiguous datasets */
- if (is_chunked) {
- /* Set up chunk information. */
- chunk_dims[0] = dims[0] / (hsize_t)mpi_size;
- chunk_dims[1] = dims[1];
- ret = H5Pset_chunk(dcpl, 2, chunk_dims);
- VRFY((ret >= 0), "chunk creation property list succeeded");
- }
-
- /* Create the dataset */
- dataset = H5Dcreate2(fid, DSET_NOCOLCAUSE, data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded");
-
-#ifdef LATER /* fletcher32 */
- /* Set expected cause */
- test_name = "Broken Collective I/O - Filter is required";
- no_collective_cause_local_expected = H5D_MPIO_FILTERS;
- no_collective_cause_global_expected = H5D_MPIO_FILTERS;
-#endif /* LATER */
-
- /* Get the file dataspace */
- file_space = H5Dget_space(dataset);
- VRFY((file_space >= 0), "H5Dget_space succeeded");
-
- /* Create the memory dataspace */
- mem_space = H5Screate_simple(RANK, dims, NULL);
- VRFY((mem_space >= 0), "mem_space created");
-
- /* Get the number of elements in the selection */
- length = dim0 * dim1;
-
- /* Allocate and initialize the buffer */
- buffer = (int *)HDmalloc(sizeof(int) * length);
- VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
- for (i = 0; i < length; i++)
- buffer[i] = i;
-
- /* Set up the dxpl for the write */
- dxpl = H5Pcreate(H5P_DATASET_XFER);
- VRFY((dxpl >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
-
- if (selection_mode == TEST_FILTERS_READ) {
- /* To test read in collective I/O mode , write in independent mode
- * because write fails with mpio + filter */
- ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- }
- else {
- /* To test write in collective I/O mode. */
- ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- }
-
- /* Write */
- ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl, buffer);
-
- if (ret < 0)
- H5Eprint2(H5E_DEFAULT, stdout);
- VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
-
- /* Make a copy of the dxpl to test the read operation */
- dxpl = H5Pcopy(dxpl);
- VRFY((dxpl >= 0), "H5Pcopy succeeded");
-
- if (dataset)
- H5Dclose(dataset);
- if (fapl_write)
- H5Pclose(fapl_write);
- if (fid)
- H5Fclose(fid);
-
- /*---------------------
- * Test Read access
- *---------------------*/
-
- /* Setup the file access template */
- fapl_read = create_faccess_plist(mpi_comm, mpi_info, facc_type);
- VRFY((fapl_read >= 0), "create_faccess_plist() succeeded");
-
- fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl_read);
- dataset = H5Dopen2(fid, DSET_NOCOLCAUSE, H5P_DEFAULT);
-
- /* Set collective I/O properties in the dxpl. */
- ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
-
- /* Read */
- ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl, buffer);
-
- if (ret < 0)
- H5Eprint2(H5E_DEFAULT, stdout);
- VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
-
- /* Get the cause of broken collective I/O */
- ret = H5Pget_mpio_no_collective_cause(dxpl, &no_collective_cause_local_read,
- &no_collective_cause_global_read);
- VRFY((ret >= 0), "retriving no collective cause succeeded");
-
- /* Test values */
- HDmemset(message, 0, sizeof(message));
- HDsprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n", test_name);
- VRFY((no_collective_cause_local_read == (uint32_t)no_collective_cause_local_expected), message);
- HDmemset(message, 0, sizeof(message));
- HDsprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n", test_name);
- VRFY((no_collective_cause_global_read == (uint32_t)no_collective_cause_global_expected), message);
-
- /* Release some resources */
- if (sid)
- H5Sclose(sid);
- if (fapl_read)
- H5Pclose(fapl_read);
- if (dcpl)
- H5Pclose(dcpl);
- if (dxpl)
- H5Pclose(dxpl);
- if (dataset)
- H5Dclose(dataset);
- if (mem_space)
- H5Sclose(mem_space);
- if (file_space)
- H5Sclose(file_space);
- if (fid)
- H5Fclose(fid);
- HDfree(buffer);
- return;
-}
-#endif
-
/* Function: no_collective_cause_tests
*
 * Purpose: Test cases for broken collective I/O.
@@ -3987,13 +3723,6 @@ no_collective_cause_tests(void)
test_no_collective_cause_mode(TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES);
test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT);
test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL);
-#ifdef LATER /* fletcher32 */
- /* TODO: use this instead of below TEST_FILTERS_READ when H5Dcreate and
- * H5Dwrite is ready for mpio + filter feature.
- */
- /* test_no_collective_cause_mode (TEST_FILTERS); */
- test_no_collective_cause_mode_filter(TEST_FILTERS_READ);
-#endif /* LATER */
/*
* Test combined causes
@@ -4114,7 +3843,7 @@ dataset_atomicity(void)
/* file locking allows only one file open (serial) for writing */
if (MAINPROCESS) {
fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT);
- VRFY((fid >= 0), "H5Fopen succeeed");
+ VRFY((fid >= 0), "H5Fopen succeeded");
/* should fail */
H5E_BEGIN_TRY
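/* For context, the atomicity controls this test exercises; a minimal sketch,
 * assuming fid is the file handle opened above: */
hbool_t atomicity = FALSE;

VRFY((H5Fset_mpi_atomicity(fid, TRUE) >= 0), "H5Fset_mpi_atomicity succeeded");
VRFY((H5Fget_mpi_atomicity(fid, &atomicity) >= 0), "H5Fget_mpi_atomicity succeeded");
VRFY((atomicity == TRUE), "atomicity flag set");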
diff --git a/testpar/t_file.c b/testpar/t_file.c
index 9554052..9970538 100644
--- a/testpar/t_file.c
+++ b/testpar/t_file.c
@@ -106,7 +106,7 @@ test_split_comm_access(void)
/* delete the test file */
if (sub_mpi_rank == 0) {
- mrc = MPI_File_delete((char *)filename, info);
+ mrc = MPI_File_delete(filename, info);
/*VRFY((mrc==MPI_SUCCESS), ""); */
}
}
@@ -180,7 +180,7 @@ test_page_buffer_access(void)
data = (int *)HDmalloc(sizeof(int) * (size_t)num_elements);
- /* intialize all the elements to have a value of -1 */
+ /* initialize all the elements to have a value of -1 */
for (i = 0; i < num_elements; i++)
data[i] = -1;
if (MAINPROCESS) {
@@ -516,25 +516,25 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str
VRFY((mem_dataspace >= 0), "");
for (k = 0; k < NUM_DSETS; k++) {
- HDsprintf(dset_name, "D1dset%d", k);
+ HDsnprintf(dset_name, sizeof(dset_name), "D1dset%d", k);
dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
ret = H5Dclose(dset_id);
VRFY((ret == 0), "");
- HDsprintf(dset_name, "D2dset%d", k);
+ HDsnprintf(dset_name, sizeof(dset_name), "D2dset%d", k);
dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
ret = H5Dclose(dset_id);
VRFY((ret == 0), "");
- HDsprintf(dset_name, "D3dset%d", k);
+ HDsnprintf(dset_name, sizeof(dset_name), "D3dset%d", k);
dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
ret = H5Dclose(dset_id);
VRFY((ret == 0), "");
- HDsprintf(dset_name, "dset%d", k);
+ HDsnprintf(dset_name, sizeof(dset_name), "dset%d", k);
dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
@@ -557,13 +557,13 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str
for (i = 0; i < num_elements; i++)
VRFY((data_array[i] == mpi_rank + 1), "Dataset Verify failed");
- HDsprintf(dset_name, "D1dset%d", k);
+ HDsnprintf(dset_name, sizeof(dset_name), "D1dset%d", k);
ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
VRFY((ret == 0), "");
- HDsprintf(dset_name, "D2dset%d", k);
+ HDsnprintf(dset_name, sizeof(dset_name), "D2dset%d", k);
ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
VRFY((ret == 0), "");
- HDsprintf(dset_name, "D3dset%d", k);
+ HDsnprintf(dset_name, sizeof(dset_name), "D3dset%d", k);
ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
VRFY((ret == 0), "");
}
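/* The pattern applied throughout this patch: bound every formatted name by
 * the destination buffer's size instead of using HDsprintf(); dset_name and
 * k are the variables already declared in these tests: */
HDsnprintf(dset_name, sizeof(dset_name), "dset%d", k);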
@@ -669,7 +669,7 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t
VRFY((mem_dataspace >= 0), "");
for (k = 0; k < NUM_DSETS; k++) {
- HDsprintf(dset_name, "dset%d", k);
+ HDsnprintf(dset_name, sizeof(dset_name), "dset%d", k);
dset_id = H5Dopen2(grp_id, dset_name, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
diff --git a/testpar/t_filters_parallel.c b/testpar/t_filters_parallel.c
index 78af0fb..8a55519 100644
--- a/testpar/t_filters_parallel.c
+++ b/testpar/t_filters_parallel.c
@@ -26,73 +26,139 @@
const char *FILENAME[] = {"t_filters_parallel", NULL};
char filenames[1][256];
+static MPI_Comm comm = MPI_COMM_WORLD;
+static MPI_Info info = MPI_INFO_NULL;
+static int mpi_rank;
+static int mpi_size;
+
int nerrors = 0;
-size_t cur_filter_idx = 0;
-#define GZIP_INDEX 0
-#define FLETCHER32_INDEX 1
+/* Arrays of filter ID values and filter names (should match each other) */
+H5Z_filter_t filterIDs[] = {
+ H5Z_FILTER_DEFLATE, H5Z_FILTER_SHUFFLE, H5Z_FILTER_FLETCHER32,
+ H5Z_FILTER_SZIP, H5Z_FILTER_NBIT, H5Z_FILTER_SCALEOFFSET,
+};
+
+const char *filterNames[] = {"Deflate", "Shuffle", "Fletcher32", "SZIP", "Nbit", "ScaleOffset"};
-#define ARRAY_SIZE(a) sizeof(a) / sizeof(a[0])
+/* Function pointer typedef for test functions */
+typedef void (*test_func)(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id,
+ hid_t dxpl_id);
+
+/* Typedef for filter arguments for user-defined filters */
+typedef struct filter_options_t {
+ unsigned int flags;
+ size_t cd_nelmts;
+ const unsigned int cd_values[];
+} filter_options_t;
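/* A hypothetical sketch of passing options for a user-defined filter through
 * set_dcpl_filter()'s default branch; MY_UD_FILTER_ID and the cd_values shown
 * are placeholders, not part of this patch: */
unsigned int      my_cd_values[2] = {4, 256};
filter_options_t *opts            = HDmalloc(sizeof(*opts) + sizeof(my_cd_values));

opts->flags     = H5Z_FLAG_MANDATORY;
opts->cd_nelmts = 2;
HDmemcpy((void *)opts->cd_values, my_cd_values, sizeof(my_cd_values));

VRFY((set_dcpl_filter(dcpl_id, MY_UD_FILTER_ID, opts) >= 0), "Filter set");
HDfree(opts);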
/*
- * Used to check if a filter is available before running a test.
+ * Enum for verify_space_alloc_status which specifies
+ * how many chunks have been written to in a dataset
*/
-#define CHECK_CUR_FILTER_AVAIL() \
- { \
- htri_t filter_is_avail; \
- \
- if (cur_filter_idx == GZIP_INDEX) { \
- if ((filter_is_avail = H5Zfilter_avail(H5Z_FILTER_DEFLATE)) != TRUE) { \
- if (MAINPROCESS) { \
- HDputs(" - SKIPPED - Deflate filter not available"); \
- } \
- return; \
- } \
- } \
- }
+typedef enum num_chunks_written_t {
+ DATASET_JUST_CREATED,
+ NO_CHUNKS_WRITTEN,
+ SOME_CHUNKS_WRITTEN,
+ ALL_CHUNKS_WRITTEN
+} num_chunks_written_t;
-static herr_t set_dcpl_filter(hid_t dcpl);
+static herr_t set_dcpl_filter(hid_t dcpl_id, H5Z_filter_t filter_id, filter_options_t *filter_options);
+static herr_t verify_space_alloc_status(hid_t dset_id, hid_t dcpl_id, num_chunks_written_t chunks_written);
-#if MPI_VERSION >= 3
+#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES
/* Tests for writing data in parallel */
-static void test_write_one_chunk_filtered_dataset(void);
-static void test_write_filtered_dataset_no_overlap(void);
-static void test_write_filtered_dataset_overlap(void);
-static void test_write_filtered_dataset_single_no_selection(void);
-static void test_write_filtered_dataset_all_no_selection(void);
-static void test_write_filtered_dataset_point_selection(void);
-static void test_write_filtered_dataset_interleaved_write(void);
-static void test_write_transformed_filtered_dataset_no_overlap(void);
-static void test_write_3d_filtered_dataset_no_overlap_separate_pages(void);
-static void test_write_3d_filtered_dataset_no_overlap_same_pages(void);
-static void test_write_3d_filtered_dataset_overlap(void);
-static void test_write_cmpd_filtered_dataset_no_conversion_unshared(void);
-static void test_write_cmpd_filtered_dataset_no_conversion_shared(void);
-static void test_write_cmpd_filtered_dataset_type_conversion_unshared(void);
-static void test_write_cmpd_filtered_dataset_type_conversion_shared(void);
+static void test_write_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_filtered_dataset_no_overlap_partial(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_filtered_dataset_single_unlim_dim_no_overlap(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_filtered_dataset_single_unlim_dim_overlap(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_filtered_dataset_multi_unlim_dim_overlap(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_filtered_dataset_single_no_selection(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_filtered_dataset_interleaved_write(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_transformed_filtered_dataset_no_overlap(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_3d_filtered_dataset_no_overlap_separate_pages(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_3d_filtered_dataset_no_overlap_same_pages(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_cmpd_filtered_dataset_no_conversion_unshared(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_cmpd_filtered_dataset_no_conversion_shared(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_cmpd_filtered_dataset_type_conversion_unshared(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_cmpd_filtered_dataset_type_conversion_shared(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
#endif
/* Tests for reading data in parallel */
-static void test_read_one_chunk_filtered_dataset(void);
-static void test_read_filtered_dataset_no_overlap(void);
-static void test_read_filtered_dataset_overlap(void);
-static void test_read_filtered_dataset_single_no_selection(void);
-static void test_read_filtered_dataset_all_no_selection(void);
-static void test_read_filtered_dataset_point_selection(void);
-static void test_read_filtered_dataset_interleaved_read(void);
-static void test_read_transformed_filtered_dataset_no_overlap(void);
-static void test_read_3d_filtered_dataset_no_overlap_separate_pages(void);
-static void test_read_3d_filtered_dataset_no_overlap_same_pages(void);
-static void test_read_3d_filtered_dataset_overlap(void);
-static void test_read_cmpd_filtered_dataset_no_conversion_unshared(void);
-static void test_read_cmpd_filtered_dataset_no_conversion_shared(void);
-static void test_read_cmpd_filtered_dataset_type_conversion_unshared(void);
-static void test_read_cmpd_filtered_dataset_type_conversion_shared(void);
-
-#if MPI_VERSION >= 3
-/* Other miscellaneous tests */
-static void test_shrinking_growing_chunks(void);
-#endif
+static void test_read_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_read_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_read_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_read_filtered_dataset_single_no_selection(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_read_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_read_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_read_filtered_dataset_interleaved_read(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_read_transformed_filtered_dataset_no_overlap(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_read_3d_filtered_dataset_no_overlap_separate_pages(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_read_3d_filtered_dataset_no_overlap_same_pages(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_read_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_read_cmpd_filtered_dataset_no_conversion_unshared(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_read_cmpd_filtered_dataset_no_conversion_shared(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_read_cmpd_filtered_dataset_type_conversion_unshared(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_read_cmpd_filtered_dataset_type_conversion_shared(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
/*
* Tests for attempting to round-trip the data going from
@@ -103,21 +169,40 @@ static void test_shrinking_growing_chunks(void);
*
* written in parallel -> read serially
*/
-static void test_write_serial_read_parallel(void);
-#if MPI_VERSION >= 3
-static void test_write_parallel_read_serial(void);
-#endif
+static void test_write_serial_read_parallel(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
-static MPI_Comm comm = MPI_COMM_WORLD;
-static MPI_Info info = MPI_INFO_NULL;
-static int mpi_rank;
-static int mpi_size;
+#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES
+static void test_write_parallel_read_serial(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
-static void (*tests[])(void) = {
-#if MPI_VERSION >= 3
+/* Other miscellaneous tests */
+static void test_shrinking_growing_chunks(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_edge_chunks_partial_write(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id,
+ hid_t dxpl_id);
+static void test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+#endif
+
+static test_func tests[] = {
+#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES
test_write_one_chunk_filtered_dataset,
test_write_filtered_dataset_no_overlap,
+ test_write_filtered_dataset_no_overlap_partial,
test_write_filtered_dataset_overlap,
+ test_write_filtered_dataset_single_unlim_dim_no_overlap,
+ test_write_filtered_dataset_single_unlim_dim_overlap,
+ test_write_filtered_dataset_multi_unlim_dim_no_overlap,
+ test_write_filtered_dataset_multi_unlim_dim_overlap,
test_write_filtered_dataset_single_no_selection,
test_write_filtered_dataset_all_no_selection,
test_write_filtered_dataset_point_selection,
@@ -147,33 +232,168 @@ static void (*tests[])(void) = {
test_read_cmpd_filtered_dataset_type_conversion_unshared,
test_read_cmpd_filtered_dataset_type_conversion_shared,
test_write_serial_read_parallel,
-#if MPI_VERSION >= 3
+#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES
test_write_parallel_read_serial,
test_shrinking_growing_chunks,
+ test_edge_chunks_no_overlap,
+ test_edge_chunks_overlap,
+ test_edge_chunks_partial_write,
+ test_fill_values,
+ test_fill_value_undefined,
+ test_fill_time_never,
#endif
};
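/* A sketch of the driver loop implied by the tables above, assuming the
 * FAPL/DCPL/DXPL setup and the per-filter parent group creation happen in
 * main() (the group-name format here is illustrative): */
size_t num_filters = sizeof(filterIDs) / sizeof(filterIDs[0]);
size_t num_tests   = sizeof(tests) / sizeof(tests[0]);
size_t i, j;

for (i = 0; i < num_filters; i++) {
    char group_name[64];

    /* Skip any filter that isn't available in this build of the library */
    if (H5Zfilter_avail(filterIDs[i]) != TRUE)
        continue;

    HDsnprintf(group_name, sizeof(group_name), "%s_filter_tests", filterNames[i]);

    for (j = 0; j < num_tests; j++)
        tests[j](group_name, filterIDs[i], fapl_id, dcpl_id, dxpl_id);
}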
/*
* Function to call the appropriate HDF5 filter-setting function
- * depending on the currently set index. Used to re-run the tests
+ * depending on the given filter ID. Used to re-run the tests
* with different filters to check that the data still comes back
* correctly under a variety of circumstances, such as the
* Fletcher32 checksum filter increasing the size of the chunk.
*/
static herr_t
-set_dcpl_filter(hid_t dcpl)
+set_dcpl_filter(hid_t dcpl_id, H5Z_filter_t filter_id, filter_options_t *filter_options)
+{
+ switch (filter_id) {
+ case H5Z_FILTER_DEFLATE:
+ return H5Pset_deflate(dcpl_id, DEFAULT_DEFLATE_LEVEL);
+ case H5Z_FILTER_SHUFFLE:
+ return H5Pset_shuffle(dcpl_id);
+ case H5Z_FILTER_FLETCHER32:
+ return H5Pset_fletcher32(dcpl_id);
+ case H5Z_FILTER_SZIP: {
+ unsigned pixels_per_block = H5_SZIP_MAX_PIXELS_PER_BLOCK;
+ hsize_t chunk_dims[H5S_MAX_RANK] = {0};
+ size_t i, chunk_nelemts;
+
+ VRFY(H5Pget_chunk(dcpl_id, H5S_MAX_RANK, chunk_dims) >= 0, "H5Pget_chunk succeeded");
+
+ for (i = 0, chunk_nelemts = 1; i < H5S_MAX_RANK; i++)
+ if (chunk_dims[i] > 0)
+ chunk_nelemts *= chunk_dims[i];
+
+ if (chunk_nelemts < H5_SZIP_MAX_PIXELS_PER_BLOCK) {
+ /*
+ * Can't set SZIP for chunk of 1 data element.
+ * Pixels-per-block value must be both even
+ * and non-zero.
+ */
+ if (chunk_nelemts == 1)
+ return SUCCEED;
+
+ if ((chunk_nelemts % 2) == 0)
+ pixels_per_block = (unsigned)chunk_nelemts;
+ else
+ pixels_per_block = (unsigned)(chunk_nelemts - 1);
+ }
+ else
+ pixels_per_block = H5_SZIP_MAX_PIXELS_PER_BLOCK;
+
+ return H5Pset_szip(dcpl_id, 0, pixels_per_block);
+ }
+ case H5Z_FILTER_NBIT:
+ return H5Pset_nbit(dcpl_id);
+ case H5Z_FILTER_SCALEOFFSET:
+ return H5Pset_scaleoffset(dcpl_id, H5Z_SO_INT, 0);
+ default: {
+ if (!filter_options)
+ return FAIL;
+
+ return H5Pset_filter(dcpl_id, filter_id, filter_options->flags, filter_options->cd_nelmts,
+ filter_options->cd_values);
+ }
+ }
+}
+
+/*
+ * Function to verify the status of dataset storage space allocation
+ * based on the dataset's allocation time setting and how many chunks
+ * in the dataset have been written to.
+ */
+static herr_t
+verify_space_alloc_status(hid_t dset_id, hid_t dcpl_id, num_chunks_written_t chunks_written)
{
- switch (cur_filter_idx) {
- case GZIP_INDEX:
- return H5Pset_deflate(dcpl, DEFAULT_DEFLATE_LEVEL);
- case FLETCHER32_INDEX:
- return H5Pset_fletcher32(dcpl);
- default:
- return H5Pset_deflate(dcpl, DEFAULT_DEFLATE_LEVEL);
+ int nfilters;
+ herr_t ret_value = SUCCEED;
+
+ VRFY(((nfilters = H5Pget_nfilters(dcpl_id)) >= 0), "H5Pget_nfilters succeeded");
+
+ /*
+ * Only verify space allocation status when there are filters
+ * in the dataset's filter pipeline. When filters aren't in the
+ * pipeline, the space allocation time and status can vary based
+ * on whether the file was created in parallel or serial mode.
+ */
+ if (nfilters > 0) {
+ H5D_space_status_t space_status;
+ H5D_alloc_time_t alloc_time;
+
+ VRFY((H5Pget_alloc_time(dcpl_id, &alloc_time) >= 0), "H5Pget_alloc_time succeeded");
+ VRFY((H5Dget_space_status(dset_id, &space_status) >= 0), "H5Dget_space_status succeeded");
+
+ switch (alloc_time) {
+ case H5D_ALLOC_TIME_EARLY:
+ /*
+ * Early space allocation should always result in the
+ * full dataset storage space being allocated.
+ */
+ VRFY(space_status == H5D_SPACE_STATUS_ALLOCATED, "verified space allocation status");
+ break;
+ case H5D_ALLOC_TIME_LATE:
+ /*
+ * Late space allocation should always result in the
+ * full dataset storage space being allocated when
+ * the dataset gets written to. However, if the dataset
+ * is extended the dataset's space allocation status
+ * can become partly allocated until the dataset is
+ * written to again.
+ */
+ if (chunks_written == SOME_CHUNKS_WRITTEN || chunks_written == ALL_CHUNKS_WRITTEN)
+ VRFY((space_status == H5D_SPACE_STATUS_ALLOCATED) ||
+ (space_status == H5D_SPACE_STATUS_PART_ALLOCATED),
+ "verified space allocation status");
+ else if (chunks_written == NO_CHUNKS_WRITTEN)
+ /*
+ * A special case where we wrote to a dataset that
+ * uses late space allocation, but the write was
+ * either a no-op (no selection in the dataset
+ * from any rank) or something caused the write to
+ * fail late in the process of performing the actual
+ * write. In either case, space should still have
+ * been allocated.
+ */
+ VRFY(space_status == H5D_SPACE_STATUS_ALLOCATED, "verified space allocation status");
+ else
+ VRFY(space_status == H5D_SPACE_STATUS_NOT_ALLOCATED, "verified space allocation status");
+ break;
+ case H5D_ALLOC_TIME_DEFAULT:
+ case H5D_ALLOC_TIME_INCR:
+ /*
+ * Incremental space allocation should result in
+ * the dataset's storage space being incrementally
+ * allocated as chunks are written to. Once all chunks
+ * have been written to, the space allocation should be
+ * seen as fully allocated.
+ */
+ if (chunks_written == SOME_CHUNKS_WRITTEN)
+ VRFY((space_status == H5D_SPACE_STATUS_PART_ALLOCATED),
+ "verified space allocation status");
+ else if (chunks_written == ALL_CHUNKS_WRITTEN)
+ VRFY((space_status == H5D_SPACE_STATUS_ALLOCATED), "verified space allocation status");
+ else
+ VRFY(space_status == H5D_SPACE_STATUS_NOT_ALLOCATED, "verified space allocation status");
+ break;
+ default:
+ if (MAINPROCESS)
+ MESG("unknown space allocation time");
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ }
}
+
+ return ret_value;
}
-#if MPI_VERSION >= 3
+#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES
/*
* Tests parallel write of filtered data in the special
* case where a dataset is composed of a single chunk.
@@ -182,7 +402,8 @@ set_dcpl_filter(hid_t dcpl)
* 02/01/2017
*/
static void
-test_write_one_chunk_filtered_dataset(void)
+test_write_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
@@ -195,26 +416,18 @@ test_write_one_chunk_filtered_dataset(void)
hsize_t count[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS];
hsize_t block[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write to one-chunk filtered dataset");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)WRITE_ONE_CHUNK_FILTERED_DATASET_NROWS;
@@ -231,19 +444,21 @@ test_write_one_chunk_filtered_dataset(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, WRITE_ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ dset_id = H5Dcreate2(group_id, WRITE_ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Each process defines the dataset selection in memory and writes
@@ -293,15 +508,12 @@ test_write_one_chunk_filtered_dataset(void)
((C_DATATYPE)i / (WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size *
WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS));
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
if (data)
HDfree(data);
@@ -311,10 +523,10 @@ test_write_one_chunk_filtered_dataset(void)
read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_ONE_CHUNK_FILTERED_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, WRITE_ONE_CHUNK_FILTERED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -324,10 +536,11 @@ test_write_one_chunk_filtered_dataset(void)
if (read_buf)
HDfree(read_buf);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -343,7 +556,8 @@ test_write_one_chunk_filtered_dataset(void)
* 02/01/2017
*/
static void
-test_write_filtered_dataset_no_overlap(void)
+test_write_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
@@ -356,27 +570,18 @@ test_write_filtered_dataset_no_overlap(void)
hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write to unshared filtered chunks");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_NROWS;
@@ -393,20 +598,22 @@ test_write_filtered_dataset_no_overlap(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Each process defines the dataset selection in memory and writes
@@ -454,15 +661,168 @@ test_write_filtered_dataset_no_overlap(void)
correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) +
(i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])));
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ if (data)
+ HDfree(data);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify the correct data was written */
+ read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+
+ if (correct_buf)
+ HDfree(correct_buf);
+ if (read_buf)
+ HDfree(read_buf);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
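/* Note for the test below: it passes H5S_BLOCK as the memory dataspace to
 * H5Dwrite(). H5S_BLOCK treats the memory buffer as a single contiguous
 * block holding exactly as many elements as the file selection, so no
 * separate memory dataspace has to be created or closed: */
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
     "Dataset write succeeded");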
+
+/*
+ * Tests parallel write of filtered data in the case where only
+ * one process is writing to a particular chunk in the operation
+ * and that process only writes to part of a chunk.
+ */
+static void
+test_write_filtered_dataset_no_overlap_partial(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+{
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS];
+ hsize_t start[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS];
+ hsize_t stride[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS];
+ hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS];
+ hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS];
+ size_t i, data_size, correct_buf_size;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
+
+ if (MAINPROCESS)
+ HDputs("Testing partial write to unshared filtered chunks");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_NROWS;
+ dataset_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_NCOLS;
+ chunk_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NROWS;
+ chunk_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NCOLS;
+ sel_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NROWS;
+ sel_dims[1] = (hsize_t)(WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_NCOLS /
+ WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NCOLS);
+
+ filespace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = 1;
+ count[1] = (hsize_t)(WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_NCOLS /
+ WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NCOLS);
+ stride[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NROWS;
+ stride[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NCOLS;
+ block[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NROWS;
+ block[1] = (hsize_t)1;
+ start[0] = ((hsize_t)mpi_rank * (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NROWS * count[0]);
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE)GEN_DATA(i);
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t rank_n_elems = (size_t)(mpi_size * (WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NROWS *
+ WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NCOLS));
+ size_t data_idx = i;
+
+ for (size_t j = 0; j < rank_n_elems; j++) {
+ if ((j % WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NCOLS) == 0) {
+ correct_buf[(i * rank_n_elems) + j] = (C_DATATYPE)data_idx;
+ data_idx++;
+ }
+ }
+ }
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
if (data)
HDfree(data);
@@ -472,10 +832,10 @@ test_write_filtered_dataset_no_overlap(void)
read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -485,10 +845,10 @@ test_write_filtered_dataset_no_overlap(void)
if (read_buf)
HDfree(read_buf);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -505,7 +865,8 @@ test_write_filtered_dataset_no_overlap(void)
* 02/01/2017
*/
static void
-test_write_filtered_dataset_overlap(void)
+test_write_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
@@ -518,27 +879,18 @@ test_write_filtered_dataset_overlap(void)
hsize_t count[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t block[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write to shared filtered chunks");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_NROWS;
@@ -555,20 +907,22 @@ test_write_filtered_dataset_overlap(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ dset_id = H5Dcreate2(group_id, WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Each process defines the dataset selection in memory and writes
@@ -616,15 +970,12 @@ test_write_filtered_dataset_overlap(void)
(dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) +
(((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
if (data)
HDfree(data);
@@ -634,10 +985,10 @@ test_write_filtered_dataset_overlap(void)
read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -647,10 +998,650 @@ test_write_filtered_dataset_overlap(void)
if (read_buf)
HDfree(read_buf);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
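
For context on the new signature: the refactor hoists property-list setup out of each test and into the driver, which builds the FAPL/DCPL/DXPL once and hands them to every parameterized test. A minimal caller-side sketch, assuming the harness's usual MPI globals (the group name and filter choice below are illustrative, not the suite's actual values):

    /* Sketch only: shared property lists created once and reused by the
     * parameterized tests; each test H5Pcopy()s the DCPL before adding
     * its own chunking and filter. */
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Info info = MPI_INFO_NULL;

    hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl_id, comm, info);
    H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);

    hid_t dcpl_id = H5Pcreate(H5P_DATASET_CREATE);

    hid_t dxpl_id = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE); /* collective transfers throughout */

    test_write_filtered_dataset_overlap("filtered_writes", H5Z_FILTER_DEFLATE, fapl_id, dcpl_id, dxpl_id);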
+
+/*
+ * Tests parallel write of filtered data in the case where
+ * a dataset has a single unlimited dimension and each
+ * MPI rank writes to its own separate chunks. On each
+ * iteration, the dataset is extended in its extensible
+ * dimension by "MPI size" chunks per rank and the new
+ * chunks are written to, read back and verified.
+ */
+static void
+test_write_filtered_dataset_single_unlim_dim_no_overlap(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+{
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
+ hsize_t dataset_dims[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ hsize_t max_dims[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ hsize_t start[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ hsize_t stride[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ hsize_t count[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ hsize_t block[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
+
+ if (MAINPROCESS)
+ HDputs("Testing write to unshared filtered chunks w/ single unlimited dimension");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NROWS;
+ dataset_dims[1] = (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NCOLS;
+ max_dims[0] = dataset_dims[0];
+ max_dims[1] = H5S_UNLIMITED;
+ chunk_dims[0] = (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NROWS;
+ chunk_dims[1] = (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NCOLS;
+ sel_dims[0] = (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NROWS;
+ sel_dims[1] = (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NCOLS;
+
+ filespace = H5Screate_simple(WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS, dataset_dims, max_dims);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ read_buf = (C_DATATYPE *)HDcalloc(1, data_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE)GEN_DATA(i);
+
+ for (i = 0; i < (size_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NLOOPS; i++) {
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = 1;
+ count[1] =
+ (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NCOLS / (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NCOLS;
+ stride[0] = (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NROWS;
+ stride[1] = (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NCOLS;
+ block[0] = (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NROWS;
+ block[1] = (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NCOLS;
+ start[0] = ((hsize_t)mpi_rank * block[0] * count[0]);
+ start[1] = i * count[1] * block[1];
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0],
+ block[1]);
+ HDfflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ dset_id = H5Dopen2(group_id, WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ HDmemset(read_buf, 255, data_size);
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ /* Verify the correct data was written */
+ VRFY((0 == HDmemcmp(read_buf, data, data_size)), "Data verification succeeded");
+
+ if (i < (size_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NLOOPS - 1) {
+ /* Extend the dataset by count[1] chunks in the extensible dimension */
+ dataset_dims[1] += count[1] * block[1];
+ VRFY(H5Dset_extent(dset_id, dataset_dims) >= 0, "H5Dset_extent succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+ }
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ }
+
+ if (data)
+ HDfree(data);
+ if (read_buf)
+ HDfree(read_buf);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
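
The loop above follows the standard extend-then-write idiom for chunked datasets: grow the extent with H5Dset_extent(), re-fetch the file dataspace (the previous handle describes the old extent), reselect, and write. A stripped-down sketch with hypothetical dimension variables:

    /* Grow the unlimited column dimension by one chunk and write the
     * new region; error checks omitted for brevity. */
    hsize_t new_dims[2] = {nrows, ncols + chunk_ncols};
    H5Dset_extent(dset_id, new_dims);

    hid_t fspace = H5Dget_space(dset_id); /* must re-fetch after extending */
    hsize_t start[2] = {0, ncols};
    hsize_t count[2] = {nrows, chunk_ncols};
    H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, NULL, count, NULL);

    /* H5S_BLOCK: the memory buffer is one contiguous block holding
     * exactly the elements selected in the file */
    H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_BLOCK, fspace, dxpl_id, buf);
    H5Sclose(fspace);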
+
+/*
+ * Tests parallel write of filtered data in the case where
+ * a dataset has a single unlimited dimension and each
+ * MPI rank writes to a portion of each chunk in the dataset.
+ * On each iteration, the dataset is extended in its extensible
+ * dimension by two chunks and the new chunks are written to
+ * by all ranks, then read back and verified.
+ */
+static void
+test_write_filtered_dataset_single_unlim_dim_overlap(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+{
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
+ hsize_t dataset_dims[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ hsize_t max_dims[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ hsize_t start[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ hsize_t stride[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ hsize_t count[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ hsize_t block[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
+
+ if (MAINPROCESS)
+ HDputs("Testing write to shared filtered chunks w/ single unlimited dimension");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_NROWS;
+ dataset_dims[1] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_NCOLS;
+ max_dims[0] = dataset_dims[0];
+ max_dims[1] = H5S_UNLIMITED;
+ chunk_dims[0] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_CH_NROWS;
+ chunk_dims[1] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_CH_NCOLS;
+ sel_dims[0] = (hsize_t)DIM0_SCALE_FACTOR;
+ sel_dims[1] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_CH_NCOLS * (hsize_t)DIM1_SCALE_FACTOR;
+
+ filespace = H5Screate_simple(WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS, dataset_dims, max_dims);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(group_id, WRITE_SHARED_ONE_UNLIM_DIM_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ read_buf = (C_DATATYPE *)HDcalloc(1, data_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE)GEN_DATA(i);
+
+ for (i = 0; i < (size_t)WRITE_SHARED_ONE_UNLIM_DIM_NLOOPS; i++) {
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_NROWS / (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_CH_NROWS;
+ count[1] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_NCOLS / (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_CH_NCOLS;
+ stride[0] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_CH_NROWS;
+ stride[1] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_CH_NCOLS;
+ block[0] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_CH_NROWS / (hsize_t)mpi_size;
+ block[1] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_CH_NCOLS;
+ start[0] = (hsize_t)mpi_rank * block[0];
+ start[1] = i * count[1] * block[1];
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0],
+ block[1]);
+ HDfflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ dset_id = H5Dopen2(group_id, WRITE_SHARED_ONE_UNLIM_DIM_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ HDmemset(read_buf, 255, data_size);
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ /* Verify correct data was written */
+ VRFY((0 == HDmemcmp(read_buf, data, data_size)), "Data verification succeeded");
+
+ if (i < (size_t)WRITE_SHARED_ONE_UNLIM_DIM_NLOOPS - 1) {
+ /* Extend the dataset by count[1] chunks in the extensible dimension */
+ dataset_dims[1] += count[1] * block[1];
+ VRFY(H5Dset_extent(dset_id, dataset_dims) >= 0, "H5Dset_extent succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+ }
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ }
+
+ if (data)
+ HDfree(data);
+ if (read_buf)
+ HDfree(read_buf);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
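
Note that these unlimited-dimension tests pass H5S_BLOCK instead of constructing a memory dataspace. The two forms below should be equivalent whenever buf contiguously holds the nelems elements selected in the file; H5S_BLOCK is the newer shorthand (available since HDF5 1.12):

    hsize_t mdims[1] = {nelems};
    hid_t memspace = H5Screate_simple(1, mdims, NULL);
    H5Dwrite(dset_id, H5T_NATIVE_INT, memspace, fspace, dxpl_id, buf); /* explicit memspace */
    H5Sclose(memspace);

    H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_BLOCK, fspace, dxpl_id, buf); /* shorthand */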
+
+/*
+ * Tests parallel write of filtered data in the case where
+ * a dataset has two unlimited dimensions and each
+ * MPI rank writes to its own separate chunks. On each
+ * iteration, the dataset is extended in its first
+ * extensible dimension by the size of one chunk per rank
+ * and in its second extensible dimension by the size of
+ * one chunk. Then, all chunks are written to, read back
+ * and verified.
+ */
+static void
+test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+{
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
+ hsize_t dataset_dims[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+ hsize_t max_dims[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+ hsize_t start[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+ hsize_t stride[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+ hsize_t count[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+ hsize_t block[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
+
+ if (MAINPROCESS)
+ HDputs("Testing write to unshared filtered chunks w/ two unlimited dimensions");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_NROWS;
+ dataset_dims[1] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_NCOLS;
+ max_dims[0] = H5S_UNLIMITED;
+ max_dims[1] = H5S_UNLIMITED;
+ chunk_dims[0] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NROWS;
+ chunk_dims[1] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NCOLS;
+ sel_dims[0] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NROWS;
+ sel_dims[1] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_NCOLS;
+
+ filespace = H5Screate_simple(WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS, dataset_dims, max_dims);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ for (i = 0; i < (size_t)WRITE_UNSHARED_TWO_UNLIM_DIM_NLOOPS; i++) {
+ C_DATATYPE *tmp_realloc = NULL;
+ size_t j;
+
+ /* Set selected dimensions */
+ sel_dims[0] = (i + 1) * WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NROWS;
+ sel_dims[1] = (i + 1) * WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NCOLS;
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+
+ tmp_realloc = (C_DATATYPE *)HDrealloc(data, data_size);
+ VRFY((NULL != tmp_realloc), "HDrealloc succeeded");
+ data = tmp_realloc;
+
+ tmp_realloc = (C_DATATYPE *)HDrealloc(read_buf, data_size);
+ VRFY((NULL != tmp_realloc), "HDrealloc succeeded");
+ read_buf = tmp_realloc;
+
+ for (j = 0; j < data_size / sizeof(*data); j++)
+ data[j] = (C_DATATYPE)GEN_DATA(j);
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = (i + 1);
+ count[1] = (i + 1);
+ stride[0] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NROWS;
+ stride[1] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NCOLS;
+ block[0] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NROWS;
+ block[1] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NCOLS;
+ start[0] = ((hsize_t)mpi_rank * block[0] * count[0]);
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0],
+ block[1]);
+ HDfflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ dset_id = H5Dopen2(group_id, WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ HDmemset(read_buf, 255, data_size);
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ /* Verify the correct data was written */
+ VRFY((0 == HDmemcmp(read_buf, data, data_size)), "Data verification succeeded");
+
+ if (i < (size_t)WRITE_UNSHARED_TWO_UNLIM_DIM_NLOOPS - 1) {
+ /*
+ * Extend the dataset by the size of one chunk per rank
+ * in the first extensible dimension. Extend the dataset
+             * by the size of one chunk in the second extensible dimension.
+ */
+ dataset_dims[0] += (hsize_t)mpi_size * block[0];
+ dataset_dims[1] += block[1];
+ VRFY(H5Dset_extent(dset_id, dataset_dims) >= 0, "H5Dset_extent succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+ }
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ }
+
+ if (data)
+ HDfree(data);
+ if (read_buf)
+ HDfree(read_buf);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
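
The tmp_realloc dance above is the standard guard against leaking the original buffer when reallocation fails; in bare-libc form:

    /* Never assign realloc()'s result directly to the only pointer that
     * owns the allocation: on failure it returns NULL and the old block
     * would be leaked. */
    int *tmp = realloc(data, new_size);
    if (tmp == NULL) {
        free(data); /* old block is still valid here and must be freed */
        /* ...report the error and abort the test... */
    }
    data = tmp;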
+
+/*
+ * Tests parallel write of filtered data in the case where
+ * a dataset has two unlimited dimensions and each MPI
+ * rank writes to a portion of each chunk in the dataset.
+ * On each iteration, the dataset is extended in its extensible
+ * dimensions by the size of a chunk, all chunks are written to
+ * by all ranks, and the new data is read back and verified.
+ */
+static void
+test_write_filtered_dataset_multi_unlim_dim_overlap(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+{
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
+ hsize_t dataset_dims[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+ hsize_t max_dims[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+ hsize_t start[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+ hsize_t stride[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+ hsize_t count[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+ hsize_t block[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
+
+ if (MAINPROCESS)
+ HDputs("Testing write to shared filtered chunks w/ two unlimited dimensions");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_NROWS;
+ dataset_dims[1] = (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_NCOLS;
+ max_dims[0] = H5S_UNLIMITED;
+ max_dims[1] = H5S_UNLIMITED;
+ chunk_dims[0] = (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NROWS;
+ chunk_dims[1] = (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NCOLS;
+ sel_dims[0] = (hsize_t)DIM0_SCALE_FACTOR;
+ sel_dims[1] = (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NCOLS * (hsize_t)DIM1_SCALE_FACTOR;
+
+ filespace = H5Screate_simple(WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS, dataset_dims, max_dims);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(group_id, WRITE_SHARED_TWO_UNLIM_DIM_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ for (i = 0; i < (size_t)WRITE_SHARED_TWO_UNLIM_DIM_NLOOPS; i++) {
+ C_DATATYPE *tmp_realloc = NULL;
+ size_t j;
+
+ /* Set selected dimensions */
+ sel_dims[0] = (i + 1);
+ sel_dims[1] = (i + 1) * (size_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NCOLS;
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+
+ tmp_realloc = (C_DATATYPE *)HDrealloc(data, data_size);
+ VRFY((NULL != tmp_realloc), "HDrealloc succeeded");
+ data = tmp_realloc;
+
+ tmp_realloc = (C_DATATYPE *)HDrealloc(read_buf, data_size);
+ VRFY((NULL != tmp_realloc), "HDrealloc succeeded");
+ read_buf = tmp_realloc;
+
+ for (j = 0; j < data_size / sizeof(*data); j++)
+ data[j] = (C_DATATYPE)GEN_DATA(j);
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = (i + 1);
+ count[1] = (i + 1);
+ stride[0] = (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NROWS;
+ stride[1] = (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NCOLS;
+ block[0] = 1;
+ block[1] = (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NROWS;
+ start[0] = (hsize_t)mpi_rank;
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0],
+ block[1]);
+ HDfflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ dset_id = H5Dopen2(group_id, WRITE_SHARED_TWO_UNLIM_DIM_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ HDmemset(read_buf, 255, data_size);
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ /* Verify correct data was written */
+ VRFY((0 == HDmemcmp(read_buf, data, data_size)), "Data verification succeeded");
+
+ if (i < (size_t)WRITE_SHARED_TWO_UNLIM_DIM_NLOOPS - 1) {
+ /* Extend the dataset by the size of a chunk in each extensible dimension */
+ dataset_dims[0] += (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NROWS;
+ dataset_dims[1] += (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NCOLS;
+ VRFY(H5Dset_extent(dset_id, dataset_dims) >= 0, "H5Dset_extent succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+ }
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ }
+
+ if (data)
+ HDfree(data);
+ if (read_buf)
+ HDfree(read_buf);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
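
verify_space_alloc_status() is a helper defined earlier in this file; conceptually it maps the expected write state onto H5Dget_space_status(). A loose sketch of the core check (the real helper also accounts for the DCPL's allocation- and fill-time settings, so treat this as illustrative only):

    H5D_space_status_t status;
    H5Dget_space_status(dset_id, &status);

    switch (expected) {
        case DATASET_JUST_CREATED:
        case NO_CHUNKS_WRITTEN:
            VRFY((status == H5D_SPACE_STATUS_NOT_ALLOCATED), "no chunks allocated");
            break;
        case SOME_CHUNKS_WRITTEN:
            VRFY((status == H5D_SPACE_STATUS_PART_ALLOCATED), "some chunks allocated");
            break;
        case ALL_CHUNKS_WRITTEN:
            VRFY((status == H5D_SPACE_STATUS_ALLOCATED), "all chunks allocated");
            break;
    }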
@@ -669,7 +1660,8 @@ test_write_filtered_dataset_overlap(void)
* 02/01/2017
*/
static void
-test_write_filtered_dataset_single_no_selection(void)
+test_write_filtered_dataset_single_no_selection(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
@@ -683,27 +1675,18 @@ test_write_filtered_dataset_single_no_selection(void)
hsize_t block[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
size_t segment_length;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write to filtered chunks with a single process having no selection");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS;
@@ -723,20 +1706,22 @@ test_write_filtered_dataset_single_no_selection(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ dset_id = H5Dcreate2(group_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Each process defines the dataset selection in memory and writes
@@ -774,15 +1759,17 @@ test_write_filtered_dataset_single_no_selection(void)
data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
- data = (C_DATATYPE *)HDcalloc(1, data_size);
- VRFY((NULL != data), "HDcalloc succeeded");
+ if (mpi_rank != WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC) {
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE)GEN_DATA(i);
+ }
correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
- for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE)GEN_DATA(i);
-
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) +
(i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])));
@@ -793,15 +1780,12 @@ test_write_filtered_dataset_single_no_selection(void)
((size_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC * segment_length),
0, segment_length * sizeof(*data));
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status - data should only have been written if MPI size > 1 */
+ verify_space_alloc_status(dset_id, plist_id, (mpi_size > 1 ? SOME_CHUNKS_WRITTEN : NO_CHUNKS_WRITTEN));
+
if (data)
HDfree(data);
@@ -811,10 +1795,10 @@ test_write_filtered_dataset_single_no_selection(void)
read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -824,10 +1808,11 @@ test_write_filtered_dataset_single_no_selection(void)
if (read_buf)
HDfree(read_buf);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
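
The no-selection rank still has to participate in the collective H5Dwrite(); it simply contributes an empty selection. The shape of that pattern (NO_SELECT_PROC abbreviates the test's longer macro name):

    /* One rank opts out of the I/O but must still make the collective call. */
    if (mpi_rank == NO_SELECT_PROC) {
        H5Sselect_none(filespace);
        H5Sselect_none(memspace);
    }
    /* Every rank calls H5Dwrite; the empty-selection rank transfers no
     * data but keeps the collective semantics intact. */
    H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data);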
@@ -847,7 +1832,8 @@ test_write_filtered_dataset_single_no_selection(void)
* 02/02/2017
*/
static void
-test_write_filtered_dataset_all_no_selection(void)
+test_write_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
@@ -856,27 +1842,18 @@ test_write_filtered_dataset_all_no_selection(void)
hsize_t chunk_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t sel_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write to filtered chunks with all processes having no selection");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS;
@@ -892,20 +1869,22 @@ test_write_filtered_dataset_all_no_selection(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ dset_id = H5Dcreate2(group_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
filespace = H5Dget_space(dset_id);
@@ -926,15 +1905,12 @@ test_write_filtered_dataset_all_no_selection(void)
for (i = 0; i < data_size / sizeof(*data); i++)
data[i] = (C_DATATYPE)GEN_DATA(i);
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status - no ranks should have written any data */
+ verify_space_alloc_status(dset_id, plist_id, NO_CHUNKS_WRITTEN);
+
if (data)
HDfree(data);
@@ -944,10 +1920,10 @@ test_write_filtered_dataset_all_no_selection(void)
read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -957,10 +1933,11 @@ test_write_filtered_dataset_all_no_selection(void)
if (read_buf)
HDfree(read_buf);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -974,7 +1951,8 @@ test_write_filtered_dataset_all_no_selection(void)
* 02/02/2017
*/
static void
-test_write_filtered_dataset_point_selection(void)
+test_write_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *data = NULL;
C_DATATYPE *correct_buf = NULL;
@@ -985,27 +1963,18 @@ test_write_filtered_dataset_point_selection(void)
hsize_t sel_dims[WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
size_t i, j, data_size, correct_buf_size;
size_t num_points;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write to filtered chunks with point selection");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_NROWS;
@@ -1022,20 +1991,22 @@ test_write_filtered_dataset_point_selection(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ dset_id = H5Dcreate2(group_id, WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Set up point selection */
@@ -1075,15 +2046,12 @@ test_write_filtered_dataset_point_selection(void)
(dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) +
(((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
if (data)
HDfree(data);
@@ -1093,10 +2061,10 @@ test_write_filtered_dataset_point_selection(void)
read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -1108,10 +2076,11 @@ test_write_filtered_dataset_point_selection(void)
if (read_buf)
HDfree(read_buf);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
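
H5Sselect_elements() takes the point coordinates as one flattened array, with one rank-sized tuple per point. A sketch for a 2-D filespace (the coordinate helpers are hypothetical):

    /* coords is laid out [pt0_row, pt0_col, pt1_row, pt1_col, ...] */
    hsize_t *coords = (hsize_t *)HDcalloc(1, num_points * 2 * sizeof(hsize_t));
    for (size_t p = 0; p < num_points; p++) {
        coords[2 * p]     = row_of(p);
        coords[2 * p + 1] = col_of(p);
    }
    VRFY((H5Sselect_elements(filespace, H5S_SELECT_SET, num_points, coords) >= 0),
         "Point selection succeeded");
    HDfree(coords);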
@@ -1129,7 +2098,8 @@ test_write_filtered_dataset_point_selection(void)
* 02/02/2017
*/
static void
-test_write_filtered_dataset_interleaved_write(void)
+test_write_filtered_dataset_interleaved_write(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
@@ -1142,27 +2112,18 @@ test_write_filtered_dataset_interleaved_write(void)
hsize_t count[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
hsize_t block[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing interleaved write to filtered chunks");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_NROWS;
@@ -1179,20 +2140,22 @@ test_write_filtered_dataset_interleaved_write(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, INTERLEAVED_WRITE_FILTERED_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, INTERLEAVED_WRITE_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ dset_id = H5Dcreate2(group_id, INTERLEAVED_WRITE_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Each process defines the dataset selection in memory and writes
@@ -1251,15 +2214,12 @@ test_write_filtered_dataset_interleaved_write(void)
+ ((hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS *
(i / (hsize_t)(mpi_size * INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS))));
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
if (data)
HDfree(data);
@@ -1269,10 +2229,10 @@ test_write_filtered_dataset_interleaved_write(void)
read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" INTERLEAVED_WRITE_FILTERED_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, INTERLEAVED_WRITE_FILTERED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -1282,10 +2242,11 @@ test_write_filtered_dataset_interleaved_write(void)
if (read_buf)
HDfree(read_buf);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
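
For the interleaved case, one way to produce a pattern where every chunk receives data from every rank is to stride the selection by the number of ranks (a generic sketch, not the test's exact geometry):

    /* Rank r selects rows r, r + mpi_size, r + 2*mpi_size, ... so each
     * chunk row is shared by all ranks. */
    hsize_t start[2]  = {(hsize_t)mpi_rank, 0};
    hsize_t stride[2] = {(hsize_t)mpi_size, 1};
    hsize_t count[2]  = {nrows / (hsize_t)mpi_size, ncols};
    hsize_t block[2]  = {1, 1};
    H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block);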
@@ -1308,7 +2269,8 @@ test_write_filtered_dataset_interleaved_write(void)
* 08/20/2021
*/
static void
-test_write_transformed_filtered_dataset_no_overlap(void)
+test_write_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
@@ -1321,27 +2283,18 @@ test_write_transformed_filtered_dataset_no_overlap(void)
hsize_t count[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t block[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write to unshared transformed and filtered chunks");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NROWS;
@@ -1358,19 +2311,22 @@ test_write_transformed_filtered_dataset_no_overlap(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
- filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME,
+ HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -1419,11 +2375,9 @@ test_write_transformed_filtered_dataset_no_overlap(void)
correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) +
(i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])));
- /* Create property list for collective dataset write and data transform */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+ /* Create property list for data transform */
+ plist_id = H5Pcopy(dxpl_id);
+ VRFY((plist_id >= 0), "DXPL copy succeeded");
/* Set data transform expression */
VRFY((H5Pset_data_transform(plist_id, "x") >= 0), "Set data transform expression succeeded");
@@ -1440,7 +2394,7 @@ test_write_transformed_filtered_dataset_no_overlap(void)
read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
@@ -1448,6 +2402,13 @@ test_write_transformed_filtered_dataset_no_overlap(void)
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+    VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+
+ /* Verify space allocation status */
+ plist_id = H5Dget_create_plist(dset_id);
+ VRFY((plist_id >= 0), "H5Dget_create_plist succeeded");
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
if (correct_buf)
HDfree(correct_buf);
if (read_buf)
@@ -1457,6 +2418,7 @@ test_write_transformed_filtered_dataset_no_overlap(void)
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
     VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
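
The transform test layers H5Pset_data_transform() onto a copy of the shared collective DXPL; "x" is the identity expression, so the filtered data should round-trip unchanged. In isolation:

    /* Combine collective MPI-I/O transfer with an elementwise data
     * transform; a non-trivial expression would be e.g. "2*x + 1". */
    hid_t xfer_plist = H5Pcopy(dxpl_id); /* keeps H5FD_MPIO_COLLECTIVE */
    VRFY((H5Pset_data_transform(xfer_plist, "x") >= 0), "Set data transform succeeded");
    VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, xfer_plist, data) >= 0),
         "Dataset write succeeded");
    VRFY((H5Pclose(xfer_plist) >= 0), "DXPL close succeeded");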
@@ -1471,7 +2433,8 @@ test_write_transformed_filtered_dataset_no_overlap(void)
* 02/06/2017
*/
static void
-test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
+test_write_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
@@ -1484,27 +2447,18 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write to unshared filtered chunks on separate pages in 3D dataset");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
@@ -1524,20 +2478,22 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME,
- filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME,
+ HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Each process defines the dataset selection in memory and writes
@@ -1591,15 +2547,12 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
correct_buf[i] = (C_DATATYPE)((i % (hsize_t)mpi_size) + (i / (hsize_t)mpi_size));
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
if (data)
HDfree(data);
@@ -1609,10 +2562,10 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -1622,10 +2575,11 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
if (read_buf)
HDfree(read_buf);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
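
The hunks above all follow the same refactoring shape: each test now receives the parallel FAPL, a base DCPL, and a collective DXPL as parameters, copies the DCPL so per-test chunking and the requested filter don't leak between tests, creates its dataset under a per-filter-configuration parent group, and checks chunk space allocation before and after the collective write. A minimal sketch of that shape follows, assuming the file-local helpers set_dcpl_filter() and verify_space_alloc_status() and test globals (filenames) shown in the diff; the dataset name, extents, and element type are placeholders, and each rank writes the full extent purely for brevity:

static void
example_filtered_write_pattern(const char *parent_group, H5Z_filter_t filter_id,
                               hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
    hsize_t dims[2]       = {16, 16}; /* placeholder dataset extents */
    hsize_t chunk_dims[2] = {4, 4};   /* placeholder chunk shape */
    int     data[16][16]  = {{0}};
    hid_t   file_id, group_id, plist_id, filespace, dset_id;

    /* Open the shared test file and the per-filter parent group */
    file_id  = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
    group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);

    /* Copy the base DCPL, then layer on per-test chunking and the filter */
    plist_id = H5Pcopy(dcpl_id);
    H5Pset_chunk(plist_id, 2, chunk_dims);
    set_dcpl_filter(plist_id, filter_id, NULL); /* file-local helper */

    filespace = H5Screate_simple(2, dims, NULL);
    dset_id   = H5Dcreate2(group_id, "example_dset", H5T_NATIVE_INT, filespace,
                           H5P_DEFAULT, plist_id, H5P_DEFAULT);

    /* Chunk file space should not be allocated until data is written */
    verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
    H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl_id, data);
    verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);

    H5Sclose(filespace);
    H5Pclose(plist_id);
    H5Dclose(dset_id);
    H5Gclose(group_id);
    H5Fclose(file_id);
}
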
@@ -1641,7 +2595,8 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
* 02/06/2017
*/
static void
-test_write_3d_filtered_dataset_no_overlap_same_pages(void)
+test_write_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
@@ -1654,27 +2609,18 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void)
hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
- hid_t file_id, dset_id, plist_id;
- hid_t filespace, memspace;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write to unshared filtered chunks on the same pages in 3D dataset");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS;
@@ -1695,20 +2641,22 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME,
+ dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME,
HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Each process defines the dataset selection in memory and writes
@@ -1762,15 +2710,12 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void)
correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] * dataset_dims[1])) +
(i / (dataset_dims[0] * dataset_dims[1])));
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
if (data)
HDfree(data);
@@ -1780,10 +2725,10 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void)
read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -1793,10 +2738,11 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void)
if (read_buf)
HDfree(read_buf);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -1812,7 +2758,8 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void)
* 02/06/2017
*/
static void
-test_write_3d_filtered_dataset_overlap(void)
+test_write_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
@@ -1825,27 +2772,18 @@ test_write_3d_filtered_dataset_overlap(void)
hsize_t count[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
hsize_t block[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write to shared filtered chunks in 3D dataset");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_3D_NROWS;
@@ -1865,20 +2803,22 @@ test_write_3d_filtered_dataset_overlap(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(group_id, WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Each process defines the dataset selection in memory and writes
@@ -1943,15 +2883,12 @@ test_write_3d_filtered_dataset_overlap(void)
(i / (hsize_t)(mpi_size * WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH *
WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS))));
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
if (data)
HDfree(data);
@@ -1961,10 +2898,10 @@ test_write_3d_filtered_dataset_overlap(void)
read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -1974,10 +2911,11 @@ test_write_3d_filtered_dataset_overlap(void)
if (read_buf)
HDfree(read_buf);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -1992,7 +2930,8 @@ test_write_3d_filtered_dataset_overlap(void)
* 02/10/2017
*/
static void
-test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
+test_write_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
COMPOUND_C_DATATYPE *data = NULL;
COMPOUND_C_DATATYPE *read_buf = NULL;
@@ -2005,28 +2944,27 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
size_t i, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1, memtype = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID,
+ memtype = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write to unshared filtered chunks in Compound Datatype dataset without Datatype "
"conversion");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ /* SZIP and ScaleOffset filters don't support compound types */
+ if (filter_id == H5Z_FILTER_SZIP || filter_id == H5Z_FILTER_SCALEOFFSET) {
+ if (MAINPROCESS)
+ SKIPPED();
+ return;
+ }
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS;
@@ -2045,15 +2983,15 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS,
chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
/* Create the compound type for memory. */
memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE));
@@ -2066,11 +3004,13 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
"Datatype insertion succeeded");
- dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, memtype,
- filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME,
+ memtype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Each process defines the dataset selection in memory and writes
@@ -2124,13 +3064,10 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
correct_buf[i].field3 = (long)((i % dataset_dims[1]) + (i / dataset_dims[1]));
}
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+ VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, dxpl_id, data) >= 0), "Dataset write succeeded");
- VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
if (data)
HDfree(data);
@@ -2141,11 +3078,11 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
read_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME,
- H5P_DEFAULT);
+ dset_id =
+ H5Dopen2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), "Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -2154,11 +3091,12 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
if (read_buf)
HDfree(read_buf);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Tclose(memtype) >= 0), "Datatype close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -2173,7 +3111,8 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
* 02/10/2017
*/
static void
-test_write_cmpd_filtered_dataset_no_conversion_shared(void)
+test_write_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
COMPOUND_C_DATATYPE *data = NULL;
COMPOUND_C_DATATYPE *read_buf = NULL;
@@ -2186,28 +3125,27 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void)
hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
size_t i, correct_buf_size;
- hid_t file_id, dset_id, plist_id, memtype;
- hid_t filespace, memspace;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID,
+ memtype = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write to shared filtered chunks in Compound Datatype dataset without Datatype "
"conversion");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ /* SZIP and ScaleOffset filters don't support compound types */
+ if (filter_id == H5Z_FILTER_SZIP || filter_id == H5Z_FILTER_SCALEOFFSET) {
+ if (MAINPROCESS)
+ SKIPPED();
+ return;
+ }
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS;
@@ -2226,15 +3164,15 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS,
chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
/* Create the compound type for memory. */
memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE));
@@ -2247,11 +3185,13 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void)
VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
"Datatype insertion succeeded");
- dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, memtype,
+ dset_id = H5Dcreate2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, memtype,
filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Each process defines the dataset selection in memory and writes
@@ -2311,13 +3251,10 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void)
(((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
}
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+ VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, dxpl_id, data) >= 0), "Dataset write succeeded");
- VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
if (data)
HDfree(data);
@@ -2329,10 +3266,10 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void)
VRFY((NULL != read_buf), "HDcalloc succeeded");
dset_id =
- H5Dopen2(file_id, "/" WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
+ H5Dopen2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), "Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -2341,11 +3278,12 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void)
if (read_buf)
HDfree(read_buf);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Tclose(memtype) >= 0), "Datatype close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
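
Both compound-type tests build the in-memory compound type the same way, registering each struct field as a named member at its HOFFSET. A condensed sketch of that construction; the diff confirms the field3/"LongData" and "IntData" names, while the exact types of field1/field2 are assumptions about the struct in t_filters_parallel.h:

/* Mirrors the struct declared in t_filters_parallel.h; the field1/field2
 * types are assumed here, field3 appears verbatim in the diff above */
typedef struct {
    short field1;
    int   field2;
    long  field3;
} COMPOUND_C_DATATYPE;

hid_t memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE));

H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT);
H5Tinsert(memtype, "IntData",   HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT);
H5Tinsert(memtype, "LongData",  HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG);
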
@@ -2356,16 +3294,18 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void)
* chunks using a compound datatype which requires a
* datatype conversion.
*
- * NOTE: This test currently should fail because the
- * datatype conversion causes the parallel library to
- * break to independent I/O and this isn't allowed when
- * there are filters in the pipeline.
+ * NOTE: This test should currently fail when mpi_size > 1,
+ * since the datatype conversion causes the parallel
+ * library to break collective I/O down to independent
+ * I/O, which isn't allowed when filters are applied
+ * to a dataset.
*
* Programmer: Jordan Henderson
* 02/07/2017
*/
static void
-test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
+test_write_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
COMPOUND_C_DATATYPE *data = NULL;
COMPOUND_C_DATATYPE *read_buf = NULL;
@@ -2378,28 +3318,33 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
size_t i, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1, filetype = -1, memtype = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID,
+ filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write to unshared filtered chunks in Compound Datatype dataset with Datatype "
"conversion");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
+ /* Skip for MPI communicator size of 1 */
+ if (mpi_size == 1) {
+ SKIPPED();
+ return;
+ }
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ /* SZIP and ScaleOffset filters don't support compound types */
+ if (filter_id == H5Z_FILTER_SZIP || filter_id == H5Z_FILTER_SCALEOFFSET) {
+ if (MAINPROCESS)
+ SKIPPED();
+ return;
+ }
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS;
@@ -2418,15 +3363,15 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS,
chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
/* Create the compound type for memory. */
memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE));
@@ -2447,11 +3392,13 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
- dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME,
+ dset_id = H5Dcreate2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME,
filetype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Each process defines the dataset selection in memory and writes
@@ -2497,20 +3444,16 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
data[i].field3 = (long)GEN_DATA(i);
}
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
/* Ensure that this test currently fails since type conversions break collective mode */
H5E_BEGIN_TRY
{
- VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) < 0),
- "Dataset write succeeded");
+ VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, dxpl_id, data) < 0), "Dataset write failed as expected");
}
H5E_END_TRY;
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, NO_CHUNKS_WRITTEN);
+
if (data)
HDfree(data);
@@ -2520,11 +3463,11 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
read_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME,
- H5P_DEFAULT);
+ dset_id =
+ H5Dopen2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), "Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -2533,12 +3476,13 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
if (read_buf)
HDfree(read_buf);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Tclose(filetype) >= 0), "File datatype close succeeded");
VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
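
The expected-failure check above can also be written with the return value captured explicitly, which makes the inverted VRFY condition easier to read. A sketch of the same idiom, using the dataset, datatypes, and dataspaces from the surrounding test:

herr_t write_ret;

/* The memory type differs from the (big-endian, packed) file type, so the
 * library must convert element-by-element; that forces independent I/O,
 * which the filtered, collective-only write path rejects for mpi_size > 1 */
H5E_BEGIN_TRY
{
    write_ret = H5Dwrite(dset_id, memtype, memspace, filespace, dxpl_id, data);
}
H5E_END_TRY;

VRFY((write_ret < 0), "Dataset write failed as expected");
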
@@ -2549,16 +3493,18 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
* chunks using a compound datatype which requires
* a datatype conversion.
*
- * NOTE: This test currently should fail because the
- * datatype conversion causes the parallel library to
- * break to independent I/O and this isn't allowed when
- * there are filters in the pipeline.
+ * NOTE: This test should currently fail when mpi_size > 1,
+ * since the datatype conversion causes the parallel
+ * library to break collective I/O down to independent
+ * I/O, which isn't allowed when filters are applied
+ * to a dataset.
*
* Programmer: Jordan Henderson
* 02/10/2017
*/
static void
-test_write_cmpd_filtered_dataset_type_conversion_shared(void)
+test_write_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
COMPOUND_C_DATATYPE *data = NULL;
COMPOUND_C_DATATYPE *read_buf = NULL;
@@ -2571,28 +3517,33 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void)
hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
size_t i, correct_buf_size;
- hid_t file_id, dset_id, plist_id, filetype, memtype;
- hid_t filespace, memspace;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs(
"Testing write to shared filtered chunks in Compound Datatype dataset with Datatype conversion");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
+ /* Skip for MPI communicator size of 1 */
+ if (mpi_size == 1) {
+ SKIPPED();
+ return;
+ }
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ /* SZIP and ScaleOffset filters don't support compound types */
+ if (filter_id == H5Z_FILTER_SZIP || filter_id == H5Z_FILTER_SCALEOFFSET) {
+ if (MAINPROCESS)
+ SKIPPED();
+ return;
+ }
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS;
@@ -2611,15 +3562,15 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS,
chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
/* Create the compound type for memory. */
memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE));
@@ -2640,11 +3591,13 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void)
VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
- dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME,
+ dset_id = H5Dcreate2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME,
filetype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Each process defines the dataset selection in memory and writes
@@ -2690,20 +3643,16 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void)
data[i].field3 = (long)GEN_DATA(i);
}
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
/* Ensure that this test currently fails since type conversions break collective mode */
H5E_BEGIN_TRY
{
- VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) < 0),
- "Dataset write succeeded");
+ VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, dxpl_id, data) < 0), "Dataset write failed as expected");
}
H5E_END_TRY;
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, NO_CHUNKS_WRITTEN);
+
if (data)
HDfree(data);
@@ -2713,11 +3662,11 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void)
read_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME,
- H5P_DEFAULT);
+ dset_id =
+ H5Dopen2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), "Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -2726,12 +3675,13 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void)
if (read_buf)
HDfree(read_buf);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Tclose(filetype) >= 0), "File datatype close succeeded");
VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -2751,7 +3701,8 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void)
* 05/14/2018
*/
static void
-test_read_one_chunk_filtered_dataset(void)
+test_read_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
@@ -2765,16 +3716,15 @@ test_read_one_chunk_filtered_dataset(void)
hsize_t block[READ_ONE_CHUNK_FILTERED_DATASET_DIMS];
hsize_t flat_dims[1];
size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
int * recvcounts = NULL;
int * displs = NULL;
if (MAINPROCESS)
HDputs("Testing read from one-chunk filtered dataset");
- CHECK_CUR_FILTER_AVAIL();
-
dataset_dims[0] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_NROWS;
dataset_dims[1] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_NCOLS;
@@ -2802,6 +3752,9 @@ test_read_one_chunk_filtered_dataset(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace = H5Screate_simple(READ_ONE_CHUNK_FILTERED_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
@@ -2810,44 +3763,43 @@ test_read_one_chunk_filtered_dataset(void)
chunk_dims[0] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS;
chunk_dims[1] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, READ_ONE_CHUNK_FILTERED_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ dset_id = H5Dcreate2(group_id, READ_ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(file_id, "/" READ_ONE_CHUNK_FILTERED_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, READ_ONE_CHUNK_FILTERED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_NROWS / (hsize_t)mpi_size;
@@ -2887,18 +3839,12 @@ test_read_one_chunk_filtered_dataset(void)
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
- /* Create property list for collective dataset read */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
read_buf_size = flat_dims[0] * sizeof(*read_buf);
read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
@@ -2937,7 +3883,7 @@ test_read_one_chunk_filtered_dataset(void)
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
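
In each read test, every rank reads its own flattened hyperslab (flat_dims[0] elements) and the pieces are then gathered so all ranks can compare against correct_buf; the recvcounts/displs arrays declared above feed that step. A sketch of the gather-and-verify stage, under the assumptions that all ranks read equally sized slabs and that C_DATATYPE is a long, making MPI_LONG the matching MPI type:

int j;

recvcounts = (int *)HDcalloc((size_t)mpi_size, sizeof(*recvcounts));
displs     = (int *)HDcalloc((size_t)mpi_size, sizeof(*displs));

/* Each rank contributes flat_dims[0] elements at a rank-ordered offset */
for (j = 0; j < mpi_size; j++) {
    recvcounts[j] = (int)flat_dims[0];
    displs[j]     = j * (int)flat_dims[0];
}

/* Gather all ranks' slabs so every process can verify the full dataset */
MPI_Allgatherv(read_buf, (int)flat_dims[0], MPI_LONG, global_buf, recvcounts,
               displs, MPI_LONG, comm);

VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
     "Data verification succeeded");
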
@@ -2956,7 +3902,8 @@ test_read_one_chunk_filtered_dataset(void)
* 05/15/2018
*/
static void
-test_read_filtered_dataset_no_overlap(void)
+test_read_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
@@ -2970,16 +3917,15 @@ test_read_filtered_dataset_no_overlap(void)
hsize_t block[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t flat_dims[1];
size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
int * recvcounts = NULL;
int * displs = NULL;
if (MAINPROCESS)
HDputs("Testing read from unshared filtered chunks");
- CHECK_CUR_FILTER_AVAIL();
-
dataset_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NROWS;
dataset_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NCOLS;
@@ -3006,6 +3952,9 @@ test_read_filtered_dataset_no_overlap(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace = H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
@@ -3014,44 +3963,43 @@ test_read_filtered_dataset_no_overlap(void)
chunk_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
chunk_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ dset_id = H5Dcreate2(group_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(file_id, "/" READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
@@ -3091,18 +4039,12 @@ test_read_filtered_dataset_no_overlap(void)
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
- /* Create property list for collective dataset read */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
read_buf_size = flat_dims[0] * sizeof(*read_buf);
read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
@@ -3141,7 +4083,7 @@ test_read_filtered_dataset_no_overlap(void)
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -3161,7 +4103,8 @@ test_read_filtered_dataset_no_overlap(void)
* 05/15/2018
*/
static void
-test_read_filtered_dataset_overlap(void)
+test_read_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
@@ -3175,16 +4118,15 @@ test_read_filtered_dataset_overlap(void)
hsize_t block[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t flat_dims[1];
size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
int * recvcounts = NULL;
int * displs = NULL;
if (MAINPROCESS)
HDputs("Testing read from shared filtered chunks");
- CHECK_CUR_FILTER_AVAIL();
-
dataset_dims[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_NROWS;
dataset_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_NCOLS;
@@ -3211,6 +4153,9 @@ test_read_filtered_dataset_overlap(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace = H5Screate_simple(READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
@@ -3219,44 +4164,43 @@ test_read_filtered_dataset_overlap(void)
chunk_dims[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NROWS;
chunk_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NCOLS;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(group_id, READ_SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(file_id, "/" READ_SHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, READ_SHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = (hsize_t)DIM0_SCALE_FACTOR;
@@ -3296,18 +4240,12 @@ test_read_filtered_dataset_overlap(void)
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
- /* Create property list for collective dataset read */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
read_buf_size = flat_dims[0] * sizeof(*read_buf);
read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
@@ -3362,7 +4300,7 @@ test_read_filtered_dataset_overlap(void)
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -3382,7 +4320,8 @@ test_read_filtered_dataset_overlap(void)
* 05/15/2018
*/
static void
-test_read_filtered_dataset_single_no_selection(void)
+test_read_filtered_dataset_single_no_selection(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
@@ -3397,16 +4336,15 @@ test_read_filtered_dataset_single_no_selection(void)
hsize_t flat_dims[1];
size_t i, read_buf_size, correct_buf_size;
size_t segment_length;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
int * recvcounts = NULL;
int * displs = NULL;
if (MAINPROCESS)
HDputs("Testing read from filtered chunks with a single process having no selection");
- CHECK_CUR_FILTER_AVAIL();
-
dataset_dims[0] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS;
dataset_dims[1] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
@@ -3437,6 +4375,9 @@ test_read_filtered_dataset_single_no_selection(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace =
H5Screate_simple(READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
@@ -3446,44 +4387,43 @@ test_read_filtered_dataset_single_no_selection(void)
chunk_dims[0] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
chunk_dims[1] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME,
+ dset_id = H5Dcreate2(group_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME,
HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(file_id, "/" READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
@@ -3530,19 +4470,19 @@ test_read_filtered_dataset_single_no_selection(void)
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
- /* Create property list for collective dataset read */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
read_buf_size = flat_dims[0] * sizeof(*read_buf);
read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ if (mpi_rank == READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC) {
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, NULL) >= 0),
+ "Dataset read succeeded");
+ }
+ else {
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+ }
global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != global_buf), "HDcalloc succeeded");
@@ -3588,7 +4528,7 @@ test_read_filtered_dataset_single_no_selection(void)
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
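
The branch added above is the interesting part of this test: the rank with no selection now passes a NULL buffer to the collective H5Dread(), which the library must tolerate when nothing is selected. A sketch of the participation rule, assuming handles set up as in the surrounding tests (NO_SELECT_RANK is a hypothetical stand-in for the test's READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC constant):

/* Sketch: a rank with no data still makes the collective read call */
if (mpi_rank == NO_SELECT_RANK) {
    VRFY((H5Sselect_none(filespace) >= 0), "Select none succeeded");
    VRFY((H5Sselect_none(memspace) >= 0), "Select none succeeded");

    /* With an empty selection, a NULL buffer is legal here */
    VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, NULL) >= 0),
         "Zero-sized collective read succeeded");
}
else {
    VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
         "Dataset read succeeded");
}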
@@ -3609,7 +4549,8 @@ test_read_filtered_dataset_single_no_selection(void)
* 05/15/2018
*/
static void
-test_read_filtered_dataset_all_no_selection(void)
+test_read_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
@@ -3617,14 +4558,13 @@ test_read_filtered_dataset_all_no_selection(void)
hsize_t chunk_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t sel_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
size_t read_buf_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing read from filtered chunks with all processes having no selection");
- CHECK_CUR_FILTER_AVAIL();
-
dataset_dims[0] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS;
dataset_dims[1] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
@@ -3646,6 +4586,9 @@ test_read_filtered_dataset_all_no_selection(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace = H5Screate_simple(READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
@@ -3654,44 +4597,43 @@ test_read_filtered_dataset_all_no_selection(void)
chunk_dims[0] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
chunk_dims[1] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ dset_id = H5Dcreate2(group_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(file_id, "/" READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = sel_dims[1] = 0;
@@ -3705,20 +4647,16 @@ test_read_filtered_dataset_all_no_selection(void)
VRFY((H5Sselect_none(filespace) >= 0), "Select none succeeded");
- /* Create property list for collective dataset read */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*read_buf);
read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+
if (read_buf)
HDfree(read_buf);
if (correct_buf)
@@ -3727,7 +4665,7 @@ test_read_filtered_dataset_all_no_selection(void)
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
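
The verify_space_alloc_status() calls threaded through these hunks assert that chunk space is unallocated right after creation (DATASET_JUST_CREATED) and fully allocated once every chunk has been written (ALL_CHUNKS_WRITTEN). The public H5Dget_space_status() API is enough to express the core of such a check; the helper below is a guess at its shape, not the test file's actual implementation:

/* Sketch: assert a dataset's space-allocation state via public API.
 * H5Dget_space_status() and the H5D_SPACE_STATUS_* values are real
 * HDF5 API; the helper itself is illustrative. */
static void
example_verify_alloc(hid_t dset_id, int expect_all_chunks_written)
{
    H5D_space_status_t status;

    VRFY((H5Dget_space_status(dset_id, &status) >= 0), "H5Dget_space_status succeeded");

    if (expect_all_chunks_written)
        VRFY((status == H5D_SPACE_STATUS_ALLOCATED), "All chunk space allocated");
    else
        /* Late/incremental allocation: nothing on disk yet */
        VRFY((status == H5D_SPACE_STATUS_NOT_ALLOCATED), "No chunk space allocated yet");
}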
@@ -3747,7 +4685,8 @@ test_read_filtered_dataset_all_no_selection(void)
* 05/15/2018
*/
static void
-test_read_filtered_dataset_point_selection(void)
+test_read_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *correct_buf = NULL;
C_DATATYPE *read_buf = NULL;
@@ -3759,16 +4698,15 @@ test_read_filtered_dataset_point_selection(void)
hsize_t flat_dims[1];
size_t i, j, read_buf_size, correct_buf_size;
size_t num_points;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
int * recvcounts = NULL;
int * displs = NULL;
if (MAINPROCESS)
HDputs("Testing read from filtered chunks with point selection");
- CHECK_CUR_FILTER_AVAIL();
-
dataset_dims[0] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS;
dataset_dims[1] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS;
@@ -3795,6 +4733,9 @@ test_read_filtered_dataset_point_selection(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace = H5Screate_simple(READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
@@ -3803,44 +4744,43 @@ test_read_filtered_dataset_point_selection(void)
chunk_dims[0] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS;
chunk_dims[1] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ dset_id = H5Dcreate2(group_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(file_id, "/" READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS / (hsize_t)mpi_size;
@@ -3871,18 +4811,12 @@ test_read_filtered_dataset_point_selection(void)
VRFY((H5Sselect_elements(filespace, H5S_SELECT_SET, (hsize_t)num_points, (const hsize_t *)coords) >= 0),
"Point selection succeeded");
- /* Create property list for collective dataset read */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
read_buf_size = flat_dims[0] * sizeof(*read_buf);
read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
@@ -3941,7 +4875,7 @@ test_read_filtered_dataset_point_selection(void)
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
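
The point-selection test drives H5Sselect_elements() with a flattened coordinate array: num_points entries of rank coordinates each, in row-major order. A sketch of how a per-rank coordinate list might be built (the point pattern is illustrative; HDcalloc/HDfree and VRFY are the test harness's wrappers):

/* Sketch: each rank selects scattered points in a 2-D filespace.
 * Coordinates are flattened as {row0, col0, row1, col1, ...}. */
size_t   num_points = 4; /* illustrative */
hsize_t *coords     = (hsize_t *)HDcalloc(1, 2 * num_points * sizeof(hsize_t));
size_t   i;

VRFY((NULL != coords), "HDcalloc succeeded");

for (i = 0; i < num_points; i++) {
    coords[2 * i]     = (hsize_t)mpi_rank; /* one row per rank */
    coords[2 * i + 1] = (hsize_t)(2 * i);  /* every other column */
}

VRFY((H5Sselect_elements(filespace, H5S_SELECT_SET, num_points, (const hsize_t *)coords) >= 0),
     "Point selection succeeded");

HDfree(coords);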
@@ -3964,7 +4898,8 @@ test_read_filtered_dataset_point_selection(void)
* 05/15/2018
*/
static void
-test_read_filtered_dataset_interleaved_read(void)
+test_read_filtered_dataset_interleaved_read(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
@@ -3978,16 +4913,15 @@ test_read_filtered_dataset_interleaved_read(void)
hsize_t block[INTERLEAVED_READ_FILTERED_DATASET_DIMS];
hsize_t flat_dims[1];
size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
int * recvcounts = NULL;
int * displs = NULL;
if (MAINPROCESS)
HDputs("Testing interleaved read from filtered chunks");
- CHECK_CUR_FILTER_AVAIL();
-
dataset_dims[0] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NROWS;
dataset_dims[1] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NCOLS;
@@ -4023,6 +4957,9 @@ test_read_filtered_dataset_interleaved_read(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace = H5Screate_simple(INTERLEAVED_READ_FILTERED_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
@@ -4031,44 +4968,43 @@ test_read_filtered_dataset_interleaved_read(void)
chunk_dims[0] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS;
chunk_dims[1] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, INTERLEAVED_READ_FILTERED_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, INTERLEAVED_READ_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ dset_id = H5Dcreate2(group_id, INTERLEAVED_READ_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(file_id, "/" INTERLEAVED_READ_FILTERED_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, INTERLEAVED_READ_FILTERED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = (hsize_t)(INTERLEAVED_READ_FILTERED_DATASET_NROWS / mpi_size);
@@ -4110,18 +5046,12 @@ test_read_filtered_dataset_interleaved_read(void)
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
- /* Create property list for collective dataset read */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
read_buf_size = flat_dims[0] * sizeof(*read_buf);
read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
@@ -4176,7 +5106,7 @@ test_read_filtered_dataset_interleaved_read(void)
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
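
After the collective read, each test reassembles the per-rank slices into global_buf with MPI_Allgatherv(), which is what the recvcounts/displs arrays declared in these functions feed. A minimal sketch with int data and equal-sized contributions (`local_count` and the int payload are assumptions; the real tests use their C_DATATYPE with the matching MPI type):

/* Sketch: gather equal-sized per-rank slices into a global buffer */
int *recvcounts = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(int));
int *displs     = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(int));
int  i;

VRFY((NULL != recvcounts && NULL != displs), "HDcalloc succeeded");

for (i = 0; i < mpi_size; i++) {
    recvcounts[i] = local_count;     /* elements contributed by rank i */
    displs[i]     = i * local_count; /* rank i's offset in global_buf */
}

VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, local_count, MPI_INT, global_buf, recvcounts, displs,
                                    MPI_INT, comm)),
     "MPI_Allgatherv succeeded");

HDfree(recvcounts);
HDfree(displs);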
@@ -4196,7 +5126,8 @@ test_read_filtered_dataset_interleaved_read(void)
* 05/16/2018
*/
static void
-test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
+test_read_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
MPI_Datatype vector_type;
MPI_Datatype resized_vector_type;
@@ -4212,14 +5143,13 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
hsize_t block[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
hsize_t flat_dims[1];
size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing read from unshared filtered chunks on separate pages in 3D dataset");
- CHECK_CUR_FILTER_AVAIL();
-
dataset_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
dataset_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS;
dataset_dims[2] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH;
@@ -4245,6 +5175,9 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace =
H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, dataset_dims, NULL);
@@ -4255,45 +5188,44 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
chunk_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
chunk_dims[2] = 1;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY(
(H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME,
+ dset_id = H5Dcreate2(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME,
HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(file_id, "/" READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
@@ -4340,18 +5272,12 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
- /* Create property list for collective dataset read */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
read_buf_size = flat_dims[0] * sizeof(*read_buf);
read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
@@ -4392,7 +5318,7 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -4419,7 +5345,8 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
* 08/20/2021
*/
static void
-test_read_transformed_filtered_dataset_no_overlap(void)
+test_read_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
@@ -4433,16 +5360,15 @@ test_read_transformed_filtered_dataset_no_overlap(void)
hsize_t block[READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t flat_dims[1];
size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
int * recvcounts = NULL;
int * displs = NULL;
if (MAINPROCESS)
HDputs("Testing read from unshared transformed and filtered chunks");
- CHECK_CUR_FILTER_AVAIL();
-
dataset_dims[0] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NROWS;
dataset_dims[1] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS;
@@ -4469,6 +5395,9 @@ test_read_transformed_filtered_dataset_no_overlap(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace =
H5Screate_simple(READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
@@ -4478,20 +5407,23 @@ test_read_transformed_filtered_dataset_no_overlap(void)
chunk_dims[0] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS;
chunk_dims[1] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NCOLS;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY(
(H5Pset_chunk(plist_id, READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME,
+ dset_id = H5Dcreate2(group_id, READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME,
HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -4505,26 +5437,26 @@ test_read_transformed_filtered_dataset_no_overlap(void)
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, correct_buf) >= 0),
"Dataset write succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+
+ /* Verify space allocation status */
+ plist_id = H5Dget_create_plist(dset_id);
+ VRFY((plist_id >= 0), "H5Dget_create_plist succeeded");
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(file_id, "/" READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS;
@@ -4565,11 +5497,9 @@ test_read_transformed_filtered_dataset_no_overlap(void)
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
- /* Create property list for collective dataset read and data transform */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+ /* Create property list for data transform */
+ plist_id = H5Pcopy(dxpl_id);
+ VRFY((plist_id >= 0), "DXPL copy succeeded");
/* Set data transform expression */
VRFY((H5Pset_data_transform(plist_id, "x") >= 0), "Set data transform expression succeeded");
@@ -4619,6 +5549,7 @@ test_read_transformed_filtered_dataset_no_overlap(void)
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
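
This test copies the shared DXPL and sets the identity transform "x", so the transform code path runs against filtered chunks without altering the data. A sketch with a non-identity expression, to show the mechanism (H5Pset_data_transform() is the real API; the handles are assumed to be set up as in the surrounding tests):

/* Sketch: data transform layered on a collective read of filtered chunks.
 * "2*x" doubles each element in transit; the test above uses "x" so its
 * verification buffer needs no adjustment. */
hid_t xform_dxpl = H5Pcopy(dxpl_id);
VRFY((xform_dxpl >= 0), "DXPL copy succeeded");

VRFY((H5Pset_data_transform(xform_dxpl, "2*x") >= 0), "Set data transform expression succeeded");

VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, xform_dxpl, read_buf) >= 0),
     "Transformed dataset read succeeded");

VRFY((H5Pclose(xform_dxpl) >= 0), "DXPL close succeeded");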
@@ -4639,7 +5570,8 @@ test_read_transformed_filtered_dataset_no_overlap(void)
* 05/16/2018
*/
static void
-test_read_3d_filtered_dataset_no_overlap_same_pages(void)
+test_read_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
@@ -4653,16 +5585,15 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(void)
hsize_t block[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
hsize_t flat_dims[1];
size_t i, read_buf_size, correct_buf_size;
- hid_t file_id, dset_id, plist_id;
- hid_t filespace, memspace;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
int * recvcounts = NULL;
int * displs = NULL;
if (MAINPROCESS)
HDputs("Testing read from unshared filtered chunks on the same pages in 3D dataset");
- CHECK_CUR_FILTER_AVAIL();
-
dataset_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS;
dataset_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS;
dataset_dims[2] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH;
@@ -4689,6 +5620,9 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace =
H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, dataset_dims, NULL);
@@ -4699,45 +5633,44 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(void)
chunk_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
chunk_dims[2] = 1;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, chunk_dims) >=
0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME,
+ dset_id = H5Dcreate2(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME,
HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(file_id, "/" READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
@@ -4783,18 +5716,12 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(void)
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
- /* Create property list for collective dataset read */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
read_buf_size = flat_dims[0] * sizeof(*read_buf);
read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
@@ -4833,7 +5760,7 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(void)
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -4854,7 +5781,8 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(void)
* 05/16/2018
*/
static void
-test_read_3d_filtered_dataset_overlap(void)
+test_read_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
MPI_Datatype vector_type;
MPI_Datatype resized_vector_type;
@@ -4870,14 +5798,13 @@ test_read_3d_filtered_dataset_overlap(void)
hsize_t block[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
hsize_t flat_dims[1];
size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing read from shared filtered chunks in 3D dataset");
- CHECK_CUR_FILTER_AVAIL();
-
dataset_dims[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_NROWS;
dataset_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_NCOLS;
dataset_dims[2] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_DEPTH;
@@ -4916,6 +5843,9 @@ test_read_3d_filtered_dataset_overlap(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace = H5Screate_simple(READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
@@ -4925,44 +5855,43 @@ test_read_3d_filtered_dataset_overlap(void)
chunk_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
chunk_dims[2] = 1;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME,
+ dset_id = H5Dcreate2(group_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME,
filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(file_id, "/" READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = (hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_NROWS / mpi_size);
@@ -5007,18 +5936,12 @@ test_read_3d_filtered_dataset_overlap(void)
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
- /* Create property list for collective dataset read */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
read_buf_size = flat_dims[0] * sizeof(*read_buf);
read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
@@ -5068,7 +5991,7 @@ test_read_3d_filtered_dataset_overlap(void)
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
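
The 3-D shared-chunk test declares vector_type/resized_vector_type because the ranks' rows interleave in the file: a strided MPI vector type, resized to a one-element extent, lets a single gather deposit each rank's contribution at interleaved offsets. A sketch of that construction (the int payload, counts, and strides are illustrative):

/* Sketch: strided vector type with shrunken extent for interleaved gathers */
MPI_Datatype vector_type;
MPI_Datatype resized_vector_type;
int          nblocks = 16; /* blocks this rank contributes */

MPI_Type_vector(nblocks, 1 /* blocklength */, mpi_size /* stride */, MPI_INT, &vector_type);
MPI_Type_commit(&vector_type);

/* Resize the extent to one element so rank i's blocks start at offset i */
MPI_Type_create_resized(vector_type, 0, (MPI_Aint)sizeof(int), &resized_vector_type);
MPI_Type_commit(&resized_vector_type);

/* ... pass resized_vector_type as the receive type of the gather ... */

MPI_Type_free(&vector_type);
MPI_Type_free(&resized_vector_type);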
@@ -5088,7 +6011,8 @@ test_read_3d_filtered_dataset_overlap(void)
* 05/17/2018
*/
static void
-test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
+test_read_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
COMPOUND_C_DATATYPE *read_buf = NULL;
COMPOUND_C_DATATYPE *correct_buf = NULL;
@@ -5102,16 +6026,23 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
hsize_t flat_dims[1];
size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1, memtype = -1;
- hid_t filespace = -1, memspace = -1;
- int * recvcounts = NULL;
- int * displs = NULL;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID,
+ memtype = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ int * recvcounts = NULL;
+ int * displs = NULL;
if (MAINPROCESS)
HDputs("Testing read from unshared filtered chunks in Compound Datatype dataset without Datatype "
"conversion");
- CHECK_CUR_FILTER_AVAIL();
+ /* SZIP and ScaleOffset filters don't support compound types */
+ if (filter_id == H5Z_FILTER_SZIP || filter_id == H5Z_FILTER_SCALEOFFSET) {
+ if (MAINPROCESS)
+ SKIPPED();
+ return;
+ }
dataset_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS;
dataset_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS;
@@ -5153,6 +6084,9 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS,
dataset_dims, NULL);
@@ -5162,46 +6096,45 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
chunk_dims[0] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
chunk_dims[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS,
chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME,
+ dset_id = H5Dcreate2(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME,
memtype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
dset_id =
- H5Dopen2(file_id, "/" READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT);
+ H5Dopen2(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
@@ -5241,18 +6174,12 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
- /* Create property list for collective dataset read */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
read_buf_size = flat_dims[0] * sizeof(*read_buf);
read_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, memtype, memspace, filespace, plist_id, read_buf) >= 0), "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, memtype, memspace, filespace, dxpl_id, read_buf) >= 0), "Dataset read succeeded");
global_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != global_buf), "HDcalloc succeeded");
@@ -5291,7 +6218,7 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
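
The compound-type tests now skip SZIP and ScaleOffset up front, since neither filter supports compound datatypes; the memtype they read with is built via the standard H5Tcreate/H5Tinsert pattern. A sketch against a stand-in struct (the real COMPOUND_C_DATATYPE and its member names live in t_filters_parallel.h; the layout below is illustrative):

/* Sketch: compound memory datatype for the filtered-chunk compound tests */
typedef struct {
    short field1;
    int   field2;
    long  field3;
} example_cmpd_t;

hid_t memtype = H5Tcreate(H5T_COMPOUND, sizeof(example_cmpd_t));
VRFY((memtype >= 0), "Datatype creation succeeded");

VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(example_cmpd_t, field1), H5T_NATIVE_SHORT) >= 0),
     "Datatype insertion succeeded");
VRFY((H5Tinsert(memtype, "IntData", HOFFSET(example_cmpd_t, field2), H5T_NATIVE_INT) >= 0),
     "Datatype insertion succeeded");
VRFY((H5Tinsert(memtype, "LongData", HOFFSET(example_cmpd_t, field3), H5T_NATIVE_LONG) >= 0),
     "Datatype insertion succeeded");

/* ... create, write, and read the dataset with memtype, then ... */
VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");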
@@ -5311,7 +6238,8 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
* 05/17/2018
*/
static void
-test_read_cmpd_filtered_dataset_no_conversion_shared(void)
+test_read_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
COMPOUND_C_DATATYPE *read_buf = NULL;
COMPOUND_C_DATATYPE *correct_buf = NULL;
@@ -5325,16 +6253,23 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(void)
hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
hsize_t flat_dims[1];
size_t i, read_buf_size, correct_buf_size;
- hid_t file_id, dset_id, plist_id, memtype;
- hid_t filespace, memspace;
- int * recvcounts = NULL;
- int * displs = NULL;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID,
+ memtype = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ int * recvcounts = NULL;
+ int * displs = NULL;
if (MAINPROCESS)
HDputs("Testing read from shared filtered chunks in Compound Datatype dataset without Datatype "
"conversion");
- CHECK_CUR_FILTER_AVAIL();
+ /* SZIP and ScaleOffset filters don't support compound types */
+ if (filter_id == H5Z_FILTER_SZIP || filter_id == H5Z_FILTER_SCALEOFFSET) {
+ if (MAINPROCESS)
+ SKIPPED();
+ return;
+ }
dataset_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS;
dataset_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS;
@@ -5382,6 +6317,9 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS,
dataset_dims, NULL);
@@ -5391,46 +6329,45 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(void)
chunk_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS;
chunk_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS,
chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME,
+ dset_id = H5Dcreate2(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME,
memtype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
dset_id =
- H5Dopen2(file_id, "/" READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
+ H5Dopen2(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t)mpi_size;
@@ -5470,18 +6407,12 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(void)
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
- /* Create property list for collective dataset read */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
read_buf_size = flat_dims[0] * sizeof(*read_buf);
read_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, memtype, memspace, filespace, plist_id, read_buf) >= 0), "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, memtype, memspace, filespace, dxpl_id, read_buf) >= 0), "Dataset read succeeded");
global_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != global_buf), "HDcalloc succeeded");
@@ -5520,7 +6451,7 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(void)
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -5540,7 +6471,8 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(void)
* 05/17/2018
*/
static void
-test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
+test_read_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
COMPOUND_C_DATATYPE *read_buf = NULL;
COMPOUND_C_DATATYPE *correct_buf = NULL;
@@ -5554,8 +6486,10 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
hsize_t flat_dims[1];
size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1, filetype = -1, memtype = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
int * recvcounts = NULL;
int * displs = NULL;
@@ -5563,7 +6497,12 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
HDputs("Testing read from unshared filtered chunks in Compound Datatype dataset with Datatype "
"conversion");
- CHECK_CUR_FILTER_AVAIL();
+ /* SZIP and ScaleOffset filters don't support compound types */
+ if (filter_id == H5Z_FILTER_SZIP || filter_id == H5Z_FILTER_SCALEOFFSET) {
+ if (MAINPROCESS)
+ SKIPPED();
+ return;
+ }
dataset_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS;
dataset_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS;
@@ -5613,6 +6552,9 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS,
dataset_dims, NULL);
@@ -5622,46 +6564,45 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
chunk_dims[0] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
chunk_dims[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS,
chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME,
+ dset_id = H5Dcreate2(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME,
filetype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(file_id, "/" READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME,
- H5P_DEFAULT);
+ dset_id =
+ H5Dopen2(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
@@ -5701,18 +6642,12 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
- /* Create property list for collective dataset read */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
read_buf_size = flat_dims[0] * sizeof(*read_buf);
read_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, memtype, memspace, filespace, plist_id, read_buf) >= 0), "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, memtype, memspace, filespace, dxpl_id, read_buf) >= 0), "Dataset read succeeded");
global_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != global_buf), "HDcalloc succeeded");
@@ -5752,7 +6687,7 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Tclose(filetype) >= 0), "File datatype close succeeded");
VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -5772,7 +6707,8 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
* 05/17/2018
*/
static void
-test_read_cmpd_filtered_dataset_type_conversion_shared(void)
+test_read_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
COMPOUND_C_DATATYPE *read_buf = NULL;
COMPOUND_C_DATATYPE *correct_buf = NULL;
@@ -5786,8 +6722,10 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
hsize_t flat_dims[1];
size_t i, read_buf_size, correct_buf_size;
- hid_t file_id, dset_id, plist_id, filetype, memtype;
- hid_t filespace, memspace;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
int * recvcounts = NULL;
int * displs = NULL;
@@ -5795,7 +6733,12 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
HDputs(
"Testing read from shared filtered chunks in Compound Datatype dataset with Datatype conversion");
- CHECK_CUR_FILTER_AVAIL();
+ /* SZIP and ScaleOffset filters don't support compound types */
+ if (filter_id == H5Z_FILTER_SZIP || filter_id == H5Z_FILTER_SCALEOFFSET) {
+ if (MAINPROCESS)
+ SKIPPED();
+ return;
+ }
dataset_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS;
dataset_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS;
@@ -5851,6 +6794,9 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS,
dataset_dims, NULL);
@@ -5860,46 +6806,45 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
chunk_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS;
chunk_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS,
chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME,
+ dset_id = H5Dcreate2(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME,
filetype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
dset_id =
- H5Dopen2(file_id, "/" READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
+ H5Dopen2(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t)mpi_size;
@@ -5939,18 +6884,12 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
- /* Create property list for collective dataset read */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
read_buf_size = flat_dims[0] * sizeof(*read_buf);
read_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, memtype, memspace, filespace, plist_id, read_buf) >= 0), "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, memtype, memspace, filespace, dxpl_id, read_buf) >= 0), "Dataset read succeeded");
global_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != global_buf), "HDcalloc succeeded");
@@ -5989,7 +6928,7 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -6006,7 +6945,8 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
* 08/03/2017
*/
static void
-test_write_serial_read_parallel(void)
+test_write_serial_read_parallel(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
@@ -6014,14 +6954,13 @@ test_write_serial_read_parallel(void)
hsize_t dataset_dims[WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS];
hsize_t chunk_dims[WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write file serially; read file in parallel");
- CHECK_CUR_FILTER_AVAIL();
-
dataset_dims[0] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_NROWS;
dataset_dims[1] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_NCOLS;
dataset_dims[2] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_DEPTH;
@@ -6040,6 +6979,9 @@ test_write_serial_read_parallel(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
chunk_dims[0] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_CH_NROWS;
chunk_dims[1] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_CH_NCOLS;
@@ -6049,20 +6991,22 @@ test_write_serial_read_parallel(void)
VRFY((filespace >= 0), "File dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ dset_id = H5Dcreate2(group_id, WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
data_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*data);
@@ -6076,10 +7020,15 @@ test_write_serial_read_parallel(void)
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
if (data)
HDfree(data);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
@@ -6095,28 +7044,16 @@ test_write_serial_read_parallel(void)
correct_buf[i] = (long)i;
/* All ranks open the file and verify their "portion" of the dataset is correct */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -6127,13 +7064,13 @@ test_write_serial_read_parallel(void)
HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
}
-#if MPI_VERSION >= 3
+#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES
/*
* Tests parallel write of filtered data
* to a dataset. After the write has
@@ -6145,7 +7082,8 @@ test_write_serial_read_parallel(void)
* 08/03/2017
*/
static void
-test_write_parallel_read_serial(void)
+test_write_parallel_read_serial(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
@@ -6158,27 +7096,18 @@ test_write_parallel_read_serial(void)
hsize_t block[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS];
hsize_t offset[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write file in parallel; read serially");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)WRITE_PARALLEL_READ_SERIAL_NROWS;
@@ -6198,20 +7127,22 @@ test_write_parallel_read_serial(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, WRITE_PARALLEL_READ_SERIAL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ dset_id = H5Dcreate2(group_id, WRITE_PARALLEL_READ_SERIAL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Each process defines the dataset selection in memory and writes
@@ -6256,22 +7187,20 @@ test_write_parallel_read_serial(void)
for (i = 0; i < data_size / sizeof(*data); i++)
data[i] = (C_DATATYPE)GEN_DATA(i);
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
if (data)
HDfree(data);
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
if (MAINPROCESS) {
@@ -6286,7 +7215,10 @@ test_write_parallel_read_serial(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_PARALLEL_READ_SERIAL_DATASET_NAME, H5P_DEFAULT);
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ dset_id = H5Dopen2(group_id, WRITE_PARALLEL_READ_SERIAL_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
@@ -6307,6 +7239,7 @@ test_write_parallel_read_serial(void)
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
HDfree(correct_buf);
@@ -6326,9 +7259,11 @@ test_write_parallel_read_serial(void)
* 06/04/2018
*/
static void
-test_shrinking_growing_chunks(void)
+test_shrinking_growing_chunks(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id,
+ hid_t dxpl_id)
{
- double *data = NULL;
+ double *data = NULL;
+ double *read_buf = NULL;
hsize_t dataset_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
hsize_t chunk_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
hsize_t sel_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
@@ -6337,27 +7272,18 @@ test_shrinking_growing_chunks(void)
hsize_t count[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
hsize_t block[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
size_t i, data_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing continually shrinking/growing chunks");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)SHRINKING_GROWING_CHUNKS_NROWS;
@@ -6374,19 +7300,21 @@ test_shrinking_growing_chunks(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, SHRINKING_GROWING_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, SHRINKING_GROWING_CHUNKS_DATASET_NAME, H5T_NATIVE_DOUBLE, filespace,
+ dset_id = H5Dcreate2(group_id, SHRINKING_GROWING_CHUNKS_DATASET_NAME, H5T_NATIVE_DOUBLE, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/*
@@ -6417,39 +7345,1302 @@ test_shrinking_growing_chunks(void)
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
data_size = sel_dims[0] * sel_dims[1] * sizeof(double);
data = (double *)HDcalloc(1, data_size);
VRFY((NULL != data), "HDcalloc succeeded");
+ read_buf = (double *)HDcalloc(1, data_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
for (i = 0; i < SHRINKING_GROWING_CHUNKS_NLOOPS; i++) {
/* Continually write random float data, followed by zeroed-out data */
- if ((i % 2))
+ if (i % 2)
HDmemset(data, 0, data_size);
else {
size_t j;
for (j = 0; j < data_size / sizeof(*data); j++) {
- data[j] = (float)(rand() / (double)(RAND_MAX / (double)1.0L));
+ data[j] = (rand() / (double)(RAND_MAX / (double)1.0L));
}
}
- VRFY((H5Dwrite(dset_id, H5T_NATIVE_DOUBLE, memspace, filespace, plist_id, data) >= 0),
+ VRFY((H5Dwrite(dset_id, H5T_NATIVE_DOUBLE, memspace, filespace, dxpl_id, data) >= 0),
"Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
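+        /* Prime the read buffer with the opposite pattern so a read that
+         * silently leaves the buffer untouched cannot pass the comparison */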
+ if (i % 2) {
+ HDmemset(read_buf, 255, data_size);
+ }
+ else {
+ HDmemset(read_buf, 0, data_size);
+ }
+
+ VRFY((H5Dread(dset_id, H5T_NATIVE_DOUBLE, memspace, filespace, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+        VRFY((0 == HDmemcmp(read_buf, data, data_size)), "Data verification succeeded");
}
+ if (read_buf)
+ HDfree(read_buf);
if (data)
HDfree(data);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests that filtered and unfiltered partial edge chunks can be
+ * written to and read from correctly in parallel when only one MPI
+ * rank writes to a particular partial edge chunk in the dataset.
+ *
+ * The dataset contains partial edge chunks in the second dimension.
+ * Each MPI rank selects a hyperslab in the shape of a single chunk,
+ * offset so that it covers the whole partial edge chunk and part of
+ * the neighboring full chunk.
+ */
+static void
+test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id,
+ hid_t dxpl_id)
+{
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
+ hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
+ hsize_t start[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
+ hsize_t stride[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
+ hsize_t count[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
+ hsize_t block[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
+
+ if (MAINPROCESS)
+ HDputs("Testing write to unshared filtered edge chunks");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_NCOLS;
+ chunk_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS;
+ sel_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NROWS;
+ sel_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS;
+
+ filespace = H5Screate_simple(WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = 1;
+ count[1] = 1;
+ stride[0] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NROWS;
+ block[1] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS;
+ start[0] = ((hsize_t)mpi_rank * (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NROWS);
+ start[1] =
+ (hsize_t)(WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_NCOLS - WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS);
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ read_buf = (C_DATATYPE *)HDcalloc(1, data_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE)GEN_DATA(i);
+
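+    /* H5S_BLOCK treats the memory buffer as a contiguous block holding as
+     * many elements as are selected in the file dataspace */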
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, (mpi_size > 1) ? SOME_CHUNKS_WRITTEN : ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify the correct data was written */
+ dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, data, data_size)), "Data verification succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Repeat the previous, but set option to not filter partial edge chunks */
+ if (MAINPROCESS)
+ HDputs("Testing write to unshared unfiltered edge chunks");
+
+    VRFY((H5Pset_chunk_opts(plist_id, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) >= 0), "Chunk options set");
+
+ dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2, HDF5_DATATYPE_NAME,
+ filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = 1;
+ count[1] = 1;
+ stride[0] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NROWS;
+ block[1] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS;
+ start[0] = ((hsize_t)mpi_rank * (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NROWS);
+ start[1] =
+ (hsize_t)(WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_NCOLS - WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS);
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, (mpi_size > 1) ? SOME_CHUNKS_WRITTEN : ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify the correct data was written */
+ dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ HDmemset(read_buf, 255, data_size);
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, data, data_size)), "Data verification succeeded");
+
+ if (data)
+ HDfree(data);
+ if (read_buf)
+ HDfree(read_buf);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests that filtered and unfiltered partial edge chunks can be
+ * written to and read from correctly in parallel when every MPI
+ * rank writes to every partial edge chunk in the dataset.
+ *
+ * The dataset contains partial edge chunks in the second dimension.
+ * Each MPI rank selects a hyperslab covering one row of each chunk,
+ * offset in the second dimension so that it spans the whole partial
+ * edge chunk and part of the neighboring full chunk.
+ */
+static void
+test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id,
+ hid_t dxpl_id)
+{
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
+ hsize_t dataset_dims[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
+ hsize_t start[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
+ hsize_t stride[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
+ hsize_t count[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
+ hsize_t block[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
+
+ if (MAINPROCESS)
+ HDputs("Testing write to shared filtered edge chunks");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_NCOLS;
+ chunk_dims[0] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS;
+ sel_dims[0] = (hsize_t)DIM0_SCALE_FACTOR;
+ sel_dims[1] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS;
+
+ filespace = H5Screate_simple(WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(group_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
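+    /* One single-row block per chunk row: every rank writes to every partial
+     * edge chunk (and part of its neighboring full chunk) in the dataset */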
+ count[0] =
+ (hsize_t)(WRITE_SHARED_FILTERED_EDGE_CHUNKS_NROWS / WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NROWS);
+ count[1] = 1;
+ stride[0] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t)1;
+ block[1] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS;
+ start[0] = (hsize_t)mpi_rank;
+ start[1] =
+ (hsize_t)(WRITE_SHARED_FILTERED_EDGE_CHUNKS_NCOLS - WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS);
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ read_buf = (C_DATATYPE *)HDcalloc(1, data_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE)GEN_DATA(i);
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify the correct data was written */
+ dset_id = H5Dopen2(group_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, data, data_size)), "Data verification succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Repeat the previous, but set option to not filter partial edge chunks */
+ if (MAINPROCESS)
+ HDputs("Testing write to shared unfiltered edge chunks");
+
+    VRFY((H5Pset_chunk_opts(plist_id, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) >= 0), "Chunk options set");
+
+ dset_id = H5Dcreate2(group_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2, HDF5_DATATYPE_NAME,
+ filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] =
+ (hsize_t)(WRITE_SHARED_FILTERED_EDGE_CHUNKS_NROWS / WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NROWS);
+ count[1] = 1;
+ stride[0] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t)1;
+ block[1] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS;
+ start[0] = (hsize_t)mpi_rank;
+ start[1] =
+ (hsize_t)(WRITE_SHARED_FILTERED_EDGE_CHUNKS_NCOLS - WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS);
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify the correct data was written */
+ dset_id = H5Dopen2(group_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ HDmemset(read_buf, 255, data_size);
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, data, data_size)), "Data verification succeeded");
+
+ if (data)
+ HDfree(data);
+ if (read_buf)
+ HDfree(read_buf);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests that filtered and unfiltered partial edge chunks can be
+ * written to and read from correctly in parallel when only one
+ * MPI rank writes to a particular edge chunk in the dataset and
+ * only performs a partial write to the edge chunk.
+ *
+ * The dataset contains partial edge chunks in the second dimension.
+ * Each MPI rank selects a hyperslab covering just a portion of a
+ * single edge chunk and writes only to that portion.
+ */
+static void
+test_edge_chunks_partial_write(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id,
+ hid_t dxpl_id)
+{
+ /* TODO */
+}
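+
+/*
+ * A rough sketch of what this test could do once implemented (hypothetical;
+ * the body above is still a TODO): each rank would select a small piece of
+ * its own edge chunk, e.g.
+ *
+ *     start[0] = (hsize_t)mpi_rank * chunk_dims[0];
+ *     start[1] = dataset_dims[1] - 1;
+ *     count[0] = count[1] = 1;
+ *     block[0] = chunk_dims[0];
+ *     block[1] = 1;
+ *
+ * then write with the collective DXPL and read the selection back for an
+ * HDmemcmp check, following the pattern of test_edge_chunks_no_overlap().
+ */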
+
+/*
+ * Tests that the parallel compression feature correctly handles
+ * writing fill values to a dataset and reading fill values from
+ * unallocated parts of a dataset.
+ */
+static void
+test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id,
+ hid_t dxpl_id)
+{
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE fill_value;
+ hsize_t dataset_dims[FILL_VALUES_TEST_DATASET_DIMS];
+ hsize_t chunk_dims[FILL_VALUES_TEST_DATASET_DIMS];
+ hsize_t sel_dims[FILL_VALUES_TEST_DATASET_DIMS];
+ hsize_t start[FILL_VALUES_TEST_DATASET_DIMS];
+ hsize_t stride[FILL_VALUES_TEST_DATASET_DIMS];
+ hsize_t count[FILL_VALUES_TEST_DATASET_DIMS];
+ hsize_t block[FILL_VALUES_TEST_DATASET_DIMS];
+ size_t i, data_size, read_buf_size;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
+ int * recvcounts = NULL;
+ int * displs = NULL;
+
+ if (MAINPROCESS)
+ HDputs("Testing fill values");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t)FILL_VALUES_TEST_NROWS;
+ dataset_dims[1] = (hsize_t)FILL_VALUES_TEST_NCOLS;
+ chunk_dims[0] = (hsize_t)FILL_VALUES_TEST_CH_NROWS;
+ chunk_dims[1] = (hsize_t)FILL_VALUES_TEST_CH_NCOLS;
+ sel_dims[0] = (hsize_t)DIM0_SCALE_FACTOR;
+ sel_dims[1] = (hsize_t)FILL_VALUES_TEST_CH_NCOLS * (hsize_t)DIM1_SCALE_FACTOR;
+
+ filespace = H5Screate_simple(FILL_VALUES_TEST_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, FILL_VALUES_TEST_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ /* Set a fill value */
+ fill_value = FILL_VALUES_TEST_FILL_VAL;
+ VRFY((H5Pset_fill_value(plist_id, HDF5_DATATYPE_NAME, &fill_value) >= 0), "Fill Value set");
+
+ dset_id = H5Dcreate2(group_id, FILL_VALUES_TEST_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT,
+ plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Allocate buffer for reading entire dataset */
+ read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*read_buf);
+
+ read_buf = HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ correct_buf = HDcalloc(1, read_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ /* Read entire dataset and verify that the fill value is returned */
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ for (i = 0; i < read_buf_size / sizeof(*read_buf); i++)
+ correct_buf[i] = FILL_VALUES_TEST_FILL_VAL;
+
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, read_buf_size)), "Data verification succeeded");
+
+ /*
+ * Write to part of the first chunk in the dataset with
+ * all ranks, then read the whole dataset and ensure that
+ * the fill value is returned for the unwritten part of
+ * the chunk, as well as for the rest of the dataset that
+ * hasn't been written to yet.
+ */
+ count[0] = 1;
+ count[1] = 1;
+ stride[0] = (hsize_t)FILL_VALUES_TEST_CH_NROWS;
+ stride[1] = (hsize_t)FILL_VALUES_TEST_CH_NCOLS;
+ block[0] = 1;
+ block[1] = (hsize_t)(FILL_VALUES_TEST_CH_NCOLS - 1);
+ start[0] = (hsize_t)mpi_rank;
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE)GEN_DATA(i);
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify correct data was written */
+ dset_id = H5Dopen2(group_id, FILL_VALUES_TEST_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ /*
+     * Each MPI rank communicates its written piece of data
+     * into every rank's correctness-checking buffer
+ */
+ recvcounts = HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "HDcalloc succeeded");
+
+ displs = HDcalloc(1, (size_t)mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "HDcalloc succeeded");
+
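+    /* Rank i wrote count[1] * block[1] elements at the start of row i, so its
+     * contribution lands at offset i * dataset_dims[1] in the flat buffer */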
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ recvcounts[i] = (int)(count[1] * block[1]);
+ displs[i] = (int)(i * dataset_dims[1]);
+ }
+
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(data, recvcounts[mpi_rank], C_DATATYPE_MPI, correct_buf, recvcounts,
+ displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, read_buf_size)), "Data verification succeeded");
+
+ /*
+ * Write to whole dataset and ensure fill value isn't returned
+ * after reading whole dataset back
+ */
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = (hsize_t)FILL_VALUES_TEST_NROWS / (hsize_t)FILL_VALUES_TEST_CH_NROWS;
+ count[1] = (hsize_t)FILL_VALUES_TEST_NCOLS / (hsize_t)FILL_VALUES_TEST_CH_NCOLS;
+ stride[0] = (hsize_t)FILL_VALUES_TEST_CH_NROWS;
+ stride[1] = (hsize_t)FILL_VALUES_TEST_CH_NCOLS;
+ block[0] = (hsize_t)FILL_VALUES_TEST_CH_NROWS / (hsize_t)mpi_size;
+ block[1] = (hsize_t)FILL_VALUES_TEST_CH_NCOLS;
+ start[0] = (hsize_t)mpi_rank * block[0];
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify correct data was written */
+ dset_id = H5Dopen2(group_id, FILL_VALUES_TEST_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ for (i = 0; i < read_buf_size / sizeof(*read_buf); i++)
+ VRFY((read_buf[i] != FILL_VALUES_TEST_FILL_VAL), "Data verification succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /********************************************************************
+ * Set the fill time to H5D_FILL_TIME_ALLOC and repeat the previous *
+ ********************************************************************/
+
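+    /* H5D_FILL_TIME_ALLOC writes the fill value whenever chunk storage is
+     * allocated, rather than only when a fill value has been defined */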
+ VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_ALLOC) >= 0), "H5Pset_fill_time succeeded");
+
+ dset_id = H5Dcreate2(group_id, FILL_VALUES_TEST_DATASET_NAME2, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT,
+ plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Read entire dataset and verify that the fill value is returned */
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ for (i = 0; i < read_buf_size / sizeof(*read_buf); i++)
+ correct_buf[i] = FILL_VALUES_TEST_FILL_VAL;
+
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, read_buf_size)), "Data verification succeeded");
+
+ /*
+ * Write to part of the first chunk in the dataset with
+ * all ranks, then read the whole dataset and ensure that
+ * the fill value is returned for the unwritten part of
+ * the chunk, as well as for the rest of the dataset that
+ * hasn't been written to yet.
+ */
+ count[0] = 1;
+ count[1] = 1;
+ stride[0] = (hsize_t)FILL_VALUES_TEST_CH_NROWS;
+ stride[1] = (hsize_t)FILL_VALUES_TEST_CH_NCOLS;
+ block[0] = 1;
+ block[1] = (hsize_t)(FILL_VALUES_TEST_CH_NCOLS - 1);
+ start[0] = (hsize_t)mpi_rank;
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE)GEN_DATA(i);
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify correct data was written */
+ dset_id = H5Dopen2(group_id, FILL_VALUES_TEST_DATASET_NAME2, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ recvcounts[i] = (int)(count[1] * block[1]);
+ displs[i] = (int)(i * dataset_dims[1]);
+ }
+
+ /*
+     * Each MPI rank communicates its written piece of data
+     * into every rank's correctness-checking buffer
+ */
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(data, recvcounts[mpi_rank], C_DATATYPE_MPI, correct_buf, recvcounts,
+ displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, read_buf_size)), "Data verification succeeded");
+
+ /*
+ * Write to whole dataset and ensure fill value isn't returned
+ * after reading whole dataset back
+ */
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = (hsize_t)FILL_VALUES_TEST_NROWS / (hsize_t)FILL_VALUES_TEST_CH_NROWS;
+ count[1] = (hsize_t)FILL_VALUES_TEST_NCOLS / (hsize_t)FILL_VALUES_TEST_CH_NCOLS;
+ stride[0] = (hsize_t)FILL_VALUES_TEST_CH_NROWS;
+ stride[1] = (hsize_t)FILL_VALUES_TEST_CH_NCOLS;
+ block[0] = (hsize_t)FILL_VALUES_TEST_CH_NROWS / (hsize_t)mpi_size;
+ block[1] = (hsize_t)FILL_VALUES_TEST_CH_NCOLS;
+ start[0] = (hsize_t)mpi_rank * block[0];
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify correct data was written */
+ dset_id = H5Dopen2(group_id, FILL_VALUES_TEST_DATASET_NAME2, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ for (i = 0; i < read_buf_size / sizeof(*read_buf); i++)
+ VRFY((read_buf[i] != FILL_VALUES_TEST_FILL_VAL), "Data verification succeeded");
+
+ if (displs)
+ HDfree(displs);
+ if (recvcounts)
+ HDfree(recvcounts);
+ if (data)
+ HDfree(data);
+ if (read_buf)
+ HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
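The whole-dataset writes in these fill-value tests all use the same row-block decomposition. A minimal sketch of the selection arithmetic, using hypothetical nrows/ncols/ch_nrows/ch_ncols names and assuming the dataset dims are exact multiples of the chunk dims and ch_nrows is a multiple of mpi_size:

    /* Sketch: each rank selects a horizontal stripe of every chunk */
    hsize_t count[2]  = {nrows / ch_nrows, ncols / ch_ncols};     /* one block per chunk */
    hsize_t stride[2] = {ch_nrows, ch_ncols};                     /* step chunk to chunk */
    hsize_t block[2]  = {ch_nrows / (hsize_t)mpi_size, ch_ncols}; /* rows per rank */
    hsize_t start[2]  = {(hsize_t)mpi_rank * block[0], 0};

    VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
         "Hyperslab selection succeeded");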
+
+/*
+ * Tests that the parallel compression feature can handle
+ * an undefined fill value. Nothing is verified in this
+ * test since the fill value isn't defined.
+ */
+static void
+test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id,
+ hid_t dxpl_id)
+{
+ H5D_alloc_time_t alloc_time;
+ C_DATATYPE * data = NULL;
+ C_DATATYPE * read_buf = NULL;
+ hsize_t dataset_dims[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS];
+ hsize_t chunk_dims[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS];
+ hsize_t sel_dims[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS];
+ hsize_t start[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS];
+ hsize_t stride[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS];
+ hsize_t count[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS];
+ hsize_t block[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS];
+ size_t i, data_size, read_buf_size;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
+
+ if (MAINPROCESS)
+ HDputs("Testing undefined fill value");
+
+ VRFY((H5Pget_alloc_time(dcpl_id, &alloc_time) >= 0), "H5Pget_alloc_time succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_NROWS;
+ dataset_dims[1] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_NCOLS;
+ chunk_dims[0] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NROWS;
+ chunk_dims[1] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NCOLS;
+ sel_dims[0] = (hsize_t)DIM0_SCALE_FACTOR;
+ sel_dims[1] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NCOLS * (hsize_t)DIM1_SCALE_FACTOR;
+
+ filespace = H5Screate_simple(FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ /* Set an undefined fill value */
+ VRFY((H5Pset_fill_value(plist_id, HDF5_DATATYPE_NAME, NULL) >= 0), "Fill Value set");
+
+ dset_id = H5Dcreate2(group_id, FILL_VALUE_UNDEFINED_TEST_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Allocate buffer for reading entire dataset */
+ read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*read_buf);
+
+ read_buf = HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ /*
+ * Read entire dataset - nothing to verify since there's no fill value.
+ * If not using early space allocation, the read should fail since storage
+ * isn't allocated yet and no fill value is defined.
+ */
+ if (alloc_time == H5D_ALLOC_TIME_EARLY) {
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+ }
+ else {
+ H5E_BEGIN_TRY
+ {
+            VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) < 0),
+                 "Dataset read failed as expected");
+ }
+ H5E_END_TRY;
+ }
+
+ /*
+ * Write to part of the first chunk in the dataset with
+ * all ranks, then read the whole dataset. Don't verify
+ * anything since there's no fill value defined.
+ */
+ count[0] = 1;
+ count[1] = 1;
+ stride[0] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NROWS;
+ stride[1] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NCOLS;
+ block[0] = 1;
+ block[1] = (hsize_t)(FILL_VALUE_UNDEFINED_TEST_CH_NCOLS - 1);
+ start[0] = (hsize_t)mpi_rank;
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE)GEN_DATA(i);
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ dset_id = H5Dopen2(group_id, FILL_VALUE_UNDEFINED_TEST_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ /*
+ * Write to whole dataset and ensure data is correct
+ * after reading whole dataset back
+ */
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_NROWS / (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NROWS;
+ count[1] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_NCOLS / (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NCOLS;
+ stride[0] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NROWS;
+ stride[1] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NCOLS;
+ block[0] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NROWS / (hsize_t)mpi_size;
+ block[1] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NCOLS;
+ start[0] = (hsize_t)mpi_rank * block[0];
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify correct data was written */
+ dset_id = H5Dopen2(group_id, FILL_VALUE_UNDEFINED_TEST_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ if (data)
+ HDfree(data);
+ if (read_buf)
+ HDfree(read_buf);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
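For background on the test above: passing NULL to H5Pset_fill_value is how a fill value is declared undefined, and H5Pfill_value_defined reports that state back. A minimal standalone sketch:

    hid_t            dcpl = H5Pcreate(H5P_DATASET_CREATE);
    H5D_fill_value_t status;

    /* A NULL value buffer marks the fill value as explicitly undefined */
    VRFY((H5Pset_fill_value(dcpl, H5T_NATIVE_LONG, NULL) >= 0), "Fill Value set");

    VRFY((H5Pfill_value_defined(dcpl, &status) >= 0), "H5Pfill_value_defined succeeded");
    VRFY((status == H5D_FILL_VALUE_UNDEFINED), "Fill value is undefined");

    VRFY((H5Pclose(dcpl) >= 0), "DCPL close succeeded");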
+
+/*
+ * Tests that the parallel compression feature correctly handles
+ * avoiding writing fill values to a dataset when the fill time
+ * is set as H5D_FILL_TIME_NEVER.
+ */
+static void
+test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id,
+ hid_t dxpl_id)
+{
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *fill_buf = NULL;
+ C_DATATYPE fill_value;
+ hsize_t dataset_dims[FILL_TIME_NEVER_TEST_DATASET_DIMS];
+ hsize_t chunk_dims[FILL_TIME_NEVER_TEST_DATASET_DIMS];
+ hsize_t sel_dims[FILL_TIME_NEVER_TEST_DATASET_DIMS];
+ hsize_t start[FILL_TIME_NEVER_TEST_DATASET_DIMS];
+ hsize_t stride[FILL_TIME_NEVER_TEST_DATASET_DIMS];
+ hsize_t count[FILL_TIME_NEVER_TEST_DATASET_DIMS];
+ hsize_t block[FILL_TIME_NEVER_TEST_DATASET_DIMS];
+ size_t i, data_size, read_buf_size;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
+ int * recvcounts = NULL;
+ int * displs = NULL;
+
+ if (MAINPROCESS)
+ HDputs("Testing fill time H5D_FILL_TIME_NEVER");
+
+ /*
+ * Only run this test when incremental file space allocation is
+ * used, as HDF5's chunk allocation code always writes fill values
+ * when filters are in the pipeline, but parallel compression does
+ * incremental file space allocation differently.
+ */
+ {
+ H5D_alloc_time_t alloc_time;
+
+ VRFY((H5Pget_alloc_time(dcpl_id, &alloc_time) >= 0), "H5Pget_alloc_time succeeded");
+
+ if (alloc_time != H5D_ALLOC_TIME_INCR) {
+ if (MAINPROCESS)
+ SKIPPED();
+ return;
+ }
+ }
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t)FILL_TIME_NEVER_TEST_NROWS;
+ dataset_dims[1] = (hsize_t)FILL_TIME_NEVER_TEST_NCOLS;
+ chunk_dims[0] = (hsize_t)FILL_TIME_NEVER_TEST_CH_NROWS;
+ chunk_dims[1] = (hsize_t)FILL_TIME_NEVER_TEST_CH_NCOLS;
+ sel_dims[0] = (hsize_t)DIM0_SCALE_FACTOR;
+ sel_dims[1] = (hsize_t)FILL_TIME_NEVER_TEST_CH_NCOLS * (hsize_t)DIM1_SCALE_FACTOR;
+
+ filespace = H5Screate_simple(FILL_TIME_NEVER_TEST_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, FILL_TIME_NEVER_TEST_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ /* Set a fill value */
+    fill_value = FILL_TIME_NEVER_TEST_FILL_VAL;
+ VRFY((H5Pset_fill_value(plist_id, HDF5_DATATYPE_NAME, &fill_value) >= 0), "Fill Value set");
+
+ /* Set fill time of 'never' */
+ VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_NEVER) >= 0), "H5Pset_fill_time succeeded");
+
+ dset_id = H5Dcreate2(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Allocate buffer for reading entire dataset */
+ read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*read_buf);
+
+ read_buf = HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ fill_buf = HDcalloc(1, read_buf_size);
+ VRFY((NULL != fill_buf), "HDcalloc succeeded");
+
+ /* Read entire dataset and verify that the fill value isn't returned */
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ for (i = 0; i < read_buf_size / sizeof(*read_buf); i++)
+ fill_buf[i] = FILL_TIME_NEVER_TEST_FILL_VAL;
+
+ /*
+ * It should be very unlikely for the dataset's random
+ * values to all be the fill value, so this should be
+ * a safe comparison in theory.
+ */
+ VRFY((0 != HDmemcmp(read_buf, fill_buf, read_buf_size)), "Data verification succeeded");
+
+ /*
+ * Write to part of the first chunk in the dataset with
+ * all ranks, then read the whole dataset and ensure that
+ * the fill value isn't returned for the unwritten part of
+ * the chunk, as well as for the rest of the dataset that
+ * hasn't been written to yet.
+ */
+ count[0] = 1;
+ count[1] = 1;
+ stride[0] = (hsize_t)FILL_TIME_NEVER_TEST_CH_NROWS;
+ stride[1] = (hsize_t)FILL_TIME_NEVER_TEST_CH_NCOLS;
+ block[0] = 1;
+ block[1] = (hsize_t)(FILL_TIME_NEVER_TEST_CH_NCOLS - 1);
+ start[0] = (hsize_t)mpi_rank;
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE)GEN_DATA(i);
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify correct data was written */
+ dset_id = H5Dopen2(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ /*
+     * Each MPI rank communicates its written piece of data
+ * into each other rank's correctness-checking buffer
+ */
+ recvcounts = HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "HDcalloc succeeded");
+
+ displs = HDcalloc(1, (size_t)mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ recvcounts[i] = (int)(count[1] * block[1]);
+ displs[i] = (int)(i * dataset_dims[1]);
+ }
+
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(data, recvcounts[mpi_rank], C_DATATYPE_MPI, fill_buf, recvcounts,
+ displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
+
+ /*
+ * It should be very unlikely for the dataset's random
+ * values to all be the fill value, so this should be
+ * a safe comparison in theory.
+ */
+ VRFY((0 != HDmemcmp(read_buf, fill_buf, read_buf_size)), "Data verification succeeded");
+
+ /*
+ * Write to whole dataset and ensure fill value isn't returned
+ * after reading whole dataset back
+ */
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = (hsize_t)FILL_TIME_NEVER_TEST_NROWS / (hsize_t)FILL_TIME_NEVER_TEST_CH_NROWS;
+ count[1] = (hsize_t)FILL_TIME_NEVER_TEST_NCOLS / (hsize_t)FILL_TIME_NEVER_TEST_CH_NCOLS;
+ stride[0] = (hsize_t)FILL_TIME_NEVER_TEST_CH_NROWS;
+ stride[1] = (hsize_t)FILL_TIME_NEVER_TEST_CH_NCOLS;
+ block[0] = (hsize_t)FILL_TIME_NEVER_TEST_CH_NROWS / (hsize_t)mpi_size;
+ block[1] = (hsize_t)FILL_TIME_NEVER_TEST_CH_NCOLS;
+ start[0] = (hsize_t)mpi_rank * block[0];
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify correct data was written */
+ dset_id = H5Dopen2(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ for (i = 0; i < read_buf_size / sizeof(*read_buf); i++)
+ VRFY((read_buf[i] != FILL_TIME_NEVER_TEST_FILL_VAL), "Data verification succeeded");
+
+ if (displs)
+ HDfree(displs);
+ if (recvcounts)
+ HDfree(recvcounts);
+ if (data)
+ HDfree(data);
+ if (read_buf)
+ HDfree(read_buf);
+ if (fill_buf)
+ HDfree(fill_buf);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
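For reference, the fill-time policies this test distinguishes are all set through H5Pset_fill_time on the DCPL; a short sketch of the three values, applied to a property list like the one above:

    /* H5D_FILL_TIME_IFSET (default) - write fill values at allocation time
     *                                 only if one is defined
     * H5D_FILL_TIME_ALLOC           - always write fill values at allocation time
     * H5D_FILL_TIME_NEVER           - never write fill values; unwritten regions
     *                                 hold unspecified bytes, which is the case
     *                                 exercised above */
    VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_NEVER) >= 0), "H5Pset_fill_time succeeded");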
@@ -6459,8 +8650,14 @@ test_shrinking_growing_chunks(void)
int
main(int argc, char **argv)
{
- size_t i;
- hid_t file_id = -1, fapl = -1;
+ size_t cur_filter_idx = 0;
+ size_t num_filters = 0;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fcpl_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
int mpi_code;
/* Initialize MPI */
@@ -6487,7 +8684,7 @@ main(int argc, char **argv)
if (MAINPROCESS) {
HDprintf("==========================\n");
- HDprintf("Parallel Filters tests\n");
+ HDprintf(" Parallel Filters tests\n");
HDprintf("==========================\n\n");
}
@@ -6496,72 +8693,161 @@ main(int argc, char **argv)
TestAlarmOn();
- /* Create test file */
- fapl = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((fapl >= 0), "FAPL creation succeeded");
+ num_filters = ARRAY_SIZE(filterIDs);
- VRFY((H5Pset_fapl_mpio(fapl, comm, info) >= 0), "Set FAPL MPIO succeeded");
+ /* Set up file access property list with parallel I/O access,
+ * collective metadata reads/writes and the latest library
+ * version bounds */
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ VRFY((H5Pset_fapl_mpio(fapl_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_all_coll_metadata_ops(fapl_id, TRUE) >= 0), "H5Pset_all_coll_metadata_ops succeeded");
+ VRFY((H5Pset_coll_metadata_write(fapl_id, TRUE) >= 0), "H5Pset_coll_metadata_write succeeded");
- VRFY((h5_fixname(FILENAME[0], fapl, filenames[0], sizeof(filenames[0])) != NULL),
- "Test file name created");
-
- file_id = H5Fcreate(filenames[0], H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
- VRFY((file_id >= 0), "Test file creation succeeded");
-
- VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
-
- for (i = 0; i < ARRAY_SIZE(tests); i++) {
- if (MPI_SUCCESS == (mpi_code = MPI_Barrier(comm))) {
- (*tests[i])();
- }
- else {
- if (MAINPROCESS)
- MESG("MPI_Barrier failed");
- nerrors++;
- }
- }
+ VRFY((H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
/*
- * Increment the filter index to switch to the checksum filter
- * and re-run the tests.
+ * Set up Paged and Persistent Free Space Management
*/
- cur_filter_idx++;
-
- h5_clean_files(FILENAME, fapl);
+ fcpl_id = H5Pcreate(H5P_FILE_CREATE);
+ VRFY((fcpl_id >= 0), "FCPL creation succeeded");
- fapl = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((fapl >= 0), "FAPL creation succeeded");
+ VRFY((H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE, TRUE, 1) >= 0),
+ "H5Pset_file_space_strategy succeeded");
- VRFY((H5Pset_fapl_mpio(fapl, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ VRFY((h5_fixname(FILENAME[0], fapl_id, filenames[0], sizeof(filenames[0])) != NULL),
+ "Test file name created");
- file_id = H5Fcreate(filenames[0], H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ file_id = H5Fcreate(filenames[0], H5F_ACC_TRUNC, fcpl_id, fapl_id);
VRFY((file_id >= 0), "Test file creation succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ file_id = H5I_INVALID_HID;
- if (MAINPROCESS) {
- HDprintf("\n=================================================================\n");
- HDprintf("Re-running Parallel Filters tests with Fletcher32 checksum filter\n");
- HDprintf("=================================================================\n\n");
- }
-
- for (i = 0; i < ARRAY_SIZE(tests); i++) {
- if (MPI_SUCCESS == (mpi_code = MPI_Barrier(comm))) {
- (*tests[i])();
- }
- else {
- if (MAINPROCESS)
- MESG("MPI_Barrier failed");
- nerrors++;
+ /* Create property list for collective dataset write */
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ /* Create DCPL for dataset creation */
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl_id >= 0), "DCPL creation succeeded");
+
+ /* Run tests with all available filters */
+ for (cur_filter_idx = 0; cur_filter_idx < num_filters; cur_filter_idx++) {
+ H5FD_mpio_chunk_opt_t chunk_opt;
+ H5Z_filter_t cur_filter = filterIDs[cur_filter_idx];
+
+ /* Run tests with both linked-chunk and multi-chunk I/O */
+ for (chunk_opt = H5FD_MPIO_CHUNK_ONE_IO; chunk_opt <= H5FD_MPIO_CHUNK_MULTI_IO; chunk_opt++) {
+ H5D_alloc_time_t space_alloc_time;
+
+ /* Run tests with all available space allocation times */
+ for (space_alloc_time = H5D_ALLOC_TIME_EARLY; space_alloc_time <= H5D_ALLOC_TIME_INCR;
+ space_alloc_time++) {
+ const char *alloc_time;
+ unsigned filter_config;
+ htri_t filter_avail;
+ size_t i;
+ char group_name[512];
+
+ switch (space_alloc_time) {
+ case H5D_ALLOC_TIME_EARLY:
+ alloc_time = "Early";
+ break;
+ case H5D_ALLOC_TIME_LATE:
+ alloc_time = "Late";
+ break;
+ case H5D_ALLOC_TIME_INCR:
+ alloc_time = "Incremental";
+ break;
+ default:
+ alloc_time = "Unknown";
+ }
+
+ if (MAINPROCESS)
+ HDprintf("== Running tests with filter '%s' using '%s' and '%s' allocation time ==\n\n",
+ filterNames[cur_filter_idx],
+ H5FD_MPIO_CHUNK_ONE_IO == chunk_opt ? "Linked-Chunk I/O" : "Multi-Chunk I/O",
+ alloc_time);
+
+ /* Make sure current filter is available before testing with it */
+ filter_avail = H5Zfilter_avail(cur_filter);
+ VRFY((filter_avail >= 0), "H5Zfilter_avail succeeded");
+
+ if (!filter_avail) {
+ if (MAINPROCESS)
+ HDprintf(" ** SKIPPED tests with filter '%s' - filter unavailable **\n\n",
+ filterNames[cur_filter_idx]);
+ continue;
+ }
+
+ /* Get the current filter's info */
+ VRFY((H5Zget_filter_info(cur_filter, &filter_config) >= 0), "H5Zget_filter_info succeeded");
+
+ /* Determine if filter is encode-enabled */
+ if (0 == (filter_config & H5Z_FILTER_CONFIG_ENCODE_ENABLED)) {
+ if (MAINPROCESS)
+ HDprintf(" ** SKIPPED tests with filter '%s' - filter not encode-enabled **\n\n",
+ filterNames[cur_filter_idx]);
+ continue;
+ }
+
+ /* Set space allocation time */
+ VRFY((H5Pset_alloc_time(dcpl_id, space_alloc_time) >= 0), "H5Pset_alloc_time succeeded");
+
+ /* Set chunk I/O optimization method */
+ VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, chunk_opt) >= 0),
+ "H5Pset_dxpl_mpio_chunk_opt succeeded");
+
+            /* Create a group to hold all the datasets for this combination
+             * of filter, chunk optimization mode and space allocation time.
+             * Then, close the file again since some tests may need to open
+             * the file in a special way, like on rank 0 only */
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "H5Fopen succeeded");
+
+ HDsnprintf(group_name, sizeof(group_name), "%s_%s_%s", filterNames[cur_filter_idx],
+ H5FD_MPIO_CHUNK_ONE_IO == chunk_opt ? "linked-chunk-io" : "multi-chunk-io",
+ alloc_time);
+
+ group_id = H5Gcreate2(file_id, group_name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gcreate2 succeeded");
+
+            VRFY((H5Gclose(group_id) >= 0), "H5Gclose succeeded");
+ group_id = H5I_INVALID_HID;
+
+ VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded");
+ file_id = H5I_INVALID_HID;
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ test_func func = tests[i];
+
+ if (MPI_SUCCESS == (mpi_code = MPI_Barrier(comm))) {
+ func(group_name, cur_filter, fapl_id, dcpl_id, dxpl_id);
+ }
+ else {
+ if (MAINPROCESS)
+ MESG("MPI_Barrier failed");
+ nerrors++;
+ }
+ }
+
+ if (MAINPROCESS)
+ HDputs("");
+ }
}
}
+ VRFY((H5Pclose(dcpl_id) >= 0), "DCPL close succeeded");
+ dcpl_id = H5I_INVALID_HID;
+
+ VRFY((H5Pclose(dxpl_id) >= 0), "DXPL close succeeded");
+ dxpl_id = H5I_INVALID_HID;
+
if (nerrors)
goto exit;
@@ -6575,7 +8861,21 @@ exit:
TestAlarmOff();
- h5_clean_files(FILENAME, fapl);
+ h5_clean_files(FILENAME, fapl_id);
+ fapl_id = H5I_INVALID_HID;
+
+ if (dcpl_id >= 0)
+ VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded");
+ if (dxpl_id >= 0)
+ VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded");
+ if (fapl_id >= 0)
+ VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded");
+ if (fcpl_id >= 0)
+ VRFY((H5Pclose(fcpl_id) >= 0), "H5Pclose succeeded");
+ if (group_id >= 0)
+ VRFY((H5Gclose(group_id) >= 0), "H5Gclose succeeded");
+ if (file_id >= 0)
+ VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded");
H5close();
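The rewritten driver assumes every test shares a single signature so it can be called from the filter/chunk-opt/alloc-time loop above; a sketch of the presumed callback type behind the test_func name used in that loop (its actual declaration lives elsewhere in this file):

    /* Presumed shape of the per-test callback the driver invokes */
    typedef void (*test_func)(const char *parent_group, H5Z_filter_t filter_id,
                              hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);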
diff --git a/testpar/t_filters_parallel.h b/testpar/t_filters_parallel.h
index 7eb34ed..800604c 100644
--- a/testpar/t_filters_parallel.h
+++ b/testpar/t_filters_parallel.h
@@ -30,23 +30,23 @@
#include "stdlib.h"
#include "testpar.h"
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
/* Used to load other filters than GZIP */
/* #define DYNAMIC_FILTER */ /* Uncomment and define the fields below to use a dynamically loaded filter */
+
+#ifdef DYNAMIC_FILTER
#define FILTER_NUM_CDVALUES 1
const unsigned int cd_values[FILTER_NUM_CDVALUES] = {0};
-H5Z_filter_t filter_id;
-unsigned int flags = 0;
-size_t cd_nelmts = FILTER_NUM_CDVALUES;
-
-/* Utility Macros */
-#define STRINGIFY(type) #type
+unsigned int flags = 0;
+size_t cd_nelmts = FILTER_NUM_CDVALUES;
+#endif
/* Common defines for all tests */
-#define C_DATATYPE long
-#define C_DATATYPE_MPI MPI_LONG
-#define COMPOUND_C_DATATYPE cmpd_filtered_t
-#define C_DATATYPE_STR(type) STRINGIFY(type)
-#define HDF5_DATATYPE_NAME H5T_NATIVE_LONG
+#define C_DATATYPE long
+#define C_DATATYPE_MPI MPI_LONG
+#define COMPOUND_C_DATATYPE cmpd_filtered_t
+#define HDF5_DATATYPE_NAME H5T_NATIVE_LONG
/* Macro used to generate data for datasets for later verification */
#define GEN_DATA(i) INCREMENTAL_DATA(i)
@@ -59,7 +59,7 @@ size_t cd_nelmts = FILTER_NUM_CDVALUES;
#define RANK_DATA(i) \
(mpi_rank) /* Generates test data to visibly show which rank wrote to which parts of the dataset */
-#define DEFAULT_DEFLATE_LEVEL 6
+#define DEFAULT_DEFLATE_LEVEL 9
#define DIM0_SCALE_FACTOR 4
#define DIM1_SCALE_FACTOR 2
@@ -89,6 +89,14 @@ typedef struct {
#define WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS (WRITE_UNSHARED_FILTERED_CHUNKS_NROWS / mpi_size)
#define WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS (WRITE_UNSHARED_FILTERED_CHUNKS_NCOLS / mpi_size)
+/* Defines for the unshared filtered chunks partial write test */
+#define WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_NAME "unshared_filtered_chunks_partial_write"
+#define WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS 2
+#define WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NROWS (DIM0_SCALE_FACTOR)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NCOLS (DIM1_SCALE_FACTOR)
+
/* Defines for the shared filtered chunks write test */
#define WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME "shared_filtered_chunks_write"
#define WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS 2
@@ -97,6 +105,42 @@ typedef struct {
#define WRITE_SHARED_FILTERED_CHUNKS_NROWS (WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS * DIM0_SCALE_FACTOR)
#define WRITE_SHARED_FILTERED_CHUNKS_NCOLS (WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS * DIM1_SCALE_FACTOR)
+/* Defines for the unshared filtered chunks w/ single unlim. dimension write test */
+#define WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_NAME "unshared_filtered_chunks_single_unlim_dim_write"
+#define WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS 2
+#define WRITE_UNSHARED_ONE_UNLIM_DIM_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define WRITE_UNSHARED_ONE_UNLIM_DIM_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
+#define WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NROWS (WRITE_UNSHARED_ONE_UNLIM_DIM_NROWS / mpi_size)
+#define WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NCOLS (WRITE_UNSHARED_ONE_UNLIM_DIM_NCOLS / mpi_size)
+#define WRITE_UNSHARED_ONE_UNLIM_DIM_NLOOPS 5
+
+/* Defines for the shared filtered chunks w/ single unlim. dimension write test */
+#define WRITE_SHARED_ONE_UNLIM_DIM_DATASET_NAME "shared_filtered_chunks_single_unlim_dim_write"
+#define WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS 2
+#define WRITE_SHARED_ONE_UNLIM_DIM_CH_NROWS (mpi_size)
+#define WRITE_SHARED_ONE_UNLIM_DIM_CH_NCOLS (mpi_size)
+#define WRITE_SHARED_ONE_UNLIM_DIM_NROWS (WRITE_SHARED_ONE_UNLIM_DIM_CH_NROWS * DIM0_SCALE_FACTOR)
+#define WRITE_SHARED_ONE_UNLIM_DIM_NCOLS (WRITE_SHARED_ONE_UNLIM_DIM_CH_NCOLS * DIM1_SCALE_FACTOR)
+#define WRITE_SHARED_ONE_UNLIM_DIM_NLOOPS 5
+
+/* Defines for the unshared filtered chunks w/ two unlim. dimension write test */
+#define WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_NAME "unshared_filtered_chunks_two_unlim_dim_write"
+#define WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS 2
+#define WRITE_UNSHARED_TWO_UNLIM_DIM_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define WRITE_UNSHARED_TWO_UNLIM_DIM_NCOLS (DIM1_SCALE_FACTOR)
+#define WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NROWS (DIM0_SCALE_FACTOR)
+#define WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define WRITE_UNSHARED_TWO_UNLIM_DIM_NLOOPS 5
+
+/* Defines for the shared filtered chunks w/ two unlim. dimension write test */
+#define WRITE_SHARED_TWO_UNLIM_DIM_DATASET_NAME "shared_filtered_chunks_two_unlim_dim_write"
+#define WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS 2
+#define WRITE_SHARED_TWO_UNLIM_DIM_CH_NROWS (mpi_size)
+#define WRITE_SHARED_TWO_UNLIM_DIM_CH_NCOLS (mpi_size)
+#define WRITE_SHARED_TWO_UNLIM_DIM_NROWS (mpi_size)
+#define WRITE_SHARED_TWO_UNLIM_DIM_NCOLS (mpi_size)
+#define WRITE_SHARED_TWO_UNLIM_DIM_NLOOPS 5
+
/* Defines for the filtered chunks write test where a process has no selection */
#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "single_no_selection_filtered_chunks_write"
#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
@@ -403,4 +447,53 @@ typedef struct {
#define SHRINKING_GROWING_CHUNKS_CH_NCOLS (SHRINKING_GROWING_CHUNKS_NCOLS / mpi_size)
#define SHRINKING_GROWING_CHUNKS_NLOOPS 20
+/* Defines for the unshared filtered edge chunks write test */
+#define WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME "unshared_filtered_edge_chunks_write"
+#define WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2 "unshared_filtered_edge_chunks_no_filter_write"
+#define WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS 2
+#define WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
+#define WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_NCOLS \
+    ((mpi_size * DIM1_SCALE_FACTOR) + (WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS - 1))
+
+/* Defines for the shared filtered edge chunks write test */
+#define WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME "shared_filtered_edge_chunks_write"
+#define WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2 "shared_filtered_edge_chunks_no_filter_write"
+#define WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS 2
+#define WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NROWS (mpi_size)
+#define WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS (mpi_size)
+#define WRITE_SHARED_FILTERED_EDGE_CHUNKS_NROWS \
+ (WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NROWS * DIM0_SCALE_FACTOR)
+#define WRITE_SHARED_FILTERED_EDGE_CHUNKS_NCOLS \
+ ((WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS * DIM1_SCALE_FACTOR) + \
+ (WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS - 1))
+
+/* Defines for the fill values test */
+#define FILL_VALUES_TEST_DATASET_NAME "fill_value_test"
+#define FILL_VALUES_TEST_DATASET_NAME2 "fill_value_alloc_test"
+#define FILL_VALUES_TEST_DATASET_DIMS 2
+#define FILL_VALUES_TEST_FILL_VAL (-1)
+#define FILL_VALUES_TEST_CH_NROWS (mpi_size)
+#define FILL_VALUES_TEST_CH_NCOLS (mpi_size + 1)
+#define FILL_VALUES_TEST_NROWS (FILL_VALUES_TEST_CH_NROWS * DIM0_SCALE_FACTOR)
+#define FILL_VALUES_TEST_NCOLS (FILL_VALUES_TEST_CH_NCOLS * DIM1_SCALE_FACTOR)
+
+/* Defines for the undefined fill value test */
+#define FILL_VALUE_UNDEFINED_TEST_DATASET_NAME "fill_value_undefined_test"
+#define FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS 2
+#define FILL_VALUE_UNDEFINED_TEST_CH_NROWS (mpi_size)
+#define FILL_VALUE_UNDEFINED_TEST_CH_NCOLS (mpi_size + 1)
+#define FILL_VALUE_UNDEFINED_TEST_NROWS (FILL_VALUE_UNDEFINED_TEST_CH_NROWS * DIM0_SCALE_FACTOR)
+#define FILL_VALUE_UNDEFINED_TEST_NCOLS (FILL_VALUE_UNDEFINED_TEST_CH_NCOLS * DIM1_SCALE_FACTOR)
+
+/* Defines for the fill time of 'never' test */
+#define FILL_TIME_NEVER_TEST_DATASET_NAME "fill_time_never_test"
+#define FILL_TIME_NEVER_TEST_DATASET_DIMS 2
+#define FILL_TIME_NEVER_TEST_FILL_VAL (-1)
+#define FILL_TIME_NEVER_TEST_CH_NROWS (mpi_size)
+#define FILL_TIME_NEVER_TEST_CH_NCOLS (mpi_size + 1)
+#define FILL_TIME_NEVER_TEST_NROWS (FILL_TIME_NEVER_TEST_CH_NROWS * DIM0_SCALE_FACTOR)
+#define FILL_TIME_NEVER_TEST_NCOLS (FILL_TIME_NEVER_TEST_CH_NCOLS * DIM1_SCALE_FACTOR)
+
#endif /* TEST_PARALLEL_FILTERS_H_ */
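As a concrete check of these mpi_size-dependent defines, a worked example for a hypothetical 4-rank run (with DIM0_SCALE_FACTOR = 4 and DIM1_SCALE_FACTOR = 2 as above):

    /* mpi_size == 4:
     *   FILL_VALUES_TEST_CH_NROWS = 4    FILL_VALUES_TEST_CH_NCOLS = 5
     *   FILL_VALUES_TEST_NROWS    = 16   FILL_VALUES_TEST_NCOLS    = 10
     * => a 16 x 10 dataset of 4 x 5 chunks (a 4 x 2 chunk grid), so the
     *    partial writes land all four ranks in the same shared chunk. */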
diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c
index 3041e77..97d5966 100644
--- a/testpar/t_mdset.c
+++ b/testpar/t_mdset.c
@@ -1721,7 +1721,7 @@ io_mode_confusion(void)
* Open the each of the data sets in turn. If all opens are successful,
* the test passes. Otherwise the test fails.
*
- * Note that this test will probably become irrelevent shortly, when we
+ * Note that this test will probably become irrelevant shortly, when we
* land the journaling modifications on the trunk -- at which point all
* cache clients will have to construct on disk images on demand.
*
@@ -1782,7 +1782,7 @@ rr_obj_hdr_flush_confusion(void)
mrc = MPI_Comm_split(MPI_COMM_WORLD, is_reader, mpi_rank, &comm);
VRFY((mrc == MPI_SUCCESS), "MPI_Comm_split");
- /* The reader proocesses branches off to do reading
+    /* The reader processes branch off to do reading
      * while the writer processes continue to do writing
* Whenever writers finish one writing step, including a H5Fflush,
* they inform the readers, via MPI_COMM_WORLD, to verify.
@@ -2063,7 +2063,7 @@ rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
/*
* flush the metadata cache yet again to clean the object headers.
*
- * This is an attempt to crate a situation where we have dirty
+ * This is an attempt to create a situation where we have dirty
  * object header continuation chunks, but clean object headers
* to verify a speculative bug fix -- it doesn't seem to work,
* but I will leave the code in anyway, as the object header
diff --git a/testpar/t_mpi.c b/testpar/t_mpi.c
index a8a756c..96cbc4b 100644
--- a/testpar/t_mpi.c
+++ b/testpar/t_mpi.c
@@ -215,7 +215,7 @@ test_mpio_gb_file(char *filename)
* sizes.
*/
if (MAINPROCESS) { /* only process 0 needs to check it*/
- HDprintf("MPI_Offset is %s %d bytes integeral type\n", is_signed ? "signed" : "unsigned",
+ HDprintf("MPI_Offset is %s %d bytes integral type\n", is_signed ? "signed" : "unsigned",
(int)sizeof(MPI_Offset));
if (sizeof_mpi_offset <= 4 && is_signed) {
HDprintf("Skipped 2GB range test "
@@ -400,7 +400,7 @@ finish:
* MPI-IO Test: One writes, Many reads.
* Verify if only one process writes some data and then all other
* processes can read them back correctly. This tests if the
- * underlaying parallel I/O and file system supports parallel I/O
+ * underlying parallel I/O and file system supports parallel I/O
* correctly.
*
* Algorithm: Only one process (e.g., process 0) writes some data.
diff --git a/testpar/t_pflush1.c b/testpar/t_pflush1.c
index 2a80f4a..0500a2d 100644
--- a/testpar/t_pflush1.c
+++ b/testpar/t_pflush1.c
@@ -43,7 +43,7 @@ static int data_g[100][100];
*-------------------------------------------------------------------------
*/
static hid_t
-create_test_file(char *name, hid_t fapl_id)
+create_test_file(char *name, size_t name_length, hid_t fapl_id)
{
hid_t fid = H5I_INVALID_HID;
hid_t dcpl_id = H5I_INVALID_HID;
@@ -86,7 +86,7 @@ create_test_file(char *name, hid_t fapl_id)
if ((top_level_gid = H5Gcreate2(fid, "some_groups", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
goto error;
for (i = 0; i < N_GROUPS; i++) {
- HDsprintf(name, "grp%02u", (unsigned)i);
+ HDsnprintf(name, name_length, "grp%02u", (unsigned)i);
if ((gid = H5Gcreate2(top_level_gid, name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
goto error;
if (H5Gclose(gid) < 0)
@@ -133,7 +133,7 @@ main(int argc, char *argv[])
TESTING("H5Fflush (part1)");
/* Don't run using the split VFD */
- envval = HDgetenv("HDF5_DRIVER");
+ envval = HDgetenv(HDF5_DRIVER);
if (envval == NULL)
envval = "nomatch";
@@ -153,7 +153,7 @@ main(int argc, char *argv[])
/* Create the file */
h5_fixname(FILENAME[0], fapl_id, name, sizeof(name));
- if ((fid1 = create_test_file(name, fapl_id)) < 0)
+ if ((fid1 = create_test_file(name, sizeof(name), fapl_id)) < 0)
goto error;
/* Flush and exit without closing the library */
if (H5Fflush(fid1, H5F_SCOPE_GLOBAL) < 0)
@@ -161,7 +161,7 @@ main(int argc, char *argv[])
/* Create the other file which will not be flushed */
h5_fixname(FILENAME[1], fapl_id, name, sizeof(name));
- if ((fid2 = create_test_file(name, fapl_id)) < 0)
+ if ((fid2 = create_test_file(name, sizeof(name), fapl_id)) < 0)
goto error;
if (mpi_rank == 0)
@@ -207,5 +207,7 @@ error:
HDfflush(stderr);
HDprintf("*** ERROR ***\n");
HDprintf("THERE WAS A REAL ERROR IN t_pflush1.\n");
+ HDfflush(stdout);
+
HD_exit(EXIT_FAILURE);
} /* end main() */
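The HDsprintf-to-HDsnprintf changes here (and in t_pflush2.c below) follow the usual pattern of threading the destination buffer's size down to the formatting call; a minimal standalone sketch:

    char name[16];

    /* Bounded and always NUL-terminated: truncates instead of
     * overflowing the 16-byte buffer declared above */
    HDsnprintf(name, sizeof(name), "grp%02u", 3u); /* name == "grp03" */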
diff --git a/testpar/t_pflush2.c b/testpar/t_pflush2.c
index c96233a..8cf40d0 100644
--- a/testpar/t_pflush2.c
+++ b/testpar/t_pflush2.c
@@ -43,7 +43,7 @@ static int data_g[100][100];
*-------------------------------------------------------------------------
*/
static herr_t
-check_test_file(char *name, hid_t fapl_id)
+check_test_file(char *name, size_t name_length, hid_t fapl_id)
{
hid_t fid = H5I_INVALID_HID;
hid_t sid = H5I_INVALID_HID;
@@ -89,7 +89,7 @@ check_test_file(char *name, hid_t fapl_id)
if ((top_level_gid = H5Gopen2(fid, "some_groups", H5P_DEFAULT)) < 0)
goto error;
for (i = 0; i < N_GROUPS; i++) {
- HDsprintf(name, "grp%02u", (unsigned)i);
+ HDsnprintf(name, name_length, "grp%02u", (unsigned)i);
if ((gid = H5Gopen2(top_level_gid, name, H5P_DEFAULT)) < 0)
goto error;
if (H5Gclose(gid) < 0)
@@ -157,7 +157,7 @@ main(int argc, char *argv[])
TESTING("H5Fflush (part2 with flush)");
/* Don't run using the split VFD */
- envval = HDgetenv("HDF5_DRIVER");
+ envval = HDgetenv(HDF5_DRIVER);
if (envval == NULL)
envval = "nomatch";
@@ -167,7 +167,7 @@ main(int argc, char *argv[])
HDputs(" Test not compatible with current Virtual File Driver");
}
MPI_Finalize();
- HDexit(EXIT_FAILURE);
+ HDexit(EXIT_SUCCESS);
}
if ((fapl_id1 = H5Pcreate(H5P_FILE_ACCESS)) < 0)
@@ -182,7 +182,7 @@ main(int argc, char *argv[])
/* Check the case where the file was flushed */
h5_fixname(FILENAME[0], fapl_id1, name, sizeof(name));
- if (check_test_file(name, fapl_id1)) {
+ if (check_test_file(name, sizeof(name), fapl_id1)) {
H5_FAILED()
goto error;
}
@@ -199,7 +199,7 @@ main(int argc, char *argv[])
H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
h5_fixname(FILENAME[1], fapl_id2, name, sizeof(name));
- if (check_test_file(name, fapl_id2)) {
+ if (check_test_file(name, sizeof(name), fapl_id2)) {
if (mpi_rank == 0)
PASSED();
}
diff --git a/testpar/t_ph5basic.c b/testpar/t_ph5basic.c
index 05f2c77..59f14cb 100644
--- a/testpar/t_ph5basic.c
+++ b/testpar/t_ph5basic.c
@@ -56,7 +56,7 @@ test_fapl_mpio_dup(void)
HDprintf("rank/size of MPI_COMM_WORLD are %d/%d\n", mpi_rank, mpi_size);
/* Create a new communicator that has the same processes as MPI_COMM_WORLD.
- * Use MPI_Comm_split because it is simplier than MPI_Comm_create
+ * Use MPI_Comm_split because it is simpler than MPI_Comm_create
*/
mrc = MPI_Comm_split(MPI_COMM_WORLD, 0, 0, &comm);
VRFY((mrc == MPI_SUCCESS), "MPI_Comm_split");
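Splitting with a single color is the communicator-cloning idiom the comment above refers to; a minimal sketch, assuming MPI has already been initialized:

    MPI_Comm dup_comm;

    /* The same color (0) on every rank yields one new communicator
     * spanning all of MPI_COMM_WORLD; with an equal key (0) everywhere,
     * ties are broken by old rank, so rank order is preserved. */
    VRFY((MPI_Comm_split(MPI_COMM_WORLD, 0, 0, &dup_comm) == MPI_SUCCESS), "MPI_Comm_split");

    /* ... use dup_comm as a stand-in for MPI_COMM_WORLD ... */

    VRFY((MPI_Comm_free(&dup_comm) == MPI_SUCCESS), "MPI_Comm_free");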
diff --git a/testpar/t_pread.c b/testpar/t_pread.c
index 17091cb..d79130b 100644
--- a/testpar/t_pread.c
+++ b/testpar/t_pread.c
@@ -68,7 +68,7 @@ static char *test_argv0 = NULL;
* more or less in half. Each sub group will operate
* collectively on their assigned file. This split into
* subgroups validates that parallel groups can successfully
- * open and read data independantly from the other parallel
+ * open and read data independently from the other parallel
* operations taking place.
*
* Return: Success: 0
@@ -132,7 +132,7 @@ generate_test_file(MPI_Comm comm, int mpi_rank, int group_id)
}
/* Setup the file names
- * The test specfic filenames are stored as consecutive
+ * The test specific filenames are stored as consecutive
* array entries in the global 'FILENAMES' array above.
* Here, we simply decide on the starting index for
* file construction. The reading portion of the test
@@ -1043,11 +1043,11 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
* Purpose: To implement a parallel test which validates whether the
* new superblock lookup functionality is working correctly.
*
- * The test consists of creating two seperate HDF datasets
+ * The test consists of creating two separate HDF datasets
* in which random text is inserted at the start of each
* file using the 'j5jam' application. This forces the
* HDF5 file superblock to a non-zero offset.
- * Having created the two independant files, we create two
+ * Having created the two independent files, we create two
* non-overlapping MPI groups, each of which is then tasked
* with the opening and validation of the data contained
* therein.
@@ -1152,7 +1152,7 @@ main(int argc, char **argv)
goto finish;
}
- /* Now read the generated test file (stil using MPI_COMM_WORLD) */
+ /* Now read the generated test file (still using MPI_COMM_WORLD) */
nerrs += test_parallel_read(MPI_COMM_WORLD, mpi_rank, mpi_size, which_group);
if (nerrs > 0) {
diff --git a/testpar/t_prop.c b/testpar/t_prop.c
index a3f1859..606e100 100644
--- a/testpar/t_prop.c
+++ b/testpar/t_prop.c
@@ -215,7 +215,7 @@ test_plist_ed(void)
dxpl = H5Pcreate(H5P_DATASET_XFER);
VRFY((dxpl >= 0), "H5Pcreate succeeded");
- ret = H5Pset_btree_ratios(dxpl, 0.2f, 0.6f, 0.2f);
+ ret = H5Pset_btree_ratios(dxpl, 0.2, 0.6, 0.2);
VRFY((ret >= 0), "H5Pset_btree_ratios succeeded");
ret = H5Pset_hyper_vector_size(dxpl, 5);
@@ -354,7 +354,7 @@ test_plist_ed(void)
ret = H5Pset_alignment(fapl, 2, 1024);
VRFY((ret >= 0), "H5Pset_alignment succeeded");
- ret = H5Pset_cache(fapl, 1024, 128, 10485760, 0.3f);
+ ret = H5Pset_cache(fapl, 1024, 128, 10485760, 0.3);
VRFY((ret >= 0), "H5Pset_cache succeeded");
ret = H5Pset_elink_file_cache_size(fapl, 10485760);
@@ -556,7 +556,7 @@ external_links(void)
/* test opening a group that is to an external link, the external linked
file should inherit the source file's access properties */
- HDsprintf(link_path, "%s%s%s", group_path, "/", link_name);
+ HDsnprintf(link_path, sizeof(link_path), "%s%s%s", group_path, "/", link_name);
group = H5Gopen2(fid, link_path, H5P_DEFAULT);
VRFY((group >= 0), "H5Gopen succeeded");
ret = H5Gclose(group);
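The float-suffixed literals dropped earlier in this file were changed because both properties take double arguments, so 0.2f only forced an extra float-to-double conversion; a sketch with the parameter types spelled out (signatures as published in H5Ppublic.h):

    /* herr_t H5Pset_btree_ratios(hid_t plist, double left, double middle, double right) */
    VRFY((H5Pset_btree_ratios(dxpl, 0.2, 0.6, 0.2) >= 0), "H5Pset_btree_ratios succeeded");

    /* herr_t H5Pset_cache(hid_t plist, int mdc_nelmts, size_t rdcc_nslots,
     *                     size_t rdcc_nbytes, double rdcc_w0) */
    VRFY((H5Pset_cache(fapl, 1024, 128, 10485760, 0.3) >= 0), "H5Pset_cache succeeded");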
diff --git a/testpar/t_shapesame.c b/testpar/t_shapesame.c
index 2dd867a..0a18781 100644
--- a/testpar/t_shapesame.c
+++ b/testpar/t_shapesame.c
@@ -11,7 +11,7 @@
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
- This program will test independant and collective reads and writes between
+ This program will test independent and collective reads and writes between
   selections of different rank that nonetheless are deemed as having the
same shape by H5Sselect_shape_same().
*/
@@ -415,11 +415,11 @@ hs_dr_pio_test__setup(const int test_num, const int edge_size, const int checker
/* setup selections for writing initial data to the small data set */
ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) suceeded");
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded");
ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) suceeded");
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) succeeded");
if (MAINPROCESS) { /* add an additional slice to the selections */
@@ -427,11 +427,11 @@ hs_dr_pio_test__setup(const int test_num, const int edge_size, const int checker
ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride,
tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, or) suceeded");
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, or) succeeded");
ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride,
tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, or) suceeded");
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, or) succeeded");
}
/* write the initial value of the small data set to file */
@@ -475,21 +475,21 @@ hs_dr_pio_test__setup(const int test_num, const int edge_size, const int checker
ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, set) suceeded");
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, set) succeeded");
ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, set) suceeded");
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, set) succeeded");
/* In passing, setup the process slice dataspaces as well */
ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_process_slice_sid, H5S_SELECT_SET, tv_ptr->start,
tv_ptr->stride, tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_process_slice_sid, set) suceeded");
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_process_slice_sid, set) succeeded");
ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_process_slice_sid, H5S_SELECT_SET, tv_ptr->start,
tv_ptr->stride, tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_process_slice_sid, set) suceeded");
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_process_slice_sid, set) succeeded");
if (MAINPROCESS) { /* add an additional slice to the selections */
@@ -497,11 +497,11 @@ hs_dr_pio_test__setup(const int test_num, const int edge_size, const int checker
ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride,
tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, or) suceeded");
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, or) succeeded");
ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride,
tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, or) suceeded");
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, or) succeeded");
}
/* write the initial value of the large data set to file */
@@ -933,7 +933,7 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) suceeded");
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) succeeded");
#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG
HDfprintf(stdout, "%s reading slices of on disk small data set into slices of big data set.\n", fcnName);
@@ -1185,11 +1185,11 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) suceeded");
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) succeeded");
ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) suceeded");
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded");
/* set up start, stride, count, and block -- note that we will
* change start[] so as to read slices of the large cube.
@@ -1462,7 +1462,7 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) suceeded");
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded");
/* set up start, stride, count, and block -- note that we will
* change start[] so as to write slices of the small data set to
@@ -1572,7 +1572,7 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->large_ds_slice_sid,
tv_ptr->file_large_ds_process_slice_sid, tv_ptr->xfer_plist,
tv_ptr->large_ds_buf_2);
- VRFY((ret != FAIL), "H5Dwrite() to zero large ds suceeded");
+ VRFY((ret != FAIL), "H5Dwrite() to zero large ds succeeded");
/* select the portion of the in memory large cube to which we
* are going to write data.
@@ -2008,8 +2008,8 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
** edge_size, and a checker_edge_size, select a checker
** board selection of a sel_rank (sel_rank < tgt_rank)
** dimensional slice through the dataspace parallel to the
-** sel_rank fastest changing indicies, with origin (in the
-** higher indicies) as indicated by the start array.
+** sel_rank fastest changing indices, with origin (in the
+** higher indices) as indicated by the start array.
**
** Note that this function, like all its relatives, is
** hard coded to presume a maximum dataspace rank of 5.
@@ -2318,7 +2318,7 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank, const hid_t tgt_sid, cons
** or writing a checker board selection of an m (1 <= m <
 ** rank) dimensional slice through this process's slice
** of the target data set. Also, this slice must be parallel
-** to the fastest changing indicies.
+** to the fastest changing indices.
**
** It is further presumed that the buffer was zeroed before
** the read/write, and that the full target data set (i.e.
@@ -2353,7 +2353,7 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank, const hid_t tgt_sid, cons
** of the buffer resides either at the origin of either
** a selected or an unselected checker. (Translation:
** if partial checkers appear in the buffer, they will
-** intersect the edges of the n-cube oposite the origin.)
+** intersect the edges of the n-cube opposite the origin.)
**
****************************************************************/
@@ -3091,11 +3091,11 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) suceeded");
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) succeeded");
ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) suceeded");
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded");
sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0;
sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank);
@@ -3384,11 +3384,11 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, set) suceeded");
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, set) succeeded");
ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
tv_ptr->count, tv_ptr->block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, set) suceeded");
+ VRFY((ret >= 0), "H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, set) succeeded");
/* setup a checkerboard selection of the slice of the in memory small
* data set associated with the process's mpi rank.
@@ -3496,7 +3496,7 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
*/
ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid,
tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_2);
- VRFY((ret != FAIL), "H5Dwrite() to zero large ds suceeded");
+ VRFY((ret != FAIL), "H5Dwrite() to zero large ds succeeded");
/* select the portion of the in memory large cube to which we
* are going to write data.
@@ -4210,42 +4210,42 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
return (ret_pl);
}
-/* Shape Same test using contigous hyperslab using independent IO on contigous datasets */
+/* Shape Same test using contiguous hyperslab using independent IO on contiguous datasets */
static void
sscontig1(void)
{
contig_hs_dr_pio_test(IND_CONTIG);
}
-/* Shape Same test using contigous hyperslab using collective IO on contigous datasets */
+/* Shape Same test using contiguous hyperslab using collective IO on contiguous datasets */
static void
sscontig2(void)
{
contig_hs_dr_pio_test(COL_CONTIG);
}
-/* Shape Same test using contigous hyperslab using independent IO on chunked datasets */
+/* Shape Same test using contiguous hyperslab using independent IO on chunked datasets */
static void
sscontig3(void)
{
contig_hs_dr_pio_test(IND_CHUNKED);
}
-/* Shape Same test using contigous hyperslab using collective IO on chunked datasets */
+/* Shape Same test using contiguous hyperslab using collective IO on chunked datasets */
static void
sscontig4(void)
{
contig_hs_dr_pio_test(COL_CHUNKED);
}
-/* Shape Same test using checker hyperslab using independent IO on contigous datasets */
+/* Shape Same test using checker hyperslab using independent IO on contiguous datasets */
static void
sschecker1(void)
{
ckrbrd_hs_dr_pio_test(IND_CONTIG);
}
-/* Shape Same test using checker hyperslab using collective IO on contigous datasets */
+/* Shape Same test using checker hyperslab using collective IO on contiguous datasets */
static void
sschecker2(void)
{
@@ -4305,7 +4305,7 @@ main(int argc, char **argv)
/* Initialize testing framework */
TestInit(argv[0], usage, parse_options);
- /* Shape Same tests using contigous hyperslab */
+ /* Shape Same tests using contiguous hyperslab */
AddTest("sscontig1", sscontig1, NULL, "Cntg hslab, ind IO, cntg dsets", PARATESTFILE);
AddTest("sscontig2", sscontig2, NULL, "Cntg hslab, col IO, cntg dsets", PARATESTFILE);
AddTest("sscontig3", sscontig3, NULL, "Cntg hslab, ind IO, chnk dsets", PARATESTFILE);
diff --git a/testpar/t_span_tree.c b/testpar/t_span_tree.c
index 518741d..e624a01 100644
--- a/testpar/t_span_tree.c
+++ b/testpar/t_span_tree.c
@@ -424,7 +424,7 @@ coll_write_test(int chunk_factor)
      the correctness of collective write compared with
independent write,
- In order to throughly test this feature, we choose
+ In order to thoroughly test this feature, we choose
a different selection set for reading the data out.
@@ -890,8 +890,8 @@ coll_read_test(void)
** edge_size, and a checker_edge_size, select a checker
** board selection of a sel_rank (sel_rank < tgt_rank)
** dimensional slice through the dataspace parallel to the
-** sel_rank fastest changing indicies, with origin (in the
-** higher indicies) as indicated by the start array.
+** sel_rank fastest changing indices, with origin (in the
+** higher indices) as indicated by the start array.
**
 ** Note that this function is hard-coded to presume a
** maximum dataspace rank of 5.
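
For a 2-D dataspace, a checkerboard of this kind can be sketched as two interleaved hyperslabs: a SET call picks up the even-phase checkers, and an OR call adds the odd-phase ones (checker_edge, dims, and sid below are assumptions, with edge sizes taken to divide the extents evenly):

    hsize_t start[2]  = {0, 0};
    hsize_t stride[2] = {2 * checker_edge, 2 * checker_edge};
    hsize_t count[2]  = {dims[0] / (2 * checker_edge), dims[1] / (2 * checker_edge)};
    hsize_t block[2]  = {checker_edge, checker_edge};

    /* even-phase checkers */
    H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);

    /* odd-phase checkers, offset by one checker in each dimension */
    start[0] = checker_edge;
    start[1] = checker_edge;
    H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, stride, count, block);
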
@@ -1230,7 +1230,7 @@ lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t t
** or writing a checker board selection of an m (1 <= m <
 ** rank) dimensional slice through this process's slice
** of the target data set. Also, this slice must be parallel
-** to the fastest changing indicies.
+** to the fastest changing indices.
**
** It is further presumed that the buffer was zeroed before
** the read/write, and that the full target data set (i.e.
@@ -1265,7 +1265,7 @@ lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t t
 ** of the buffer resides at the origin of either
** a selected or an unselected checker. (Translation:
** if partial checkers appear in the buffer, they will
-** intersect the edges of the n-cube oposite the origin.)
+** intersect the edges of the n-cube opposite the origin.)
**
****************************************************************/
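
In other words, the verification walk can be pictured as below (a sketch only; element_is_selected() and expected_value() are hypothetical helpers standing in for the function's actual index arithmetic):

    /* buffer was zeroed before the read, so unselected elements
     * must still be zero and selected ones must carry written data */
    for (i = 0; i < buf_size; i++) {
        if (element_is_selected(i))
            VRFY((buf[i] == expected_value(i)), "selected element has written value");
        else
            VRFY((buf[i] == 0), "unselected element still zero");
    }
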
@@ -1726,7 +1726,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
if (!use_collective_io) {
ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio_collective_opt() suceeded");
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio_collective_opt() succeeded");
}
/* setup selection to write initial data to the small data sets */
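
The dxpl setup checked in the hunk above follows the usual two-step pattern: request collective access, then optionally downgrade the low-level transfer to independent I/O while staying on the collective code path at the API level. A minimal sketch (property-list variable names assumed):

    hid_t  xfer_plist = H5Pcreate(H5P_DATASET_XFER);
    herr_t ret        = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
    VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");

    if (!use_collective_io) {
        /* keep collective semantics, but perform the actual I/O independently */
        ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
        VRFY((ret >= 0), "H5Pset_dxpl_mpio_collective_opt() succeeded");
    }
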
@@ -1758,10 +1758,10 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
/* setup selections for writing initial data to the small data set */
ret = H5Sselect_hyperslab(mem_small_ds_sid, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) suceeded");
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded");
ret = H5Sselect_hyperslab(file_small_ds_sid, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid, set) suceeded");
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid, set) succeeded");
if (MAINPROCESS) { /* add an additional slice to the selections */
@@ -1782,10 +1782,10 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
ret = H5Sselect_hyperslab(mem_small_ds_sid, H5S_SELECT_OR, start, stride, count, block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, or) suceeded");
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, or) succeeded");
ret = H5Sselect_hyperslab(file_small_ds_sid, H5S_SELECT_OR, start, stride, count, block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid, or) suceeded");
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid, or) succeeded");
}
check = H5Sselect_valid(mem_small_ds_sid);
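
The MAINPROCESS branch above relies on H5S_SELECT_OR accumulating onto an existing selection; a condensed sketch of the idiom (extra_slice_offset is an assumed placeholder for the rank-0-only slice):

    H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
    if (MAINPROCESS) {
        start[0] = extra_slice_offset; /* assumed offset of the added slice */
        H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, stride, count, block);
    }
    check = H5Sselect_valid(sid);
    VRFY((check == TRUE), "H5Sselect_valid(sid) returned TRUE");
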
@@ -1864,10 +1864,10 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
ret = H5Sselect_hyperslab(mem_large_ds_sid, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, set) suceeded");
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, set) succeeded");
ret = H5Sselect_hyperslab(file_large_ds_sid, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid, set) suceeded");
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid, set) succeeded");
#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
@@ -1897,10 +1897,10 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
ret = H5Sselect_hyperslab(mem_large_ds_sid, H5S_SELECT_OR, start, stride, count, block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, or) suceeded");
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, or) succeeded");
ret = H5Sselect_hyperslab(file_large_ds_sid, H5S_SELECT_OR, start, stride, count, block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid, or) suceeded");
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid, or) succeeded");
#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
@@ -2427,7 +2427,7 @@ link_chunk_collective_io_test(void)
/* select the file and mem spaces */
start[0] = (hsize_t)(mpi_rank * LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE);
ret = H5Sselect_hyperslab(file_ds_sid, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sselect_hyperslab(file_ds_sid, set) suceeded");
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_ds_sid, set) succeeded");
ret = H5Sselect_all(write_mem_ds_sid);
VRFY((ret != FAIL), "H5Sselect_all(mem_ds_sid) succeeded");
diff --git a/testpar/testpflush.sh.in b/testpar/testpflush.sh.in
index be0dcb3..b400447 100644
--- a/testpar/testpflush.sh.in
+++ b/testpar/testpflush.sh.in
@@ -50,7 +50,7 @@ RUNPARALLELSCRIPT=`echo "@RUNPARALLEL@" | sed "s/$$/\$/g"`
echo "*** NOTE ***********************************************************"
echo "You may see complaints from mpiexec et al. that not all processes"
echo "called MPI_Finalize(). This is an intended characteristic of the"
-echo "test and should not be considered an error."
+echo "test and should not be considered an error."
echo "********************************************************************"
eval ${RUNPARALLELSCRIPT} ./t_pflush1
diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h
index e3075a3..e6c36bf 100644
--- a/testpar/testphdf5.h
+++ b/testpar/testphdf5.h
@@ -186,10 +186,6 @@ enum H5TEST_COLL_CHUNK_API {
#define TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES 0x010
#define TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT 0x020
#define TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL 0x040
-#define TEST_FILTERS 0x080
-/* TEST_FILTERS will take place of this after supporting mpio + filter for
- * H5Dcreate and H5Dwrite */
-#define TEST_FILTERS_READ 0x100
/* Don't erase these lines, they are put here for debugging purposes */
/*
@@ -218,8 +214,8 @@ typedef int DATATYPE;
/* Shape Same Tests Definitions */
typedef enum {
- IND_CONTIG, /* Independent IO on contigous datasets */
- COL_CONTIG, /* Collective IO on contigous datasets */
+ IND_CONTIG, /* Independent IO on contiguous datasets */
+ COL_CONTIG, /* Collective IO on contiguous datasets */
IND_CHUNKED, /* Independent IO on chunked datasets */
COL_CHUNKED /* Collective IO on chunked datasets */
} ShapeSameTestMethods;