author     jhendersonHDF <jhenderson@hdfgroup.org>    2023-10-19 22:23:59 (GMT)
committer  GitHub <noreply@github.com>                2023-10-19 22:23:59 (GMT)
commit     af56339d3bb0ba0076c10f929472f766c9a9a5af (patch)
tree       cdceb3b0a3846f7e5acbc340c19a688b0ddb7603 /testpar
parent     2bb6619528c1faef3382705f9635f85ad32e6f6d (diff)
Fixes and adjustments to t_filters_parallel (#3714)
- Broadcast the number of datasets to create in multi-dataset I/O cases so
  that interference with random number generation doesn't cause mismatches
  between ranks

- Set fill time for datasets to "never" by default and adjust on a per-test
  basis to avoid writing fill values to chunks when it's unnecessary

- Reduce the number of loops run in some tests when performing
  multi-dataset I/O

- Fix an issue in the "fill time never" test where data verification could
  fail if file space reuse causes application buffers to be filled with the
  chosen fill value when reading from datasets with uninitialized storage

- Skip multi-chunk I/O test configurations for multi-dataset I/O
  configurations when the TestExpress level is > 1, since those tests can
  be more stressful on the file system

- Disable use of persistent file free space management for now, since it
  occasionally runs into an infinite loop in the library's free space
  management code
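A minimal standalone sketch of the broadcast pattern behind the first fix
(hypothetical names and plain MPI error handling instead of the test's VRFY
macros): rank 0 draws the random dataset count and broadcasts it, so ranks
whose rand() streams have drifted apart still agree on how many datasets to
create.

#include <mpi.h>
#include <stdlib.h>

/* Sketch: agree on a random dataset count across all ranks */
static int pick_num_dsets(MPI_Comm comm, int max_dsets)
{
    int rank, size, n_dsets = 0;

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    /* Only rank 0 consumes the RNG stream for this decision */
    if (rank == 0)
        n_dsets = (rand() % (max_dsets - 1)) + 2; /* 2 .. max_dsets */

    /* Everyone else receives rank 0's value */
    if (size > 1)
        MPI_Bcast(&n_dsets, 1, MPI_INT, 0, comm);

    return n_dsets;
}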
Diffstat (limited to 'testpar')
-rw-r--r--  testpar/t_filters_parallel.c | 231
-rw-r--r--  testpar/t_filters_parallel.h |   2
2 files changed, 152 insertions(+), 81 deletions(-)
diff --git a/testpar/t_filters_parallel.c b/testpar/t_filters_parallel.c
index bdfde09..7dfb8bc 100644
--- a/testpar/t_filters_parallel.c
+++ b/testpar/t_filters_parallel.c
@@ -576,11 +576,21 @@ create_datasets(hid_t parent_obj_id, const char *dset_name, hid_t type_id, hid_t
case USE_MULTIPLE_DATASETS:
case USE_MULTIPLE_DATASETS_MIXED_FILTERED:
dset_name_ptr = dset_name_multi_buf;
- n_dsets = (rand() % (MAX_NUM_DSETS_MULTI - 1)) + 2;
+
+ if (MAINPROCESS)
+ n_dsets = (rand() % (MAX_NUM_DSETS_MULTI - 1)) + 2;
+
+ if (mpi_size > 1)
+ VRFY((MPI_SUCCESS == MPI_Bcast(&n_dsets, 1, MPI_INT, 0, comm)), "MPI_Bcast succeeded");
/* Select between 1 and (n_dsets - 1) datasets to be unfiltered */
if (test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED) {
- n_unfiltered = (rand() % (n_dsets - 1)) + 1;
+ if (MAINPROCESS)
+ n_unfiltered = (rand() % (n_dsets - 1)) + 1;
+
+ if (mpi_size > 1)
+ VRFY((MPI_SUCCESS == MPI_Bcast(&n_unfiltered, 1, MPI_INT, 0, comm)),
+ "MPI_Bcast succeeded");
unfiltered_dcpl = H5Pcopy(dcpl_id);
VRFY((unfiltered_dcpl >= 0), "H5Pcopy succeeded");
@@ -621,7 +631,11 @@ create_datasets(hid_t parent_obj_id, const char *dset_name, hid_t type_id, hid_t
* remaining datasets as unfiltered datasets. Otherwise,
* randomly determine if a dataset will be unfiltered.
*/
- unfiltered = ((size_t)n_unfiltered == dsets_left) || ((rand() % 2) == 0);
+ if (MAINPROCESS)
+ unfiltered = ((size_t)n_unfiltered == dsets_left) || ((rand() % 2) == 0);
+
+ if (mpi_size > 1)
+ VRFY((MPI_SUCCESS == MPI_Bcast(&unfiltered, 1, MPI_C_BOOL, 0, comm)), "MPI_Bcast succeeded");
if (unfiltered) {
curr_dcpl = unfiltered_dcpl;
@@ -1201,6 +1215,12 @@ test_write_filtered_dataset_no_overlap_partial(const char *parent_group, H5Z_fil
plist_id = H5Pcopy(dcpl_id);
VRFY((plist_id >= 0), "DCPL copy succeeded");
+ /*
+ * Since we're only doing a partial write to the dataset, make
+ * sure the fill time is set appropriately
+ */
+ VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_IFSET) >= 0), "H5Pset_fill_time succeeded");
+
VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
@@ -1485,6 +1505,7 @@ test_write_filtered_dataset_single_unlim_dim_no_overlap(const char *parent_group
hsize_t block[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS];
size_t data_size;
size_t num_dsets;
+ size_t num_loops;
hid_t dset_ids[MAX_NUM_DSETS_MULTI];
hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
@@ -1551,7 +1572,12 @@ test_write_filtered_dataset_single_unlim_dim_no_overlap(const char *parent_group
read_bufs[dset_idx] = tmp_buf;
}
- for (size_t i = 0; i < (size_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NLOOPS; i++) {
+ /* Determine number of loops to run through */
+ num_loops = WRITE_UNSHARED_ONE_UNLIM_DIM_NLOOPS;
+ if ((test_mode == USE_MULTIPLE_DATASETS) || (test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED))
+ num_loops /= 2;
+
+ for (size_t i = 0; i < num_loops; i++) {
/* Each process defines the dataset selection in memory and writes
* it to the hyperslab in the file
*/
@@ -1590,7 +1616,7 @@ test_write_filtered_dataset_single_unlim_dim_no_overlap(const char *parent_group
VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)),
"Data verification succeeded");
- if (i < (size_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NLOOPS - 1) {
+ if (i < num_loops - 1) {
/* Extend the dataset(s) by count[1] chunks in the extensible dimension */
dataset_dims[1] += count[1] * block[1];
@@ -1646,6 +1672,7 @@ test_write_filtered_dataset_single_unlim_dim_overlap(const char *parent_group, H
hsize_t block[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS];
size_t data_size;
size_t num_dsets;
+ size_t num_loops;
hid_t dset_ids[MAX_NUM_DSETS_MULTI];
hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
@@ -1712,7 +1739,12 @@ test_write_filtered_dataset_single_unlim_dim_overlap(const char *parent_group, H
read_bufs[dset_idx] = tmp_buf;
}
- for (size_t i = 0; i < (size_t)WRITE_SHARED_ONE_UNLIM_DIM_NLOOPS; i++) {
+ /* Determine number of loops to run through */
+ num_loops = WRITE_SHARED_ONE_UNLIM_DIM_NLOOPS;
+ if ((test_mode == USE_MULTIPLE_DATASETS) || (test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED))
+ num_loops /= 2;
+
+ for (size_t i = 0; i < num_loops; i++) {
/* Each process defines the dataset selection in memory and writes
* it to the hyperslab in the file
*/
@@ -1750,7 +1782,7 @@ test_write_filtered_dataset_single_unlim_dim_overlap(const char *parent_group, H
VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)),
"Data verification succeeded");
- if (i < (size_t)WRITE_SHARED_ONE_UNLIM_DIM_NLOOPS - 1) {
+ if (i < num_loops - 1) {
/* Extend the dataset(s) by count[1] chunks in the extensible dimension */
dataset_dims[1] += count[1] * block[1];
@@ -1808,6 +1840,7 @@ test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char *parent_group,
hsize_t block[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS];
size_t data_size;
size_t num_dsets;
+ size_t num_loops;
hid_t dset_ids[MAX_NUM_DSETS_MULTI];
hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
@@ -1855,7 +1888,12 @@ test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char *parent_group,
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- for (size_t i = 0; i < (size_t)WRITE_UNSHARED_TWO_UNLIM_DIM_NLOOPS; i++) {
+ /* Determine number of loops to run through */
+ num_loops = WRITE_UNSHARED_TWO_UNLIM_DIM_NLOOPS;
+ if ((test_mode == USE_MULTIPLE_DATASETS) || (test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED))
+ num_loops /= 2;
+
+ for (size_t i = 0; i < num_loops; i++) {
/* Set selected dimensions */
sel_dims[0] = (i + 1) * WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NROWS;
sel_dims[1] = (i + 1) * WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NCOLS;
@@ -1916,7 +1954,7 @@ test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char *parent_group,
VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)),
"Data verification succeeded");
- if (i < (size_t)WRITE_UNSHARED_TWO_UNLIM_DIM_NLOOPS - 1) {
+ if (i < num_loops - 1) {
/*
* Extend the dataset(s) by the size of one chunk per rank
* in the first extensible dimension. Extend the dataset(s)
@@ -1977,6 +2015,7 @@ test_write_filtered_dataset_multi_unlim_dim_overlap(const char *parent_group, H5
hsize_t block[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS];
size_t data_size;
size_t num_dsets;
+ size_t num_loops;
hid_t dset_ids[MAX_NUM_DSETS_MULTI];
hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
@@ -2024,7 +2063,12 @@ test_write_filtered_dataset_multi_unlim_dim_overlap(const char *parent_group, H5
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- for (size_t i = 0; i < (size_t)WRITE_SHARED_TWO_UNLIM_DIM_NLOOPS; i++) {
+ /* Determine number of loops to run through */
+ num_loops = WRITE_SHARED_TWO_UNLIM_DIM_NLOOPS;
+ if ((test_mode == USE_MULTIPLE_DATASETS) || (test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED))
+ num_loops /= 2;
+
+ for (size_t i = 0; i < num_loops; i++) {
/* Set selected dimensions */
sel_dims[0] = (i + 1);
sel_dims[1] = (i + 1) * (size_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NCOLS;
@@ -2085,7 +2129,7 @@ test_write_filtered_dataset_multi_unlim_dim_overlap(const char *parent_group, H5
VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)),
"Data verification succeeded");
- if (i < (size_t)WRITE_SHARED_TWO_UNLIM_DIM_NLOOPS - 1) {
+ if (i < num_loops - 1) {
/* Extend the dataset(s) by the size of a chunk in each extensible dimension */
dataset_dims[0] += (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NROWS;
dataset_dims[1] += (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NCOLS;
@@ -2177,6 +2221,12 @@ test_write_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fi
plist_id = H5Pcopy(dcpl_id);
VRFY((plist_id >= 0), "DCPL copy succeeded");
+ /*
+ * Since we're only doing a partial write to the dataset, make
+ * sure the fill time is set appropriately
+ */
+ VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_IFSET) >= 0), "H5Pset_fill_time succeeded");
+
VRFY((H5Pset_chunk(plist_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
@@ -2341,6 +2391,12 @@ test_write_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filte
plist_id = H5Pcopy(dcpl_id);
VRFY((plist_id >= 0), "DCPL copy succeeded");
+ /*
+ * Since we're doing a no-op write to the dataset,
+ * make sure the fill time is set appropriately
+ */
+ VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_IFSET) >= 0), "H5Pset_fill_time succeeded");
+
VRFY((H5Pset_chunk(plist_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
@@ -5250,7 +5306,6 @@ test_read_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter
void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
hsize_t dataset_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t chunk_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
- hsize_t sel_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
size_t data_size, read_buf_size;
size_t num_dsets;
hid_t dset_ids[MAX_NUM_DSETS_MULTI];
@@ -5368,8 +5423,6 @@ test_read_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter
open_datasets(group_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode,
dset_ids);
- sel_dims[0] = sel_dims[1] = 0;
-
select_none(num_dsets, dset_ids, fspace_ids);
read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE);
@@ -8275,6 +8328,7 @@ test_shrinking_growing_chunks(const char *parent_group, H5Z_filter_t filter_id,
hsize_t block[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
size_t data_size;
size_t num_dsets;
+ size_t num_loops;
hid_t dset_ids[MAX_NUM_DSETS_MULTI];
hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
@@ -8352,7 +8406,12 @@ test_shrinking_growing_chunks(const char *parent_group, H5Z_filter_t filter_id,
read_bufs[dset_idx] = tmp_buf;
}
- for (size_t i = 0; i < SHRINKING_GROWING_CHUNKS_NLOOPS; i++) {
+ /* Determine number of loops to run through */
+ num_loops = SHRINKING_GROWING_CHUNKS_NLOOPS;
+ if ((test_mode == USE_MULTIPLE_DATASETS) || (test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED))
+ num_loops /= 2;
+
+ for (size_t i = 0; i < num_loops; i++) {
for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
/* Continually write random float data, followed by zeroed-out data */
if (i % 2)
@@ -8544,6 +8603,12 @@ test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hi
filespace = H5Screate_simple(WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
+ /*
+ * Since we're only doing a partial write to the dataset, make
+ * sure the fill time is set appropriately
+ */
+ VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_IFSET) >= 0), "H5Pset_fill_time succeeded");
+
H5Pset_chunk_opts(plist_id, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS);
/* Create datasets depending on the current test mode */
@@ -8748,6 +8813,12 @@ test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t
filespace = H5Screate_simple(WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
+ /*
+ * Since we're only doing a partial write to the dataset, make
+ * sure the fill time is set appropriately
+ */
+ VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_IFSET) >= 0), "H5Pset_fill_time succeeded");
+
H5Pset_chunk_opts(plist_id, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS);
/* Create datasets depending on the current test mode */
@@ -8870,6 +8941,9 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id
plist_id = H5Pcopy(dcpl_id);
VRFY((plist_id >= 0), "DCPL copy succeeded");
+ /* Make sure the fill time is set appropriately */
+ VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_IFSET) >= 0), "H5Pset_fill_time succeeded");
+
VRFY((H5Pset_chunk(plist_id, FILL_VALUES_TEST_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
/* Add test filter to the pipeline */
@@ -9231,6 +9305,9 @@ test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_
plist_id = H5Pcopy(dcpl_id);
VRFY((plist_id >= 0), "DCPL copy succeeded");
+ /* Make sure the fill time is set appropriately */
+ VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_IFSET) >= 0), "H5Pset_fill_time succeeded");
+
VRFY((H5Pset_chunk(plist_id, FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
/* Add test filter to the pipeline */
@@ -9449,12 +9526,10 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap
size_t num_dsets;
hid_t dset_ids[MAX_NUM_DSETS_MULTI];
hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
- hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
- hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID;
- int *recvcounts = NULL;
- int *displs = NULL;
- int mpi_code;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
puts("Testing fill time H5D_FILL_TIME_NEVER");
@@ -9504,7 +9579,7 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap
VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
/* Set a fill value */
- fill_value = FILL_VALUES_TEST_FILL_VAL;
+ fill_value = FILL_TIME_NEVER_TEST_FILL_VAL;
VRFY((H5Pset_fill_value(plist_id, HDF5_DATATYPE_NAME, &fill_value) >= 0), "Fill Value set");
/* Set fill time of 'never' */
@@ -9519,6 +9594,21 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ /* Allocate buffer for reading entire dataset */
+ read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ read_bufs[dset_idx] = calloc(1, read_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+ }
+
+ /* Allocate buffer of fill values */
+ fill_buf = calloc(1, read_buf_size);
+ VRFY((NULL != fill_buf), "calloc succeeded");
+
+ for (size_t i = 0; i < read_buf_size / sizeof(C_DATATYPE); i++)
+ fill_buf[i] = FILL_TIME_NEVER_TEST_FILL_VAL;
+
/*
* Since we aren't writing fill values to the chunks of the
* datasets we just created, close and re-open file to ensure
@@ -9538,37 +9628,21 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap
open_datasets(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids);
- /* Allocate buffer for reading entire dataset */
- read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE);
-
- for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
- read_bufs[dset_idx] = calloc(1, read_buf_size);
- VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
- }
-
- fill_buf = calloc(1, read_buf_size);
- VRFY((NULL != fill_buf), "calloc succeeded");
-
- /* Read entire dataset and verify that the fill value isn't returned */
- read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
-
- for (size_t i = 0; i < read_buf_size / sizeof(C_DATATYPE); i++)
- fill_buf[i] = FILL_TIME_NEVER_TEST_FILL_VAL;
-
/*
- * It should be very unlikely for the dataset's random
- * values to all be the fill value, so this should be
- * a safe comparison in theory.
+ * Read entire dataset just to try to verify bad behavior doesn't
+ * occur. Don't attempt to verify the contents of the read buffer(s)
+ * yet, because there's no guarantee as to what may have been
+ * read from the dataset.
*/
- for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
- VRFY((0 != memcmp(read_bufs[dset_idx], fill_buf, read_buf_size)), "Data verification succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
/*
* Write to part of the first chunk in the dataset with
- * all ranks, then read the whole dataset and ensure that
- * the fill value isn't returned for the unwritten part of
- * the chunk, as well as for the rest of the dataset that
- * hasn't been written to yet.
+ * all ranks, then read the whole dataset just to try to
+ * verify bad behavior doesn't occur. Don't attempt to
+ * verify the contents of the read buffer(s) yet, because
+ * there's no guarantee as to what may have been read from
+ * the dataset.
*/
count[0] = 1;
count[1] = 1;
@@ -9609,34 +9683,6 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap
read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
- /*
- * Each MPI rank communicates their written piece of data
- * into each other rank's correctness-checking buffer
- */
- recvcounts = calloc(1, (size_t)mpi_size * sizeof(*recvcounts));
- VRFY((NULL != recvcounts), "calloc succeeded");
-
- displs = calloc(1, (size_t)mpi_size * sizeof(*displs));
- VRFY((NULL != displs), "calloc succeeded");
-
- for (size_t i = 0; i < (size_t)mpi_size; i++) {
- recvcounts[i] = (int)(count[1] * block[1]);
- displs[i] = (int)(i * dataset_dims[1]);
- }
-
- for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
- mpi_code = MPI_Allgatherv(data_bufs[dset_idx], recvcounts[mpi_rank], C_DATATYPE_MPI, fill_buf,
- recvcounts, displs, C_DATATYPE_MPI, comm);
- VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded");
-
- /*
- * It should be very unlikely for the dataset's random
- * values to all be the fill value, so this should be
- * a safe comparison in theory.
- */
- VRFY((0 != memcmp(read_bufs[dset_idx], fill_buf, read_buf_size)), "Data verification succeeded");
- }
-
for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
@@ -9680,9 +9726,6 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap
VRFY((tmp_buf[j] != FILL_TIME_NEVER_TEST_FILL_VAL), "Data verification succeeded");
}
- free(displs);
- free(recvcounts);
-
free(fill_buf);
for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
@@ -9827,7 +9870,13 @@ main(int argc, char **argv)
fcpl_id = H5Pcreate(H5P_FILE_CREATE);
VRFY((fcpl_id >= 0), "FCPL creation succeeded");
- VRFY((H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE, true, 1) >= 0),
+ /*
+ * TODO: Ideally, use persistent free space management. However,
+ * this occasionally runs into an infinite loop in the library's
+ * free space management code, so don't persist free space for now
+ * until that is fixed.
+ */
+ VRFY((H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE, false, 1) >= 0),
"H5Pset_file_space_strategy succeeded");
VRFY((h5_fixname(FILENAME[0], fapl_id, filenames[0], sizeof(filenames[0])) != NULL),
@@ -9956,6 +10005,19 @@ main(int argc, char **argv)
continue;
}
+ /*
+ * If TestExpress is > 1, only run the multi-chunk I/O
+ * configuration tests for the 'USE_SINGLE_DATASET' case,
+ * as the 'USE_MULTIPLE_DATASETS' and 'USE_MULTIPLE_DATASETS_MIXED_FILTERED'
+ * cases are more stressful on the file system.
+ */
+ if (test_express_level_g > 1) {
+ if (((test_mode == USE_MULTIPLE_DATASETS) ||
+ (test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED)) &&
+ (chunk_opt != H5FD_MPIO_CHUNK_ONE_IO))
+ continue;
+ }
+
if (MAINPROCESS) {
printf("== Running tests in mode '%s' with filter '%s' using selection I/O mode "
"'%s', '%s' and '%s' allocation time ==\n\n",
@@ -9992,6 +10054,15 @@ main(int argc, char **argv)
VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, chunk_opt) >= 0),
"H5Pset_dxpl_mpio_chunk_opt succeeded");
+ /*
+ * Disable writing of fill values by default. Otherwise, a
+ * lot of time may be spent writing fill values to chunks
+ * when they're going to be fully overwritten anyway.
+ * Individual tests will alter this behavior as necessary.
+ */
+ VRFY((H5Pset_fill_time(dcpl_id, H5D_FILL_TIME_NEVER) >= 0),
+ "H5Pset_fill_time succeeded");
+
/* Create a group to hold all the datasets for this combination
* of filter and chunk optimization mode. Then, close the file
* again since some tests may need to open the file in a special
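The free space management hunk above flips the persist argument of
H5Pset_file_space_strategy() from true to false. A minimal sketch of the
resulting FCPL setup (hypothetical helper name; same paged strategy and
1-byte threshold as the test):

#include "hdf5.h"

/* Sketch: paged file space management without persisting free space */
static hid_t make_fcpl_paged_nonpersistent(void)
{
    hid_t fcpl_id = H5Pcreate(H5P_FILE_CREATE);

    if (fcpl_id < 0)
        return H5I_INVALID_HID;

    /* persist = false sidesteps the free space manager infinite loop noted
     * in the TODO above; threshold = 1 tracks all free space sections */
    if (H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE, false, 1) < 0) {
        H5Pclose(fcpl_id);
        return H5I_INVALID_HID;
    }

    return fcpl_id;
}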
diff --git a/testpar/t_filters_parallel.h b/testpar/t_filters_parallel.h
index c0b1db8..04d3639 100644
--- a/testpar/t_filters_parallel.h
+++ b/testpar/t_filters_parallel.h
@@ -444,7 +444,7 @@ typedef struct {
#define SHRINKING_GROWING_CHUNKS_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
#define SHRINKING_GROWING_CHUNKS_CH_NROWS (SHRINKING_GROWING_CHUNKS_NROWS / mpi_size)
#define SHRINKING_GROWING_CHUNKS_CH_NCOLS (SHRINKING_GROWING_CHUNKS_NCOLS / mpi_size)
-#define SHRINKING_GROWING_CHUNKS_NLOOPS 20
+#define SHRINKING_GROWING_CHUNKS_NLOOPS 8
/* Defines for the unshared filtered edge chunks write test */
#define WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME "unshared_filtered_edge_chunks_write"