Diffstat (limited to 'testpar')
-rw-r--r--  testpar/API/H5_api_async_test_parallel.c | 100
-rw-r--r--  testpar/API/H5_api_dataset_test_parallel.c | 50
-rw-r--r--  testpar/API/H5_api_file_test_parallel.c | 8
-rw-r--r--  testpar/API/H5_api_test_parallel.c | 22
-rw-r--r--  testpar/API/H5_api_test_parallel.h | 8
-rw-r--r--  testpar/API/t_bigio.c | 4
-rw-r--r--  testpar/API/t_chunk_alloc.c | 2
-rw-r--r--  testpar/API/t_coll_chunk.c | 2
-rw-r--r--  testpar/API/t_dset.c | 40
-rw-r--r--  testpar/API/t_file.c | 88
-rw-r--r--  testpar/API/t_file_image.c | 24
-rw-r--r--  testpar/API/t_filter_read.c | 2
-rw-r--r--  testpar/API/t_mdset.c | 68
-rw-r--r--  testpar/API/t_prop.c | 24
-rw-r--r--  testpar/API/t_shapesame.c | 165
-rw-r--r--  testpar/API/t_span_tree.c | 124
-rw-r--r--  testpar/API/testphdf5.c | 4
-rw-r--r--  testpar/API/testphdf5.h | 20
-rw-r--r--  testpar/t_2Gio.c | 70
-rw-r--r--  testpar/t_bigio.c | 14
-rw-r--r--  testpar/t_cache.c | 879
-rw-r--r--  testpar/t_cache_image.c | 704
-rw-r--r--  testpar/t_chunk_alloc.c | 2
-rw-r--r--  testpar/t_dset.c | 76
-rw-r--r--  testpar/t_file.c | 98
-rw-r--r--  testpar/t_file_image.c | 32
-rw-r--r--  testpar/t_filter_read.c | 2
-rw-r--r--  testpar/t_filters_parallel.c | 8452
-rw-r--r--  testpar/t_filters_parallel.h | 5
-rw-r--r--  testpar/t_init_term.c | 6
-rw-r--r--  testpar/t_mdset.c | 74
-rw-r--r--  testpar/t_pflush1.c | 8
-rw-r--r--  testpar/t_pflush2.c | 8
-rw-r--r--  testpar/t_pmulti_dset.c | 36
-rw-r--r--  testpar/t_pread.c | 238
-rw-r--r--  testpar/t_prop.c | 24
-rw-r--r--  testpar/t_select_io_dset.c | 66
-rw-r--r--  testpar/t_shapesame.c | 240
-rw-r--r--  testpar/t_span_tree.c | 118
-rw-r--r--  testpar/t_subfiling_vfd.c | 242
-rw-r--r--  testpar/t_vfd.c | 566
-rw-r--r--  testpar/testphdf5.c | 16
-rw-r--r--  testpar/testphdf5.h | 12
43 files changed, 6975 insertions(+), 5768 deletions(-)
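
Note on the pattern in the hunks below: the bulk of this patch is a mechanical cleanup that replaces HDF5's legacy hbool_t / TRUE / FALSE with C99 bool / true / false and swaps the HD-prefixed standard-library wrappers (HDsnprintf, HDgetenv, HDstrdup, HDstrtok, HDstrcmp, HDputs) for the plain libc calls. The standalone sketch that follows is not taken from the patch; it only condenses that before/after pattern, and the environment-variable name in it is a literal string used purely for illustration.

/*
 * Minimal sketch of the conversion pattern applied throughout these
 * test files (illustrative only, not part of the patch).
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
    char        dset_name[64];
    bool        coll_metadata_read = true; /* was: hbool_t coll_metadata_read = TRUE; */
    const char *prefix;

    /* was: HDgetenv(...) -- the variable name here is illustrative only */
    if (NULL == (prefix = getenv("HDF5_API_TEST_PATH_PREFIX")))
        prefix = "";

    /* was: HDsnprintf(dset_name, sizeof(dset_name), "dset%d", 0); */
    snprintf(dset_name, sizeof(dset_name), "dset%d", 0);

    printf("%s%s (coll_metadata_read = %d)\n", prefix, dset_name, (int)coll_metadata_read);

    return EXIT_SUCCESS;
}

In recent HDF5 releases hbool_t is a typedef for bool, so the rename does not change behavior; it simply standardizes the test code on the C99 spellings.
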
diff --git a/testpar/API/H5_api_async_test_parallel.c b/testpar/API/H5_api_async_test_parallel.c
index 23e3544..79327d0 100644
--- a/testpar/API/H5_api_async_test_parallel.c
+++ b/testpar/API/H5_api_async_test_parallel.c
@@ -47,7 +47,7 @@ static int (*par_async_tests[])(void) = {
test_file_reopen,
};
-hbool_t coll_metadata_read = TRUE;
+bool coll_metadata_read = true;
/* Highest "printf" file created (starting at 0) */
int max_printf_file = -1;
@@ -65,8 +65,8 @@ test_one_dataset_io(void)
hsize_t stride[ONE_DATASET_IO_TEST_SPACE_RANK];
hsize_t count[ONE_DATASET_IO_TEST_SPACE_RANK];
hsize_t block[ONE_DATASET_IO_TEST_SPACE_RANK];
- hbool_t op_failed = false;
- hbool_t is_native_vol = false;
+ bool op_failed = false;
+ bool is_native_vol = false;
size_t i, data_size, num_in_progress;
hid_t file_id = H5I_INVALID_HID;
hid_t fapl_id = H5I_INVALID_HID;
@@ -429,7 +429,7 @@ test_multi_dataset_io(void)
hsize_t stride[MULTI_DATASET_IO_TEST_SPACE_RANK];
hsize_t count[MULTI_DATASET_IO_TEST_SPACE_RANK];
hsize_t block[MULTI_DATASET_IO_TEST_SPACE_RANK];
- hbool_t op_failed;
+ bool op_failed;
size_t i, j, data_size, num_in_progress;
hid_t file_id = H5I_INVALID_HID;
hid_t fapl_id = H5I_INVALID_HID;
@@ -753,8 +753,8 @@ test_multi_file_dataset_io(void)
hsize_t stride[MULTI_FILE_DATASET_IO_TEST_SPACE_RANK];
hsize_t count[MULTI_FILE_DATASET_IO_TEST_SPACE_RANK];
hsize_t block[MULTI_FILE_DATASET_IO_TEST_SPACE_RANK];
- hbool_t op_failed = false;
- hbool_t is_native_vol = false;
+ bool op_failed = false;
+ bool is_native_vol = false;
size_t i, j, data_size, num_in_progress;
hid_t fapl_id = H5I_INVALID_HID;
hid_t file_id[MULTI_FILE_DATASET_IO_TEST_NFILES] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID,
@@ -1178,7 +1178,7 @@ test_multi_file_grp_dset_io(void)
hsize_t stride[MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK];
hsize_t count[MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK];
hsize_t block[MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK];
- hbool_t op_failed;
+ bool op_failed;
size_t i, j, data_size, num_in_progress;
hid_t file_id = H5I_INVALID_HID;
hid_t fapl_id = H5I_INVALID_HID;
@@ -1597,8 +1597,8 @@ test_set_extent(void)
hsize_t stride[SET_EXTENT_TEST_SPACE_RANK];
hsize_t count[SET_EXTENT_TEST_SPACE_RANK];
hsize_t block[SET_EXTENT_TEST_SPACE_RANK];
- hbool_t op_failed = false;
- hbool_t is_native_vol = false;
+ bool op_failed = false;
+ bool is_native_vol = false;
size_t i, j, data_size, num_in_progress;
hid_t file_id = H5I_INVALID_HID;
hid_t fapl_id = H5I_INVALID_HID;
@@ -1933,11 +1933,11 @@ static int
test_attribute_exists(void)
{
hsize_t *dims = NULL;
- hbool_t op_failed = false;
- hbool_t is_native_vol = false;
+ bool op_failed = false;
+ bool is_native_vol = false;
size_t num_in_progress;
- hbool_t exists1 = false;
- hbool_t exists2 = false;
+ bool exists1 = false;
+ bool exists2 = false;
hid_t file_id = H5I_INVALID_HID;
hid_t fapl_id = H5I_INVALID_HID;
hid_t dset_id = H5I_INVALID_HID;
@@ -2022,9 +2022,9 @@ test_attribute_exists(void)
/* Check if H5Aexists returned the correct values */
if (exists1)
- FAIL_PUTS_ERROR(" H5Aexists returned TRUE for an attribute that should not exist")
+ FAIL_PUTS_ERROR(" H5Aexists returned true for an attribute that should not exist")
if (!exists2)
- FAIL_PUTS_ERROR(" H5Aexists returned FALSE for an attribute that should exist");
+ FAIL_PUTS_ERROR(" H5Aexists returned false for an attribute that should exist");
/* Close */
if (H5Aclose_async(attr_id, es_id) < 0)
@@ -2085,8 +2085,8 @@ static int
test_attribute_io(void)
{
hsize_t *dims = NULL;
- hbool_t op_failed = false;
- hbool_t is_native_vol = false;
+ bool op_failed = false;
+ bool is_native_vol = false;
size_t num_in_progress;
size_t i, data_size;
hid_t file_id = H5I_INVALID_HID;
@@ -2293,7 +2293,7 @@ static int
test_attribute_io_tconv(void)
{
hsize_t *dims = NULL;
- hbool_t op_failed;
+ bool op_failed;
size_t num_in_progress;
size_t i, data_size;
hid_t file_id = H5I_INVALID_HID;
@@ -2493,7 +2493,7 @@ static int
test_attribute_io_compound(void)
{
hsize_t *dims = NULL;
- hbool_t op_failed;
+ bool op_failed;
size_t num_in_progress;
size_t i, data_size;
hid_t file_id = H5I_INVALID_HID;
@@ -2907,7 +2907,7 @@ test_group(void)
H5G_info_t info2;
H5G_info_t info3;
size_t num_in_progress;
- hbool_t op_failed;
+ bool op_failed;
TESTING("group operations");
@@ -3073,21 +3073,21 @@ error:
static int
test_link(void)
{
- hid_t file_id = H5I_INVALID_HID;
- hid_t fapl_id = H5I_INVALID_HID;
- hid_t parent_group_id = H5I_INVALID_HID;
- hid_t group_id = H5I_INVALID_HID;
- hid_t gcpl_id = H5I_INVALID_HID;
- hid_t es_id = H5I_INVALID_HID;
- hbool_t existsh1;
- hbool_t existsh2;
- hbool_t existsh3;
- hbool_t existss1;
- hbool_t existss2;
- hbool_t existss3;
- size_t num_in_progress;
- hbool_t op_failed = false;
- hbool_t is_native_vol = false;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t parent_group_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ bool existsh1;
+ bool existsh2;
+ bool existsh3;
+ bool existss1;
+ bool existss2;
+ bool existss3;
+ size_t num_in_progress;
+ bool op_failed = false;
+ bool is_native_vol = false;
TESTING("link operations");
@@ -3276,17 +3276,17 @@ test_link(void)
/* Check if existence returns were correct */
if (!existsh1)
- FAIL_PUTS_ERROR(" link exists returned FALSE for link that should exist");
+ FAIL_PUTS_ERROR(" link exists returned false for link that should exist");
if (!existss1)
- FAIL_PUTS_ERROR(" link exists returned FALSE for link that should exist");
+ FAIL_PUTS_ERROR(" link exists returned false for link that should exist");
if (!existsh2)
- FAIL_PUTS_ERROR(" link exists returned FALSE for link that should exist");
+ FAIL_PUTS_ERROR(" link exists returned false for link that should exist");
if (existss2)
- FAIL_PUTS_ERROR(" link exists returned TRUE for link that should not exist");
+ FAIL_PUTS_ERROR(" link exists returned true for link that should not exist");
if (existsh3)
- FAIL_PUTS_ERROR(" link exists returned TRUE for link that should not exist");
+ FAIL_PUTS_ERROR(" link exists returned true for link that should not exist");
if (existsh3)
- FAIL_PUTS_ERROR(" link exists returned TRUE for link that should not exist");
+ FAIL_PUTS_ERROR(" link exists returned true for link that should not exist");
/* Close */
if (H5Gclose_async(parent_group_id, es_id) < 0)
@@ -3342,8 +3342,8 @@ test_ocopy_orefresh(void)
hid_t space_id = H5I_INVALID_HID;
hid_t es_id = H5I_INVALID_HID;
size_t num_in_progress;
- hbool_t op_failed = false;
- hbool_t is_native_vol = false;
+ bool op_failed = false;
+ bool is_native_vol = false;
TESTING("H5Ocopy() and H5Orefresh()");
@@ -3495,12 +3495,12 @@ error:
static int
test_file_reopen(void)
{
- hid_t file_id = H5I_INVALID_HID;
- hid_t fapl_id = H5I_INVALID_HID;
- hid_t reopened_file_id = H5I_INVALID_HID;
- hid_t es_id = H5I_INVALID_HID;
- size_t num_in_progress;
- hbool_t op_failed;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t reopened_file_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ size_t num_in_progress;
+ bool op_failed;
TESTING("H5Freopen()");
@@ -3631,7 +3631,7 @@ H5_api_async_test_parallel(void)
printf("\n * Re-testing with independent metadata reads *\n");
}
- coll_metadata_read = FALSE;
+ coll_metadata_read = false;
for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_async_tests); i++) {
nerrors += (*par_async_tests[i])() ? 1 : 0;
diff --git a/testpar/API/H5_api_dataset_test_parallel.c b/testpar/API/H5_api_dataset_test_parallel.c
index 5639348..0d53d44 100644
--- a/testpar/API/H5_api_dataset_test_parallel.c
+++ b/testpar/API/H5_api_dataset_test_parallel.c
@@ -121,7 +121,7 @@ test_write_dataset_data_verification(void)
TESTING_2("test setup");
- if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, true)) < 0)
TEST_ERROR;
if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
@@ -197,7 +197,7 @@ test_write_dataset_data_verification(void)
{
PART_BEGIN(H5Dwrite_all_read)
{
- hbool_t op_failed = FALSE;
+ bool op_failed = false;
TESTING_2("H5Dwrite using H5S_ALL then H5Dread");
@@ -221,10 +221,10 @@ test_write_dataset_data_verification(void)
if (H5Dwrite(dset_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL,
H5P_DEFAULT, write_buf) < 0)
- op_failed = TRUE;
+ op_failed = true;
}
else
- op_failed = TRUE;
+ op_failed = true;
if (write_buf) {
free(write_buf);
@@ -239,7 +239,7 @@ test_write_dataset_data_verification(void)
PART_ERROR(H5Dwrite_all_read);
}
- if (op_failed == TRUE) {
+ if (op_failed == true) {
H5_FAILED();
printf(" dataset write on rank 0 failed!\n");
PART_ERROR(H5Dwrite_all_read);
@@ -936,7 +936,7 @@ test_write_dataset_independent(void)
return 0;
}
- if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, true)) < 0)
TEST_ERROR;
if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
@@ -1275,7 +1275,7 @@ test_write_dataset_one_proc_0_selection(void)
return 0;
}
- if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, true)) < 0)
TEST_ERROR;
if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
@@ -1572,7 +1572,7 @@ test_write_dataset_one_proc_none_selection(void)
return 0;
}
- if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, true)) < 0)
TEST_ERROR;
if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
@@ -1882,7 +1882,7 @@ test_write_dataset_one_proc_all_selection(void)
return 0;
}
- if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, true)) < 0)
TEST_ERROR;
if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
@@ -2179,7 +2179,7 @@ test_write_dataset_hyper_file_all_mem(void)
return 0;
}
- if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, true)) < 0)
TEST_ERROR;
if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
@@ -2447,7 +2447,7 @@ test_write_dataset_all_file_hyper_mem(void)
return 0;
}
- if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, true)) < 0)
TEST_ERROR;
if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
@@ -2755,7 +2755,7 @@ test_write_dataset_all_file_point_mem(void)
return 0;
}
- if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, true)) < 0)
TEST_ERROR;
if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
@@ -3070,7 +3070,7 @@ test_write_dataset_hyper_file_point_mem(void)
return 0;
}
- if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, true)) < 0)
TEST_ERROR;
if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
@@ -3390,7 +3390,7 @@ test_write_dataset_point_file_hyper_mem(void)
return 0;
}
- if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, true)) < 0)
TEST_ERROR;
if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
@@ -3845,7 +3845,7 @@ test_read_dataset_one_proc_0_selection(void)
/*
* Re-open file on all ranks.
*/
- if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, true)) < 0)
TEST_ERROR;
if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
H5_FAILED();
@@ -4180,7 +4180,7 @@ test_read_dataset_one_proc_none_selection(void)
/*
* Re-open file on all ranks.
*/
- if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, true)) < 0)
TEST_ERROR;
if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
H5_FAILED();
@@ -4526,7 +4526,7 @@ test_read_dataset_one_proc_all_selection(void)
/*
* Re-open file on all ranks.
*/
- if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, true)) < 0)
TEST_ERROR;
if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
H5_FAILED();
@@ -4880,7 +4880,7 @@ test_read_dataset_all_file_hyper_mem(void)
/*
* Re-open file on all ranks.
*/
- if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, true)) < 0)
TEST_ERROR;
if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
H5_FAILED();
@@ -5228,7 +5228,7 @@ test_read_dataset_all_file_point_mem(void)
/*
* Re-open file on all ranks.
*/
- if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, true)) < 0)
TEST_ERROR;
if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
H5_FAILED();
@@ -5581,7 +5581,7 @@ test_read_dataset_hyper_file_point_mem(void)
/*
* Re-open file on all ranks.
*/
- if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, true)) < 0)
TEST_ERROR;
if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
H5_FAILED();
@@ -5935,7 +5935,7 @@ test_read_dataset_point_file_hyper_mem(void)
/*
* Re-open file on all ranks.
*/
- if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, true)) < 0)
TEST_ERROR;
if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
H5_FAILED();
@@ -6413,7 +6413,7 @@ test_write_multi_chunk_dataset_same_shape_read(void)
/*
* Re-open file on all ranks.
*/
- if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, true)) < 0)
TEST_ERROR;
if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
H5_FAILED();
@@ -6876,7 +6876,7 @@ test_write_multi_chunk_dataset_diff_shape_read(void)
/*
* Re-open file on all ranks.
*/
- if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, true)) < 0)
TEST_ERROR;
if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
H5_FAILED();
@@ -7277,7 +7277,7 @@ test_overwrite_multi_chunk_dataset_same_shape_read(void)
/*
* Re-open file on all ranks.
*/
- if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, true)) < 0)
TEST_ERROR;
if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
H5_FAILED();
@@ -7803,7 +7803,7 @@ test_overwrite_multi_chunk_dataset_diff_shape_read(void)
/*
* Re-open file on all ranks.
*/
- if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, true)) < 0)
TEST_ERROR;
if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
H5_FAILED();
diff --git a/testpar/API/H5_api_file_test_parallel.c b/testpar/API/H5_api_file_test_parallel.c
index 9bbcd9c..2d9b5c2 100644
--- a/testpar/API/H5_api_file_test_parallel.c
+++ b/testpar/API/H5_api_file_test_parallel.c
@@ -44,7 +44,7 @@ test_create_file(void)
return 0;
}
- if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, true)) < 0)
TEST_ERROR;
if ((file_id = H5Fcreate(FILE_CREATE_TEST_FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0) {
@@ -93,7 +93,7 @@ test_open_file(void)
TESTING_2("test setup");
- if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, true)) < 0)
TEST_ERROR;
PASSED();
@@ -229,7 +229,7 @@ test_split_comm_file_access(void)
MPI_Comm_rank(comm, &sub_mpi_rank);
/* setup file access template */
- if ((fapl_id = create_mpi_fapl(comm, info, TRUE)) < 0) {
+ if ((fapl_id = create_mpi_fapl(comm, info, true)) < 0) {
err_occurred = 1;
goto access_end;
}
@@ -315,7 +315,7 @@ cleanup_files(void)
{
hid_t fapl_id = H5I_INVALID_HID;
- if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0) {
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, true)) < 0) {
if (MAINPROCESS)
printf(" failed to create FAPL for deleting test files\n");
return;
diff --git a/testpar/API/H5_api_test_parallel.c b/testpar/API/H5_api_test_parallel.c
index 9b1d432..224c1b8 100644
--- a/testpar/API/H5_api_test_parallel.c
+++ b/testpar/API/H5_api_test_parallel.c
@@ -105,7 +105,7 @@ H5_api_test_run(void)
}
hid_t
-create_mpi_fapl(MPI_Comm comm, MPI_Info info, hbool_t coll_md_read)
+create_mpi_fapl(MPI_Comm comm, MPI_Info info, bool coll_md_read)
{
hid_t ret_pl = H5I_INVALID_HID;
@@ -118,7 +118,7 @@ create_mpi_fapl(MPI_Comm comm, MPI_Info info, hbool_t coll_md_read)
goto error;
if (H5Pset_all_coll_metadata_ops(ret_pl, coll_md_read) < 0)
goto error;
- if (H5Pset_coll_metadata_write(ret_pl, TRUE) < 0)
+ if (H5Pset_coll_metadata_write(ret_pl, true) < 0)
goto error;
return ret_pl;
@@ -236,13 +236,13 @@ main(int argc, char **argv)
srand(seed);
- if (NULL == (test_path_prefix = HDgetenv(HDF5_API_TEST_PATH_PREFIX)))
+ if (NULL == (test_path_prefix = getenv(HDF5_API_TEST_PATH_PREFIX)))
test_path_prefix = "";
- HDsnprintf(H5_api_test_parallel_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s%s", test_path_prefix,
- PARALLEL_TEST_FILE_NAME);
+ snprintf(H5_api_test_parallel_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s%s", test_path_prefix,
+ PARALLEL_TEST_FILE_NAME);
- if (NULL == (vol_connector_string = HDgetenv(HDF5_VOL_CONNECTOR))) {
+ if (NULL == (vol_connector_string = getenv(HDF5_VOL_CONNECTOR))) {
if (MAINPROCESS)
printf("No VOL connector selected; using native VOL connector\n");
vol_connector_name = "native";
@@ -253,7 +253,7 @@ main(int argc, char **argv)
BEGIN_INDEPENDENT_OP(copy_connector_string)
{
- if (NULL == (vol_connector_string_copy = HDstrdup(vol_connector_string))) {
+ if (NULL == (vol_connector_string_copy = strdup(vol_connector_string))) {
if (MAINPROCESS)
fprintf(stderr, "Unable to copy VOL connector string\n");
INDEPENDENT_OP_ERROR(copy_connector_string);
@@ -263,7 +263,7 @@ main(int argc, char **argv)
BEGIN_INDEPENDENT_OP(get_connector_name)
{
- if (NULL == (token = HDstrtok(vol_connector_string_copy, " "))) {
+ if (NULL == (token = strtok(vol_connector_string_copy, " "))) {
if (MAINPROCESS)
fprintf(stderr, "Error while parsing VOL connector string\n");
INDEPENDENT_OP_ERROR(get_connector_name);
@@ -273,7 +273,7 @@ main(int argc, char **argv)
vol_connector_name = token;
- if (NULL != (token = HDstrtok(NULL, " "))) {
+ if (NULL != (token = strtok(NULL, " "))) {
vol_connector_info = token;
}
}
@@ -290,7 +290,7 @@ main(int argc, char **argv)
BEGIN_INDEPENDENT_OP(create_fapl)
{
- if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, FALSE)) < 0) {
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, false)) < 0) {
if (MAINPROCESS)
fprintf(stderr, "Unable to create FAPL\n");
INDEPENDENT_OP_ERROR(create_fapl);
@@ -307,7 +307,7 @@ main(int argc, char **argv)
* Otherwise, HDF5 will default to running the tests
* with the native connector, which could be misleading.
*/
- if (0 != HDstrcmp(vol_connector_name, "native")) {
+ if (0 != strcmp(vol_connector_name, "native")) {
htri_t is_registered;
if ((is_registered = H5VLis_connector_registered_by_name(vol_connector_name)) < 0) {
diff --git a/testpar/API/H5_api_test_parallel.h b/testpar/API/H5_api_test_parallel.h
index 44f9440..ae78814 100644
--- a/testpar/API/H5_api_test_parallel.h
+++ b/testpar/API/H5_api_test_parallel.h
@@ -139,7 +139,7 @@ extern char H5_api_test_parallel_filename[];
{ \
if (MAINPROCESS) { \
printf("Testing %-62s", WHAT); \
- HDputs(""); \
+ puts(""); \
fflush(stdout); \
} \
}
@@ -151,7 +151,7 @@ extern char H5_api_test_parallel_filename[];
*/
#define BEGIN_INDEPENDENT_OP(op_name) \
{ \
- hbool_t ind_op_failed = FALSE; \
+ bool ind_op_failed = false; \
\
{
@@ -176,10 +176,10 @@ extern char H5_api_test_parallel_filename[];
}
#define INDEPENDENT_OP_ERROR(op_name) \
- ind_op_failed = TRUE; \
+ ind_op_failed = true; \
goto op_##op_name##_end;
-hid_t create_mpi_fapl(MPI_Comm comm, MPI_Info info, hbool_t coll_md_read);
+hid_t create_mpi_fapl(MPI_Comm comm, MPI_Info info, bool coll_md_read);
int generate_random_parallel_dimensions(int space_rank, hsize_t **dims_out);
extern int mpi_size;
diff --git a/testpar/API/t_bigio.c b/testpar/API/t_bigio.c
index 3ab4c38..e7bdfb0 100644
--- a/testpar/API/t_bigio.c
+++ b/testpar/API/t_bigio.c
@@ -1212,9 +1212,9 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
/* set Parallel access with communicator */
ret = H5Pset_fapl_mpio(ret_pl, comm, info);
VRFY_G((ret >= 0), "");
- ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE);
+ ret = H5Pset_all_coll_metadata_ops(ret_pl, true);
VRFY_G((ret >= 0), "");
- ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
+ ret = H5Pset_coll_metadata_write(ret_pl, true);
VRFY_G((ret >= 0), "");
return (ret_pl);
}
diff --git a/testpar/API/t_chunk_alloc.c b/testpar/API/t_chunk_alloc.c
index 37ea2fa..673563b 100644
--- a/testpar/API/t_chunk_alloc.c
+++ b/testpar/API/t_chunk_alloc.c
@@ -171,7 +171,6 @@ static void
parallel_access_dataset(const char *filename, int chunk_factor, access_type action, hid_t *file_id,
hid_t *dataset)
{
- /* HDF5 gubbins */
hid_t memspace, dataspace; /* HDF5 file identifier */
hid_t access_plist; /* HDF5 ID for file access property list */
herr_t hrc; /* HDF5 return code */
@@ -320,7 +319,6 @@ static void
verify_data(const char *filename, int chunk_factor, write_type write_pattern, int vclose, hid_t *file_id,
hid_t *dataset)
{
- /* HDF5 gubbins */
hid_t dataspace, memspace; /* HDF5 file identifier */
hid_t access_plist; /* HDF5 ID for file access property list */
herr_t hrc; /* HDF5 return code */
diff --git a/testpar/API/t_coll_chunk.c b/testpar/API/t_coll_chunk.c
index b4e057e..99f845f 100644
--- a/testpar/API/t_coll_chunk.c
+++ b/testpar/API/t_coll_chunk.c
@@ -922,7 +922,7 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap
/* Only check chunk optimization mode if selection I/O is not being used -
* selection I/O bypasses this IO mode decision - it's effectively always
* multi chunk currently */
- if (facc_type == FACC_MPIO && /* !H5_use_selection_io_g */ TRUE) {
+ if (facc_type == FACC_MPIO && /* !H5_use_selection_io_g */ true) {
switch (api_option) {
case API_LINK_HARD:
status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, &prop_value);
diff --git a/testpar/API/t_dset.c b/testpar/API/t_dset.c
index 228cadc..0da25b0 100644
--- a/testpar/API/t_dset.c
+++ b/testpar/API/t_dset.c
@@ -3041,10 +3041,10 @@ test_actual_io_mode(int selection_mode)
H5D_mpio_actual_io_mode_t actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE;
const char *filename;
const char *test_name;
- hbool_t direct_multi_chunk_io;
- hbool_t multi_chunk_io;
- hbool_t is_chunked;
- hbool_t is_collective;
+ bool direct_multi_chunk_io;
+ bool multi_chunk_io;
+ bool is_chunked;
+ bool is_collective;
int mpi_size = -1;
int mpi_rank = -1;
int length;
@@ -3401,10 +3401,10 @@ test_actual_io_mode(int selection_mode)
/* Test values */
if (actual_chunk_opt_mode_expected != (H5D_mpio_actual_chunk_opt_mode_t)-1 &&
actual_io_mode_expected != (H5D_mpio_actual_io_mode_t)-1) {
- HDsnprintf(message, sizeof(message), "Actual Chunk Opt Mode has the correct value for %s.\n",
- test_name);
+ snprintf(message, sizeof(message), "Actual Chunk Opt Mode has the correct value for %s.\n",
+ test_name);
VRFY((actual_chunk_opt_mode_write == actual_chunk_opt_mode_expected), message);
- HDsnprintf(message, sizeof(message), "Actual IO Mode has the correct value for %s.\n", test_name);
+ snprintf(message, sizeof(message), "Actual IO Mode has the correct value for %s.\n", test_name);
VRFY((actual_io_mode_write == actual_io_mode_expected), message);
}
else {
@@ -3492,7 +3492,7 @@ actual_io_mode_tests(void)
/* Only run these tests if selection I/O is not being used - selection I/O
* bypasses this IO mode decision - it's effectively always multi chunk
* currently */
- if (/* !H5_use_selection_io_g */ TRUE) {
+ if (/* !H5_use_selection_io_g */ true) {
test_actual_io_mode(TEST_ACTUAL_IO_NO_COLLECTIVE);
/*
@@ -3579,8 +3579,8 @@ test_no_collective_cause_mode(int selection_mode)
const char *filename;
const char *test_name;
- hbool_t is_chunked = 1;
- hbool_t is_independent = 0;
+ bool is_chunked = 1;
+ bool is_independent = 0;
int mpi_size = -1;
int mpi_rank = -1;
int length;
@@ -3822,12 +3822,12 @@ test_no_collective_cause_mode(int selection_mode)
/* Test values */
memset(message, 0, sizeof(message));
- HDsnprintf(message, sizeof(message),
- "Local cause of Broken Collective I/O has the correct value for %s.\n", test_name);
+ snprintf(message, sizeof(message), "Local cause of Broken Collective I/O has the correct value for %s.\n",
+ test_name);
VRFY((no_collective_cause_local_write == no_collective_cause_local_expected), message);
memset(message, 0, sizeof(message));
- HDsnprintf(message, sizeof(message),
- "Global cause of Broken Collective I/O has the correct value for %s.\n", test_name);
+ snprintf(message, sizeof(message),
+ "Global cause of Broken Collective I/O has the correct value for %s.\n", test_name);
VRFY((no_collective_cause_global_write == no_collective_cause_global_expected), message);
/* Release some resources */
@@ -3922,7 +3922,7 @@ dataset_atomicity(void)
herr_t ret; /* Generic return value */
int mpi_size, mpi_rank;
int i, j, k;
- hbool_t atomicity = FALSE;
+ bool atomicity = false;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
@@ -4015,7 +4015,7 @@ dataset_atomicity(void)
/* should fail */
H5E_BEGIN_TRY
{
- ret = H5Fset_mpi_atomicity(fid, TRUE);
+ ret = H5Fset_mpi_atomicity(fid, true);
}
H5E_END_TRY
VRFY((ret == FAIL), "H5Fset_mpi_atomicity failed");
@@ -4038,7 +4038,7 @@ dataset_atomicity(void)
ret = H5Pclose(acc_tpl);
VRFY((ret >= 0), "H5Pclose succeeded");
- ret = H5Fset_mpi_atomicity(fid, TRUE);
+ ret = H5Fset_mpi_atomicity(fid, true);
VRFY((ret >= 0), "H5Fset_mpi_atomicity succeeded");
/* open dataset1 (contiguous case) */
@@ -4059,7 +4059,7 @@ dataset_atomicity(void)
/* check that the atomicity flag is set */
ret = H5Fget_mpi_atomicity(fid, &atomicity);
VRFY((ret >= 0), "atomcity get failed");
- VRFY((atomicity == TRUE), "atomcity set failed");
+ VRFY((atomicity == true), "atomcity set failed");
MPI_Barrier(comm);
@@ -4129,11 +4129,11 @@ dataset_atomicity(void)
read_buf[i] = 8;
}
- atomicity = FALSE;
+ atomicity = false;
/* check that the atomicity flag is set */
ret = H5Fget_mpi_atomicity(fid, &atomicity);
VRFY((ret >= 0), "atomcity get failed");
- VRFY((atomicity == TRUE), "atomcity set failed");
+ VRFY((atomicity == true), "atomcity set failed");
block[0] = (hsize_t)(dim0 / mpi_size - 1);
block[1] = (hsize_t)(dim1 / mpi_size - 1);
diff --git a/testpar/API/t_file.c b/testpar/API/t_file.c
index c906c78..61d009c 100644
--- a/testpar/API/t_file.c
+++ b/testpar/API/t_file.c
@@ -143,7 +143,7 @@ test_page_buffer_access(void)
H5F_t *f = NULL;
herr_t ret; /* generic return value */
const char *filename;
- hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */
+ bool api_ctx_pushed = false; /* Whether API context pushed */
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
@@ -174,7 +174,7 @@ test_page_buffer_access(void)
VRFY((file_id < 0), "H5Fcreate failed");
/* disable collective metadata writes for page buffering to work */
- ret = H5Pset_coll_metadata_write(fapl, FALSE);
+ ret = H5Pset_coll_metadata_write(fapl, false);
VRFY((ret >= 0), "");
ret = create_file(filename, fcpl, fapl, H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
@@ -204,7 +204,7 @@ test_page_buffer_access(void)
ret = H5Pset_page_buffer_size(fapl_self, sizeof(int) * 1000, 0, 0);
VRFY((ret == 0), "");
/* collective metadata writes do not work with page buffering */
- ret = H5Pset_coll_metadata_write(fapl_self, FALSE);
+ ret = H5Pset_coll_metadata_write(fapl_self, false);
VRFY((ret >= 0), "");
file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl_self);
@@ -213,7 +213,7 @@ test_page_buffer_access(void)
/* Push API context */
ret = H5CX_push();
VRFY((ret == 0), "H5CX_push()");
- api_ctx_pushed = TRUE;
+ api_ctx_pushed = true;
/* Get a pointer to the internal file object */
f = (H5F_t *)H5I_object(file_id);
@@ -302,9 +302,9 @@ test_page_buffer_access(void)
/* Pop API context */
if (api_ctx_pushed) {
- ret = H5CX_pop(FALSE);
+ ret = H5CX_pop(false);
VRFY((ret == 0), "H5CX_pop()");
- api_ctx_pushed = FALSE;
+ api_ctx_pushed = false;
}
}
@@ -314,7 +314,7 @@ test_page_buffer_access(void)
ret = H5Pset_page_buffer_size(fapl, sizeof(int) * 1000, 0, 0);
VRFY((ret == 0), "");
/* collective metadata writes do not work with page buffering */
- ret = H5Pset_coll_metadata_write(fapl, FALSE);
+ ret = H5Pset_coll_metadata_write(fapl, false);
VRFY((ret >= 0), "");
file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
@@ -323,7 +323,7 @@ test_page_buffer_access(void)
/* Push API context */
ret = H5CX_push();
VRFY((ret == 0), "H5CX_push()");
- api_ctx_pushed = TRUE;
+ api_ctx_pushed = true;
/* Get a pointer to the internal file object */
f = (H5F_t *)H5I_object(file_id);
@@ -436,9 +436,9 @@ test_page_buffer_access(void)
/* Pop API context */
if (api_ctx_pushed) {
- ret = H5CX_pop(FALSE);
+ ret = H5CX_pop(false);
VRFY((ret == 0), "H5CX_pop()");
- api_ctx_pushed = FALSE;
+ api_ctx_pushed = false;
}
free(data);
@@ -463,7 +463,7 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str
H5F_t *f = NULL;
H5C_t *cache_ptr = NULL;
H5AC_cache_config_t config;
- hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */
+ bool api_ctx_pushed = false; /* Whether API context pushed */
herr_t ret;
file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
@@ -475,7 +475,7 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str
/* Push API context */
ret = H5CX_push();
VRFY((ret == 0), "H5CX_push()");
- api_ctx_pushed = TRUE;
+ api_ctx_pushed = true;
f = (H5F_t *)H5I_object(file_id);
VRFY((f != NULL), "");
@@ -483,7 +483,7 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str
cache_ptr = f->shared->cache;
VRFY((cache_ptr->magic == H5C__H5C_T_MAGIC), "");
- cache_ptr->ignore_tags = TRUE;
+ cache_ptr->ignore_tags = true;
H5C_stats__reset(cache_ptr);
config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
@@ -529,25 +529,25 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str
VRFY((mem_dataspace >= 0), "");
for (k = 0; k < NUM_DSETS; k++) {
- HDsnprintf(dset_name, sizeof(dset_name), "D1dset%d", k);
+ snprintf(dset_name, sizeof(dset_name), "D1dset%d", k);
dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
ret = H5Dclose(dset_id);
VRFY((ret == 0), "");
- HDsnprintf(dset_name, sizeof(dset_name), "D2dset%d", k);
+ snprintf(dset_name, sizeof(dset_name), "D2dset%d", k);
dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
ret = H5Dclose(dset_id);
VRFY((ret == 0), "");
- HDsnprintf(dset_name, sizeof(dset_name), "D3dset%d", k);
+ snprintf(dset_name, sizeof(dset_name), "D3dset%d", k);
dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
ret = H5Dclose(dset_id);
VRFY((ret == 0), "");
- HDsnprintf(dset_name, sizeof(dset_name), "dset%d", k);
+ snprintf(dset_name, sizeof(dset_name), "dset%d", k);
dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
@@ -570,13 +570,13 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str
for (i = 0; i < num_elements; i++)
VRFY((data_array[i] == mpi_rank + 1), "Dataset Verify failed");
- HDsnprintf(dset_name, sizeof(dset_name), "D1dset%d", k);
+ snprintf(dset_name, sizeof(dset_name), "D1dset%d", k);
ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
VRFY((ret == 0), "");
- HDsnprintf(dset_name, sizeof(dset_name), "D2dset%d", k);
+ snprintf(dset_name, sizeof(dset_name), "D2dset%d", k);
ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
VRFY((ret == 0), "");
- HDsnprintf(dset_name, sizeof(dset_name), "D3dset%d", k);
+ snprintf(dset_name, sizeof(dset_name), "D3dset%d", k);
ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
VRFY((ret == 0), "");
}
@@ -592,9 +592,9 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str
/* Pop API context */
if (api_ctx_pushed) {
- ret = H5CX_pop(FALSE);
+ ret = H5CX_pop(false);
VRFY((ret == 0), "H5CX_pop()");
- api_ctx_pushed = FALSE;
+ api_ctx_pushed = false;
}
MPI_Barrier(MPI_COMM_WORLD);
@@ -620,7 +620,7 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t
H5F_t *f = NULL;
H5C_t *cache_ptr = NULL;
H5AC_cache_config_t config;
- hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */
+ bool api_ctx_pushed = false; /* Whether API context pushed */
herr_t ret;
config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
@@ -639,7 +639,7 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t
/* Push API context */
ret = H5CX_push();
VRFY((ret == 0), "H5CX_push()");
- api_ctx_pushed = TRUE;
+ api_ctx_pushed = true;
ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
VRFY((ret == 0), "");
@@ -682,7 +682,7 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t
VRFY((mem_dataspace >= 0), "");
for (k = 0; k < NUM_DSETS; k++) {
- HDsnprintf(dset_name, sizeof(dset_name), "dset%d", k);
+ snprintf(dset_name, sizeof(dset_name), "dset%d", k);
dset_id = H5Dopen2(grp_id, dset_name, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
@@ -728,7 +728,7 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t
while (entry_ptr != NULL) {
assert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- assert(entry_ptr->is_dirty == FALSE);
+ assert(entry_ptr->is_dirty == false);
if (!entry_ptr->is_pinned && !entry_ptr->is_protected) {
ret = H5AC_expunge_entry(f, entry_ptr->type, entry_ptr->addr, 0);
@@ -756,9 +756,9 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t
/* Pop API context */
if (api_ctx_pushed) {
- ret = H5CX_pop(FALSE);
+ ret = H5CX_pop(false);
VRFY((ret == 0), "H5CX_pop()");
- api_ctx_pushed = FALSE;
+ api_ctx_pushed = false;
}
free(data_array);
@@ -778,7 +778,7 @@ test_file_properties(void)
hid_t fid = H5I_INVALID_HID; /* HDF5 file ID */
hid_t fapl_id = H5I_INVALID_HID; /* File access plist */
hid_t fapl_copy_id = H5I_INVALID_HID; /* File access plist */
- hbool_t is_coll;
+ bool is_coll;
htri_t are_equal;
const char *filename;
MPI_Comm comm = MPI_COMM_WORLD;
@@ -847,7 +847,7 @@ test_file_properties(void)
fapl_copy_id = H5Pcopy(fapl_id);
VRFY((fapl_copy_id != H5I_INVALID_HID), "H5Pcopy");
are_equal = H5Pequal(fapl_id, fapl_copy_id);
- VRFY((TRUE == are_equal), "H5Pequal");
+ VRFY((true == are_equal), "H5Pequal");
/* Add a property to the copy and ensure it's different now */
mpi_ret = MPI_Info_set(info, "hdf_info_prop2", "abc");
@@ -855,7 +855,7 @@ test_file_properties(void)
ret = H5Pset_mpi_params(fapl_copy_id, comm, info);
VRFY((ret >= 0), "H5Pset_mpi_params succeeded");
are_equal = H5Pequal(fapl_id, fapl_copy_id);
- VRFY((FALSE == are_equal), "H5Pequal");
+ VRFY((false == are_equal), "H5Pequal");
/* Add a property with the same key but a different value to the original
* and ensure they are still different.
@@ -865,7 +865,7 @@ test_file_properties(void)
ret = H5Pset_mpi_params(fapl_id, comm, info);
VRFY((ret >= 0), "H5Pset_mpi_params succeeded");
are_equal = H5Pequal(fapl_id, fapl_copy_id);
- VRFY((FALSE == are_equal), "H5Pequal");
+ VRFY((false == are_equal), "H5Pequal");
/* Set the second property in the original to the same
* value as the copy and ensure they are the same now.
@@ -875,7 +875,7 @@ test_file_properties(void)
ret = H5Pset_mpi_params(fapl_id, comm, info);
VRFY((ret >= 0), "H5Pset_mpi_params succeeded");
are_equal = H5Pequal(fapl_id, fapl_copy_id);
- VRFY((TRUE == are_equal), "H5Pequal");
+ VRFY((true == are_equal), "H5Pequal");
/* create the file */
fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
@@ -886,12 +886,12 @@ test_file_properties(void)
/* Collective metadata writes */
ret = H5Pget_coll_metadata_write(fapl_id, &is_coll);
VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded");
- VRFY((is_coll == FALSE), "Incorrect property setting for coll metadata writes");
+ VRFY((is_coll == false), "Incorrect property setting for coll metadata writes");
/* Collective metadata read API calling requirement */
ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll);
VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded");
- VRFY((is_coll == FALSE), "Incorrect property setting for coll metadata API calls requirement");
+ VRFY((is_coll == false), "Incorrect property setting for coll metadata API calls requirement");
ret = H5Fclose(fid);
VRFY((ret >= 0), "H5Fclose succeeded");
@@ -907,12 +907,12 @@ test_file_properties(void)
/* Collective metadata writes */
ret = H5Pget_coll_metadata_write(fapl_id, &is_coll);
VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded");
- VRFY((is_coll == FALSE), "Incorrect property setting for coll metadata writes");
+ VRFY((is_coll == false), "Incorrect property setting for coll metadata writes");
/* Collective metadata read API calling requirement */
ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll);
VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded");
- VRFY((is_coll == FALSE), "Incorrect property setting for coll metadata API calls requirement");
+ VRFY((is_coll == false), "Incorrect property setting for coll metadata API calls requirement");
ret = H5Fclose(fid);
VRFY((ret >= 0), "H5Fclose succeeded");
@@ -921,10 +921,10 @@ test_file_properties(void)
ret = H5Pset_fapl_mpio(fapl_id, comm, info);
VRFY((ret >= 0), "H5Pset_fapl_mpio failed");
/* Collective metadata writes */
- ret = H5Pset_coll_metadata_write(fapl_id, TRUE);
+ ret = H5Pset_coll_metadata_write(fapl_id, true);
VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded");
/* Collective metadata read API calling requirement */
- ret = H5Pset_all_coll_metadata_ops(fapl_id, TRUE);
+ ret = H5Pset_all_coll_metadata_ops(fapl_id, true);
VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded");
fid = H5Fopen(filename, H5F_ACC_RDWR, fapl_id);
VRFY((fid != H5I_INVALID_HID), "H5Fcreate succeeded");
@@ -934,12 +934,12 @@ test_file_properties(void)
/* Collective metadata writes */
ret = H5Pget_coll_metadata_write(fapl_id, &is_coll);
VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded");
- VRFY((is_coll == TRUE), "Incorrect property setting for coll metadata writes");
+ VRFY((is_coll == true), "Incorrect property setting for coll metadata writes");
/* Collective metadata read API calling requirement */
ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll);
VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded");
- VRFY((is_coll == TRUE), "Incorrect property setting for coll metadata API calls requirement");
+ VRFY((is_coll == true), "Incorrect property setting for coll metadata API calls requirement");
/* close fapl and retrieve it from file */
ret = H5Pclose(fapl_id);
@@ -954,12 +954,12 @@ test_file_properties(void)
/* Collective metadata writes */
ret = H5Pget_coll_metadata_write(fapl_id, &is_coll);
VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded");
- VRFY((is_coll == TRUE), "Incorrect property setting for coll metadata writes");
+ VRFY((is_coll == true), "Incorrect property setting for coll metadata writes");
/* Collective metadata read API calling requirement */
ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll);
VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded");
- VRFY((is_coll == TRUE), "Incorrect property setting for coll metadata API calls requirement");
+ VRFY((is_coll == true), "Incorrect property setting for coll metadata API calls requirement");
/* close file */
ret = H5Fclose(fid);
@@ -1022,7 +1022,7 @@ test_delete(void)
/* Verify that the file is an HDF5 file */
is_hdf5 = H5Fis_accessible(filename, fapl_id);
- VRFY((TRUE == is_hdf5), "H5Fis_accessible");
+ VRFY((true == is_hdf5), "H5Fis_accessible");
/* Delete the file */
ret = H5Fdelete(filename, fapl_id);
diff --git a/testpar/API/t_file_image.c b/testpar/API/t_file_image.c
index 16cd521..3b582ad 100644
--- a/testpar/API/t_file_image.c
+++ b/testpar/API/t_file_image.c
@@ -78,7 +78,7 @@ file_image_daisy_chain_test(void)
void *image_ptr = NULL;
ssize_t bytes_read;
ssize_t image_len;
- hbool_t vector_ok = TRUE;
+ bool vector_ok = true;
htri_t tri_result;
/* set up MPI parameters */
@@ -100,7 +100,7 @@ file_image_daisy_chain_test(void)
}
/* setup file name */
- HDsnprintf(file_name, 1024, "file_image_daisy_chain_test_%05d.h5", (int)mpi_rank);
+ snprintf(file_name, 1024, "file_image_daisy_chain_test_%05d.h5", (int)mpi_rank);
if (mpi_rank == 0) {
@@ -110,7 +110,7 @@ file_image_daisy_chain_test(void)
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((fapl_id >= 0), "creating fapl");
- err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), FALSE);
+ err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), false);
VRFY((err >= 0), "setting core file driver in fapl.");
file_id = H5Fcreate(file_name, 0, H5P_DEFAULT, fapl_id);
@@ -199,7 +199,7 @@ file_image_daisy_chain_test(void)
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((fapl_id >= 0), "creating fapl");
- err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), FALSE);
+ err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), false);
VRFY((err >= 0), "setting core file driver in fapl.");
err = H5Pset_file_image(fapl_id, image_ptr, (size_t)image_len);
@@ -215,7 +215,7 @@ file_image_daisy_chain_test(void)
VRFY((dset_type_id >= 0), "obtained data set type");
tri_result = H5Tequal(dset_type_id, H5T_NATIVE_INT);
- VRFY((tri_result == TRUE), "verified data set type");
+ VRFY((tri_result == true), "verified data set type");
space_id = H5Dget_space(dset_id);
VRFY((space_id >= 0), "opened data space");
@@ -233,10 +233,10 @@ file_image_daisy_chain_test(void)
err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr);
VRFY((err >= 0), "read received vector.");
- vector_ok = TRUE;
+ vector_ok = true;
for (i = 0; i < mpi_size; i++)
if (vector_ptr[i] != i)
- vector_ok = FALSE;
+ vector_ok = false;
VRFY((vector_ok), "verified received vector.");
free(vector_ptr);
@@ -281,7 +281,7 @@ file_image_daisy_chain_test(void)
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((fapl_id >= 0), "creating fapl");
- err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), FALSE);
+ err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), false);
VRFY((err >= 0), "setting core file driver in fapl.");
err = H5Pset_file_image(fapl_id, image_ptr, (size_t)image_len);
@@ -298,7 +298,7 @@ file_image_daisy_chain_test(void)
VRFY((dset_type_id >= 0), "obtained data set type");
tri_result = H5Tequal(dset_type_id, H5T_NATIVE_INT);
- VRFY((tri_result == TRUE), "verified data set type");
+ VRFY((tri_result == true), "verified data set type");
space_id = H5Dget_space(dset_id);
VRFY((space_id >= 0), "opened data space");
@@ -316,15 +316,15 @@ file_image_daisy_chain_test(void)
err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr);
VRFY((err >= 0), "read received vector.");
- vector_ok = TRUE;
+ vector_ok = true;
for (i = 0; i < mpi_size; i++) {
if (i < mpi_rank) {
if (vector_ptr[i] != i)
- vector_ok = FALSE;
+ vector_ok = false;
}
else {
if (vector_ptr[i] != -1)
- vector_ok = FALSE;
+ vector_ok = false;
}
}
VRFY((vector_ok), "verified received vector.");
diff --git a/testpar/API/t_filter_read.c b/testpar/API/t_filter_read.c
index 06507e4..7275dd9 100644
--- a/testpar/API/t_filter_read.c
+++ b/testpar/API/t_filter_read.c
@@ -189,7 +189,7 @@ filter_read_internal(const char *filename, hid_t dcpl, hsize_t *dset_size)
(unsigned long)(hs_offset[1] + j));
fprintf(stderr, " At original: %d\n", (int)points[i * size[1] + (size_t)hs_offset[1] + j]);
fprintf(stderr, " At returned: %d\n", (int)check[i * hs_size[1] + j]);
- VRFY(FALSE, "");
+ VRFY(false, "");
}
}
}
diff --git a/testpar/API/t_mdset.c b/testpar/API/t_mdset.c
index cc49e7b..7c97898 100644
--- a/testpar/API/t_mdset.c
+++ b/testpar/API/t_mdset.c
@@ -211,7 +211,7 @@ multiple_dset_write(void)
VRFY((ret >= 0), "set fill-value succeeded");
for (n = 0; n < ndatasets; n++) {
- HDsnprintf(dname, sizeof(dname), "dataset %d", n);
+ snprintf(dname, sizeof(dname), "dataset %d", n);
dataset = H5Dcreate2(iof, dname, H5T_NATIVE_DOUBLE, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
VRFY((dataset > 0), dname);
@@ -253,7 +253,7 @@ compact_dataset(void)
herr_t ret;
const char *filename;
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- hbool_t prop_value;
+ bool prop_value;
#endif
size = get_size();
@@ -354,10 +354,10 @@ compact_dataset(void)
VRFY((ret >= 0), "H5Dread succeeded");
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- prop_value = FALSE;
+ prop_value = false;
ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
VRFY((ret >= 0), "H5Pget succeeded");
- VRFY((prop_value == FALSE && dxfer_coll_type == DXFER_COLLECTIVE_IO),
+ VRFY((prop_value == false && dxfer_coll_type == DXFER_COLLECTIVE_IO),
"rank 0 Bcast optimization was performed for a compact dataset");
#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
@@ -658,7 +658,7 @@ dataset_fillvalue(void)
herr_t ret; /* Generic return value */
const char *filename;
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- hbool_t prop_value;
+ bool prop_value;
#endif
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
@@ -737,13 +737,13 @@ dataset_fillvalue(void)
VRFY((ret >= 0), "H5Dread succeeded");
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- prop_value = FALSE;
+ prop_value = false;
ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
VRFY((ret >= 0), "testing property list get succeeded");
if (ii == 0)
- VRFY((prop_value == FALSE), "correctly handled rank 0 Bcast");
+ VRFY((prop_value == false), "correctly handled rank 0 Bcast");
else
- VRFY((prop_value == TRUE), "correctly handled rank 0 Bcast");
+ VRFY((prop_value == true), "correctly handled rank 0 Bcast");
#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
/* Verify all data read are the fill value 0 */
@@ -827,13 +827,13 @@ dataset_fillvalue(void)
VRFY((ret >= 0), "H5Dread succeeded");
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- prop_value = FALSE;
+ prop_value = false;
ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
VRFY((ret >= 0), "testing property list get succeeded");
if (ii == 0)
- VRFY((prop_value == FALSE), "correctly handled rank 0 Bcast");
+ VRFY((prop_value == false), "correctly handled rank 0 Bcast");
else
- VRFY((prop_value == TRUE), "correctly handled rank 0 Bcast");
+ VRFY((prop_value == true), "correctly handled rank 0 Bcast");
#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
/* Verify correct data read */
@@ -975,11 +975,11 @@ collective_group_write(void)
/* creates ngroups groups under the root group, writes chunked
* datasets in parallel. */
for (m = 0; m < ngroups; m++) {
- HDsnprintf(gname, sizeof(gname), "group%d", m);
+ snprintf(gname, sizeof(gname), "group%d", m);
gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((gid > 0), gname);
- HDsnprintf(dname, sizeof(dname), "dataset%d", m);
+ snprintf(dname, sizeof(dname), "dataset%d", m);
did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
VRFY((did > 0), dname);
@@ -1051,7 +1051,7 @@ independent_group_read(void)
}
plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
- H5Pset_all_coll_metadata_ops(plist, FALSE);
+ H5Pset_all_coll_metadata_ops(plist, false);
fid = H5Fopen(filename, H5F_ACC_RDONLY, plist);
VRFY((fid > 0), "H5Fopen");
@@ -1093,12 +1093,12 @@ group_dataset_read(hid_t fid, int mpi_rank, int m)
VRFY((outdata != NULL), "malloc succeeded for outdata");
/* open every group under root group. */
- HDsnprintf(gname, sizeof(gname), "group%d", m);
+ snprintf(gname, sizeof(gname), "group%d", m);
gid = H5Gopen2(fid, gname, H5P_DEFAULT);
VRFY((gid > 0), gname);
/* check the data. */
- HDsnprintf(dname, sizeof(dname), "dataset%d", m);
+ snprintf(dname, sizeof(dname), "dataset%d", m);
did = H5Dopen2(gid, dname, H5P_DEFAULT);
VRFY((did > 0), dname);
@@ -1211,7 +1211,7 @@ multiple_group_write(void)
/* creates ngroups groups under the root group, writes datasets in
* parallel. */
for (m = 0; m < ngroups; m++) {
- HDsnprintf(gname, sizeof(gname), "group%d", m);
+ snprintf(gname, sizeof(gname), "group%d", m);
gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((gid > 0), gname);
@@ -1267,7 +1267,7 @@ write_dataset(hid_t memspace, hid_t filespace, hid_t gid)
VRFY((outme != NULL), "malloc succeeded for outme");
for (n = 0; n < NDATASET; n++) {
- HDsnprintf(dname, sizeof(dname), "dataset%d", n);
+ snprintf(dname, sizeof(dname), "dataset%d", n);
did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((did > 0), dname);
@@ -1305,7 +1305,7 @@ create_group_recursive(hid_t memspace, hid_t filespace, hid_t gid, int counter)
}
#endif /* BARRIER_CHECKS */
- HDsnprintf(gname, sizeof(gname), "%dth_child_group", counter + 1);
+ snprintf(gname, sizeof(gname), "%dth_child_group", counter + 1);
child_gid = H5Gcreate2(gid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((child_gid > 0), gname);
@@ -1376,7 +1376,7 @@ multiple_group_read(void)
/* open every group under root group. */
for (m = 0; m < ngroups; m++) {
- HDsnprintf(gname, sizeof(gname), "group%d", m);
+ snprintf(gname, sizeof(gname), "group%d", m);
gid = H5Gopen2(fid, gname, H5P_DEFAULT);
VRFY((gid > 0), gname);
@@ -1433,7 +1433,7 @@ read_dataset(hid_t memspace, hid_t filespace, hid_t gid)
VRFY((outdata != NULL), "malloc succeeded for outdata");
for (n = 0; n < NDATASET; n++) {
- HDsnprintf(dname, sizeof(dname), "dataset%d", n);
+ snprintf(dname, sizeof(dname), "dataset%d", n);
did = H5Dopen2(gid, dname, H5P_DEFAULT);
VRFY((did > 0), dname);
@@ -1484,7 +1484,7 @@ recursive_read_group(hid_t memspace, hid_t filespace, hid_t gid, int counter)
nerrors += err_num;
if (counter < GROUP_DEPTH) {
- HDsnprintf(gname, sizeof(gname), "%dth_child_group", counter + 1);
+ snprintf(gname, sizeof(gname), "%dth_child_group", counter + 1);
child_gid = H5Gopen2(gid, gname, H5P_DEFAULT);
VRFY((child_gid > 0), gname);
recursive_read_group(memspace, filespace, child_gid, counter + 1);
@@ -1506,7 +1506,7 @@ write_attribute(hid_t obj_id, int this_type, int num)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
if (this_type == is_group) {
- HDsnprintf(attr_name, sizeof(attr_name), "Group Attribute %d", num);
+ snprintf(attr_name, sizeof(attr_name), "Group Attribute %d", num);
sid = H5Screate(H5S_SCALAR);
aid = H5Acreate2(obj_id, attr_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
H5Awrite(aid, H5T_NATIVE_INT, &num);
@@ -1514,7 +1514,7 @@ write_attribute(hid_t obj_id, int this_type, int num)
H5Sclose(sid);
} /* end if */
else if (this_type == is_dset) {
- HDsnprintf(attr_name, sizeof(attr_name), "Dataset Attribute %d", num);
+ snprintf(attr_name, sizeof(attr_name), "Dataset Attribute %d", num);
for (i = 0; i < 8; i++)
attr_data[i] = i;
sid = H5Screate_simple(dspace_rank, dspace_dims, NULL);
@@ -1537,14 +1537,14 @@ read_attribute(hid_t obj_id, int this_type, int num)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
if (this_type == is_group) {
- HDsnprintf(attr_name, sizeof(attr_name), "Group Attribute %d", num);
+ snprintf(attr_name, sizeof(attr_name), "Group Attribute %d", num);
aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT);
H5Aread(aid, H5T_NATIVE_INT, &in_num);
vrfy_errors = dataset_vrfy(NULL, NULL, NULL, group_block, &in_num, &num);
H5Aclose(aid);
}
else if (this_type == is_dset) {
- HDsnprintf(attr_name, sizeof(attr_name), "Dataset Attribute %d", num);
+ snprintf(attr_name, sizeof(attr_name), "Dataset Attribute %d", num);
for (i = 0; i < 8; i++)
out_data[i] = i;
aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT);
@@ -1664,8 +1664,8 @@ io_mode_confusion(void)
* test bed related variables
*/
- const char *fcn_name = "io_mode_confusion";
- const hbool_t verbose = FALSE;
+ const char *fcn_name = "io_mode_confusion";
+ const bool verbose = false;
#if 0
const H5Ptest_param_t *pt;
#endif
@@ -1931,8 +1931,8 @@ rr_obj_hdr_flush_confusion(void)
MPI_Comm comm;
/* test bed related variables */
- const char *fcn_name = "rr_obj_hdr_flush_confusion";
- const hbool_t verbose = FALSE;
+ const char *fcn_name = "rr_obj_hdr_flush_confusion";
+ const bool verbose = false;
/* Create two new private communicators from MPI_COMM_WORLD.
* Even and odd ranked processes go to comm_writers and comm_readers
@@ -2022,8 +2022,8 @@ rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
int steps_done = 0;
/* test bed related variables */
- const char *fcn_name = "rr_obj_hdr_flush_confusion_writer";
- const hbool_t verbose = FALSE;
+ const char *fcn_name = "rr_obj_hdr_flush_confusion_writer";
+ const bool verbose = false;
#if 0
const H5Ptest_param_t *pt;
#endif
@@ -2405,8 +2405,8 @@ rr_obj_hdr_flush_confusion_reader(MPI_Comm comm)
int steps_done = -1; /* How far (steps) have been verified */
/* test bed related variables */
- const char *fcn_name = "rr_obj_hdr_flush_confusion_reader";
- const hbool_t verbose = FALSE;
+ const char *fcn_name = "rr_obj_hdr_flush_confusion_reader";
+ const bool verbose = false;
#if 0
const H5Ptest_param_t *pt;
#endif
diff --git a/testpar/API/t_prop.c b/testpar/API/t_prop.c
index 9c91906..a4d90c4 100644
--- a/testpar/API/t_prop.c
+++ b/testpar/API/t_prop.c
@@ -119,12 +119,12 @@ test_plist_ed(void)
hsize_t max_size[1]; /*data space maximum size */
const char *c_to_f = "x+32";
H5AC_cache_config_t my_cache_config = {H5AC__CURR_CACHE_CONFIG_VERSION,
- TRUE,
- FALSE,
- FALSE,
+ true,
+ false,
+ false,
"temp",
- TRUE,
- FALSE,
+ true,
+ false,
(2 * 2048 * 1024),
0.3,
(64 * 1024 * 1024),
@@ -133,7 +133,7 @@ test_plist_ed(void)
H5C_incr__threshold,
0.8,
3.0,
- TRUE,
+ true,
(8 * 1024 * 1024),
H5C_flash_incr__add_space,
2.0,
@@ -141,10 +141,10 @@ test_plist_ed(void)
H5C_decr__age_out_with_threshold,
0.997,
0.8,
- TRUE,
+ true,
(3 * 1024 * 1024),
3,
- FALSE,
+ false,
0.2,
(256 * 2048),
1 /* H5AC__DEFAULT_METADATA_WRITE_STRATEGY */};
@@ -290,7 +290,7 @@ test_plist_ed(void)
lcpl = H5Pcreate(H5P_LINK_CREATE);
VRFY((lcpl >= 0), "H5Pcreate succeeded");
- ret = H5Pset_create_intermediate_group(lcpl, TRUE);
+ ret = H5Pset_create_intermediate_group(lcpl, true);
VRFY((ret >= 0), "H5Pset_create_intermediate_group succeeded");
ret = test_encode_decode(lcpl, mpi_rank, recv_proc);
@@ -569,7 +569,7 @@ external_links(void)
/* test opening a group that is reached through an external link; the externally
linked file should inherit the source file's access properties */
- HDsnprintf(link_path, sizeof(link_path), "%s%s%s", group_path, "/", link_name);
+ snprintf(link_path, sizeof(link_path), "%s%s%s", group_path, "/", link_name);
group = H5Gopen2(fid, link_path, H5P_DEFAULT);
VRFY((group >= 0), "H5Gopen succeeded");
ret = H5Gclose(group);
@@ -600,10 +600,10 @@ external_links(void)
VRFY((ret >= 0), "H5Pset_elink_fapl succeeded");
tri_status = H5Lexists(fid, link_path, H5P_DEFAULT);
- VRFY((tri_status == TRUE), "H5Lexists succeeded");
+ VRFY((tri_status == true), "H5Lexists succeeded");
tri_status = H5Lexists(fid, link_path, lapl);
- VRFY((tri_status == TRUE), "H5Lexists succeeded");
+ VRFY((tri_status == true), "H5Lexists succeeded");
group = H5Oopen(fid, link_path, H5P_DEFAULT);
VRFY((group >= 0), "H5Oopen succeeded");
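The t_prop.c hunks above touch two link-related property lists: an LCPL with intermediate group creation enabled, and a LAPL carrying a FAPL used when traversing external links. A minimal sketch of that usage with bool/true in place of the old macros; fid, fapl, and the path "/a/b/c" are placeholders, and error checking is omitted.

#include <stdbool.h>
#include "hdf5.h"

static void
link_plist_sketch(hid_t fid, hid_t fapl)
{
    hid_t  lcpl, lapl, gid;
    htri_t exists;

    /* Link creation property list that builds missing intermediate groups. */
    lcpl = H5Pcreate(H5P_LINK_CREATE);
    H5Pset_create_intermediate_group(lcpl, true);
    gid = H5Gcreate2(fid, "/a/b/c", lcpl, H5P_DEFAULT, H5P_DEFAULT);
    H5Gclose(gid);
    H5Pclose(lcpl);

    /* Link access property list whose FAPL is applied to external links. */
    lapl = H5Pcreate(H5P_LINK_ACCESS);
    H5Pset_elink_fapl(lapl, fapl);
    exists = H5Lexists(fid, "/a/b/c", lapl); /* htri_t: > 0 present, 0 absent, < 0 error */
    (void)exists;
    H5Pclose(lapl);
}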
diff --git a/testpar/API/t_shapesame.c b/testpar/API/t_shapesame.c
index 06bb4ff..004ce1e 100644
--- a/testpar/API/t_shapesame.c
+++ b/testpar/API/t_shapesame.c
@@ -130,14 +130,14 @@ struct hs_dr_pio_test_vars_t {
static void
hs_dr_pio_test__setup(const int test_num, const int edge_size, const int checker_edge_size,
const int chunk_edge_size, const int small_rank, const int large_rank,
- const hbool_t use_collective_io, const hid_t dset_type, const int express_test,
+ const bool use_collective_io, const hid_t dset_type, const int express_test,
struct hs_dr_pio_test_vars_t *tv_ptr)
{
#if CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG
const char *fcnName = "hs_dr_pio_test__setup()";
#endif /* CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG */
const char *filename;
- hbool_t mis_match = FALSE;
+ bool mis_match = false;
int i;
int mrc;
int mpi_rank; /* needed by the VRFY macro */
@@ -470,7 +470,7 @@ hs_dr_pio_test__setup(const int test_num, const int edge_size, const int checker
/* verify that the correct data was written to the small data set */
expected_value = 0;
- mis_match = FALSE;
+ mis_match = false;
ptr_1 = tv_ptr->small_ds_buf_1;
i = 0;
@@ -478,12 +478,12 @@ hs_dr_pio_test__setup(const int test_num, const int edge_size, const int checker
if (*ptr_1 != expected_value) {
- mis_match = TRUE;
+ mis_match = true;
}
ptr_1++;
expected_value++;
}
- VRFY((mis_match == FALSE), "small ds init data good.");
+ VRFY((mis_match == false), "small ds init data good.");
/* setup selections for writing initial data to the large data set */
@@ -541,7 +541,7 @@ hs_dr_pio_test__setup(const int test_num, const int edge_size, const int checker
/* verify that the correct data was written to the large data set */
expected_value = 0;
- mis_match = FALSE;
+ mis_match = false;
ptr_1 = tv_ptr->large_ds_buf_1;
i = 0;
@@ -549,12 +549,12 @@ hs_dr_pio_test__setup(const int test_num, const int edge_size, const int checker
if (*ptr_1 != expected_value) {
- mis_match = TRUE;
+ mis_match = true;
}
ptr_1++;
expected_value++;
}
- VRFY((mis_match == FALSE), "large ds init data good.");
+ VRFY((mis_match == false), "large ds init data good.");
/* sync with the other processes before changing data */
mrc = MPI_Barrier(MPI_COMM_WORLD);
@@ -702,7 +702,7 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG
const char *fcnName = "contig_hs_dr_pio_test__run_test()";
#endif /* CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
- hbool_t mis_match = FALSE;
+ bool mis_match = false;
int i, j, k, l;
size_t n;
int mpi_rank; /* needed by the VRFY macro */
@@ -824,7 +824,7 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
* selections as having the same shape.
*/
check = H5Sselect_shape_same(tv_ptr->small_ds_slice_sid, tv_ptr->file_large_ds_sid_0);
- VRFY((check == TRUE), "H5Sselect_shape_same passed");
+ VRFY((check == true), "H5Sselect_shape_same passed");
/* Read selection from disk */
#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG
@@ -842,7 +842,7 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
/* verify that expected data is retrieved */
- mis_match = FALSE;
+ mis_match = false;
ptr_1 = tv_ptr->small_ds_slice_buf;
expected_value =
(uint32_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size *
@@ -854,7 +854,7 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
if (*ptr_1 != expected_value) {
- mis_match = TRUE;
+ mis_match = true;
}
*ptr_1 = 0; /* zero data for next use */
@@ -863,7 +863,7 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
expected_value++;
}
- VRFY((mis_match == FALSE), "small slice read from large ds data good.");
+ VRFY((mis_match == false), "small slice read from large ds data good.");
(tv_ptr->tests_run)++;
}
@@ -910,7 +910,7 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG
const char *fcnName = "contig_hs_dr_pio_test__d2m_s2l()";
#endif /* CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
- hbool_t mis_match = FALSE;
+ bool mis_match = false;
int i, j, k, l;
size_t n;
int mpi_rank; /* needed by the VRFY macro */
@@ -1053,7 +1053,7 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
* selections as having the same shape.
*/
check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid);
- VRFY((check == TRUE), "H5Sselect_shape_same passed");
+ VRFY((check == true), "H5Sselect_shape_same passed");
/* Read selection from disk */
#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG
@@ -1089,7 +1089,7 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
if (*ptr_1 != expected_value) {
- mis_match = TRUE;
+ mis_match = true;
}
expected_value++;
}
@@ -1097,7 +1097,7 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
if (*ptr_1 != 0) {
- mis_match = TRUE;
+ mis_match = true;
}
}
/* zero out the value for the next pass */
@@ -1106,7 +1106,7 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
ptr_1++;
}
- VRFY((mis_match == FALSE), "small slice read from large ds data good.");
+ VRFY((mis_match == false), "small slice read from large ds data good.");
(tv_ptr->tests_run)++;
}
@@ -1155,7 +1155,7 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG
const char *fcnName = "contig_hs_dr_pio_test__m2d_l2s()";
#endif /* CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
- hbool_t mis_match = FALSE;
+ bool mis_match = false;
int i, j, k, l;
size_t n;
int mpi_rank; /* needed by the VRFY macro */
@@ -1319,7 +1319,7 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
* on disk full square selections as having the same shape.
*/
check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid);
- VRFY((check == TRUE), "H5Sselect_shape_same passed.");
+ VRFY((check == true), "H5Sselect_shape_same passed.");
/* write the slice from the in memory large data set to the
* slice of the on disk small dataset. */
@@ -1342,7 +1342,7 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
/* verify that expected data is retrieved */
- mis_match = FALSE;
+ mis_match = false;
ptr_1 = tv_ptr->small_ds_buf_1;
expected_value =
@@ -1363,7 +1363,7 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
if (*ptr_1 != expected_value) {
- mis_match = TRUE;
+ mis_match = true;
}
expected_value++;
}
@@ -1371,7 +1371,7 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
if (*ptr_1 != 0) {
- mis_match = TRUE;
+ mis_match = true;
}
}
/* zero out the value for the next pass */
@@ -1380,7 +1380,7 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
ptr_1++;
}
- VRFY((mis_match == FALSE), "small slice write from large ds data good.");
+ VRFY((mis_match == false), "small slice write from large ds data good.");
(tv_ptr->tests_run)++;
}
@@ -1431,7 +1431,7 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
const char *fcnName = "contig_hs_dr_pio_test__m2d_s2l()";
#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
- hbool_t mis_match = FALSE;
+ bool mis_match = false;
int i, j, k, l;
size_t n;
int mpi_rank; /* needed by the VRFY macro */
@@ -1602,7 +1602,7 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
* as having the same shape.
*/
check = H5Sselect_shape_same(tv_ptr->mem_small_ds_sid, tv_ptr->file_large_ds_sid_0);
- VRFY((check == TRUE), "H5Sselect_shape_same passed");
+ VRFY((check == true), "H5Sselect_shape_same passed");
/* write the small data set slice from memory to the
* target slice of the disk data set
@@ -1650,7 +1650,7 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
if (*ptr_1 != expected_value) {
- mis_match = TRUE;
+ mis_match = true;
}
expected_value++;
@@ -1659,7 +1659,7 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
if (*ptr_1 != 0) {
- mis_match = TRUE;
+ mis_match = true;
}
}
/* zero out buffer for next test */
@@ -1667,7 +1667,7 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
ptr_1++;
}
- VRFY((mis_match == FALSE), "small ds slice write to large ds slice data good.");
+ VRFY((mis_match == false), "small ds slice write to large ds slice data good.");
(tv_ptr->tests_run)++;
}
@@ -1701,7 +1701,7 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
static void
contig_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const int chunk_edge_size,
- const int small_rank, const int large_rank, const hbool_t use_collective_io,
+ const int small_rank, const int large_rank, const bool use_collective_io,
const hid_t dset_type, int express_test, int *skips_ptr, int max_skips,
int64_t *total_tests_ptr, int64_t *tests_run_ptr, int64_t *tests_skipped_ptr,
int mpi_rank)
@@ -1947,7 +1947,7 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
chunk_edge_size = 0;
contig_hs_dr_pio_test__run_test(
- test_num, edge_size, chunk_edge_size, small_rank, large_rank, FALSE, dset_type,
+ test_num, edge_size, chunk_edge_size, small_rank, large_rank, false, dset_type,
express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank);
test_num++;
break;
@@ -1958,7 +1958,7 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
chunk_edge_size = 0;
contig_hs_dr_pio_test__run_test(
- test_num, edge_size, chunk_edge_size, small_rank, large_rank, TRUE, dset_type,
+ test_num, edge_size, chunk_edge_size, small_rank, large_rank, true, dset_type,
express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank);
test_num++;
break;
@@ -1969,7 +1969,7 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
chunk_edge_size = 5;
contig_hs_dr_pio_test__run_test(
- test_num, edge_size, chunk_edge_size, small_rank, large_rank, FALSE, dset_type,
+ test_num, edge_size, chunk_edge_size, small_rank, large_rank, false, dset_type,
express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank);
test_num++;
break;
@@ -1980,14 +1980,14 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
chunk_edge_size = 5;
contig_hs_dr_pio_test__run_test(
- test_num, edge_size, chunk_edge_size, small_rank, large_rank, TRUE, dset_type,
+ test_num, edge_size, chunk_edge_size, small_rank, large_rank, true, dset_type,
express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank);
test_num++;
break;
/* end of case COL_CHUNKED */
default:
- VRFY((FALSE), "unknown test type");
+ VRFY((false), "unknown test type");
break;
} /* end of switch(sstest_type) */
@@ -2046,7 +2046,7 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank, const hid_t tgt_sid, cons
#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
const char *fcnName = "ckrbrd_hs_dr_pio_test__slct_ckrbrd():";
#endif
- hbool_t first_selection = TRUE;
+ bool first_selection = true;
int i, j, k, l, m;
int n_cube_offset;
int sel_offset;
@@ -2249,7 +2249,7 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank, const hid_t tgt_sid, cons
if (first_selection) {
- first_selection = FALSE;
+ first_selection = false;
ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_SET, &(start[n_cube_offset]),
&(stride[n_cube_offset]), &(count[n_cube_offset]),
@@ -2321,7 +2321,7 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank, const hid_t tgt_sid, cons
** ckrbrd_hs_dr_pio_test__verify_data():
**
** Examine the supplied buffer to see if it contains the
-** expected data. Return TRUE if it does, and FALSE
+** expected data. Return true if it does, and false
** otherwise.
**
** The supplied buffer is presumed to be this process's slice
@@ -2374,17 +2374,17 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank, const hid_t tgt_sid, cons
#define CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG 0
-static hbool_t
+static bool
ckrbrd_hs_dr_pio_test__verify_data(uint32_t *buf_ptr, const int rank, const int edge_size,
const int checker_edge_size, uint32_t first_expected_val,
- hbool_t buf_starts_in_checker)
+ bool buf_starts_in_checker)
{
#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
const char *fcnName = "ckrbrd_hs_dr_pio_test__verify_data():";
#endif
- hbool_t good_data = TRUE;
- hbool_t in_checker;
- hbool_t start_in_checker[5];
+ bool good_data = true;
+ bool in_checker;
+ bool start_in_checker[5];
uint32_t expected_value;
uint32_t *val_ptr;
int i, j, k, l, m; /* to track position in n-cube */
@@ -2476,7 +2476,7 @@ do {
if (*val_ptr != expected_value) {
- good_data = FALSE;
+ good_data = false;
}
/* zero out buffer for reuse */
@@ -2484,7 +2484,7 @@ do {
}
else if (*val_ptr != 0) {
- good_data = FALSE;
+ good_data = false;
/* zero out buffer for reuse */
*val_ptr = 0;
@@ -2546,7 +2546,7 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
const char *fcnName = "ckrbrd_hs_dr_pio_test__d2m_l2s()";
uint32_t *ptr_0;
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
- hbool_t data_ok = FALSE;
+ bool data_ok = false;
int i, j, k, l;
uint32_t expected_value;
int mpi_rank; /* needed by VRFY */
@@ -2698,7 +2698,7 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
* selections as having the same shape.
*/
check = H5Sselect_shape_same(tv_ptr->small_ds_slice_sid, tv_ptr->file_large_ds_sid_0);
- VRFY((check == TRUE), "H5Sselect_shape_same passed");
+ VRFY((check == true), "H5Sselect_shape_same passed");
/* Read selection from disk */
#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
@@ -2729,9 +2729,9 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
data_ok = ckrbrd_hs_dr_pio_test__verify_data(
tv_ptr->small_ds_slice_buf, tv_ptr->small_rank - 1, tv_ptr->edge_size,
- tv_ptr->checker_edge_size, expected_value, (hbool_t)TRUE);
+ tv_ptr->checker_edge_size, expected_value, (bool)true);
- VRFY((data_ok == TRUE), "small slice read from large ds data good.");
+ VRFY((data_ok == true), "small slice read from large ds data good.");
(tv_ptr->tests_run)++;
}
@@ -2778,7 +2778,7 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
const char *fcnName = "ckrbrd_hs_dr_pio_test__d2m_s2l()";
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
- hbool_t data_ok = FALSE;
+ bool data_ok = false;
int i, j, k, l;
size_t u;
size_t start_index;
@@ -2920,7 +2920,7 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
* selections as having the same shape.
*/
check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid);
- VRFY((check == TRUE), "H5Sselect_shape_same passed");
+ VRFY((check == true), "H5Sselect_shape_same passed");
/* Read selection from disk */
#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
@@ -2938,7 +2938,7 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
/* verify that the expected data and only the
* expected data was read.
*/
- data_ok = TRUE;
+ data_ok = true;
ptr_1 = tv_ptr->large_ds_buf_1;
expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
start_index =
@@ -2978,7 +2978,7 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
if (*ptr_1 != 0) {
- data_ok = FALSE;
+ data_ok = false;
}
/* zero out the value for the next pass */
@@ -2987,13 +2987,13 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
ptr_1++;
}
- VRFY((data_ok == TRUE), "slice read from small to large ds data good(1).");
+ VRFY((data_ok == true), "slice read from small to large ds data good(1).");
data_ok = ckrbrd_hs_dr_pio_test__verify_data(ptr_1, tv_ptr->small_rank - 1,
tv_ptr->edge_size, tv_ptr->checker_edge_size,
- expected_value, (hbool_t)TRUE);
+ expected_value, (bool)true);
- VRFY((data_ok == TRUE), "slice read from small to large ds data good(2).");
+ VRFY((data_ok == true), "slice read from small to large ds data good(2).");
ptr_1 = tv_ptr->large_ds_buf_1 + stop_index + 1;
@@ -3001,7 +3001,7 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
if (*ptr_1 != 0) {
- data_ok = FALSE;
+ data_ok = false;
}
/* zero out the value for the next pass */
@@ -3010,7 +3010,7 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
ptr_1++;
}
- VRFY((data_ok == TRUE), "slice read from small to large ds data good(3).");
+ VRFY((data_ok == true), "slice read from small to large ds data good(3).");
(tv_ptr->tests_run)++;
}
@@ -3061,7 +3061,7 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG
const char *fcnName = "ckrbrd_hs_dr_pio_test__m2d_l2s()";
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
- hbool_t data_ok = FALSE;
+ bool data_ok = false;
int i, j, k, l;
size_t u;
size_t start_index;
@@ -3242,7 +3242,7 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
* slice of the small data set as having the same shape.
*/
check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_1, tv_ptr->mem_large_ds_sid);
- VRFY((check == TRUE), "H5Sselect_shape_same passed.");
+ VRFY((check == true), "H5Sselect_shape_same passed.");
/* write the checker board selection of the slice from the in
* memory large data set to the slice of the on disk small
@@ -3279,33 +3279,33 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
assert(start_index < stop_index);
assert(stop_index <= tv_ptr->small_ds_size);
- data_ok = TRUE;
+ data_ok = true;
ptr_1 = tv_ptr->small_ds_buf_1;
for (u = 0; u < start_index; u++, ptr_1++) {
if (*ptr_1 != 0) {
- data_ok = FALSE;
+ data_ok = false;
*ptr_1 = 0;
}
}
data_ok &= ckrbrd_hs_dr_pio_test__verify_data(
tv_ptr->small_ds_buf_1 + start_index, tv_ptr->small_rank - 1, tv_ptr->edge_size,
- tv_ptr->checker_edge_size, expected_value, (hbool_t)TRUE);
+ tv_ptr->checker_edge_size, expected_value, (bool)true);
ptr_1 = tv_ptr->small_ds_buf_1;
for (u = stop_index; u < tv_ptr->small_ds_size; u++, ptr_1++) {
if (*ptr_1 != 0) {
- data_ok = FALSE;
+ data_ok = false;
*ptr_1 = 0;
}
}
- VRFY((data_ok == TRUE), "large slice write slice to small slice data good.");
+ VRFY((data_ok == true), "large slice write slice to small slice data good.");
(tv_ptr->tests_run)++;
}
@@ -3356,7 +3356,7 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG
const char *fcnName = "ckrbrd_hs_dr_pio_test__m2d_s2l()";
#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
- hbool_t data_ok = FALSE;
+ bool data_ok = false;
int i, j, k, l;
size_t u;
size_t start_index;
@@ -3533,7 +3533,7 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
* as having the same shape.
*/
check = H5Sselect_shape_same(tv_ptr->mem_small_ds_sid, tv_ptr->file_large_ds_sid_1);
- VRFY((check == TRUE), "H5Sselect_shape_same passed");
+ VRFY((check == true), "H5Sselect_shape_same passed");
/* write the small data set slice from memory to the
* target slice of the disk data set
@@ -3573,33 +3573,33 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
assert(start_index < stop_index);
assert(stop_index < tv_ptr->large_ds_size);
- data_ok = TRUE;
+ data_ok = true;
ptr_1 = tv_ptr->large_ds_buf_1;
for (u = 0; u < start_index; u++, ptr_1++) {
if (*ptr_1 != 0) {
- data_ok = FALSE;
+ data_ok = false;
*ptr_1 = 0;
}
}
data_ok &= ckrbrd_hs_dr_pio_test__verify_data(
tv_ptr->large_ds_buf_1 + start_index, tv_ptr->small_rank - 1, tv_ptr->edge_size,
- tv_ptr->checker_edge_size, expected_value, (hbool_t)TRUE);
+ tv_ptr->checker_edge_size, expected_value, (bool)true);
ptr_1 = tv_ptr->large_ds_buf_1;
for (u = stop_index; u < tv_ptr->small_ds_size; u++, ptr_1++) {
if (*ptr_1 != 0) {
- data_ok = FALSE;
+ data_ok = false;
*ptr_1 = 0;
}
}
- VRFY((data_ok == TRUE), "small ds cb slice write to large ds slice data good.");
+ VRFY((data_ok == true), "small ds cb slice write to large ds slice data good.");
(tv_ptr->tests_run)++;
}
@@ -3634,10 +3634,9 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
static void
ckrbrd_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const int checker_edge_size,
const int chunk_edge_size, const int small_rank, const int large_rank,
- const hbool_t use_collective_io, const hid_t dset_type,
- const int express_test, int *skips_ptr, int max_skips,
- int64_t *total_tests_ptr, int64_t *tests_run_ptr, int64_t *tests_skipped_ptr,
- int mpi_rank)
+ const bool use_collective_io, const hid_t dset_type, const int express_test,
+ int *skips_ptr, int max_skips, int64_t *total_tests_ptr,
+ int64_t *tests_run_ptr, int64_t *tests_skipped_ptr, int mpi_rank)
{
#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG
@@ -3870,7 +3869,7 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
/* contiguous data set, independent I/O */
chunk_edge_size = 0;
ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size,
- small_rank, large_rank, FALSE, dset_type, express_test,
+ small_rank, large_rank, false, dset_type, express_test,
&skips, max_skips, &total_tests, &tests_run,
&tests_skipped, mpi_rank);
test_num++;
@@ -3881,7 +3880,7 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
/* contiguous data set, collective I/O */
chunk_edge_size = 0;
ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size,
- small_rank, large_rank, TRUE, dset_type, express_test,
+ small_rank, large_rank, true, dset_type, express_test,
&skips, max_skips, &total_tests, &tests_run,
&tests_skipped, mpi_rank);
test_num++;
@@ -3892,7 +3891,7 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
/* chunked data set, independent I/O */
chunk_edge_size = 5;
ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size,
- small_rank, large_rank, FALSE, dset_type, express_test,
+ small_rank, large_rank, false, dset_type, express_test,
&skips, max_skips, &total_tests, &tests_run,
&tests_skipped, mpi_rank);
test_num++;
@@ -3903,7 +3902,7 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
/* chunked data set, collective I/O */
chunk_edge_size = 5;
ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size,
- small_rank, large_rank, TRUE, dset_type, express_test,
+ small_rank, large_rank, true, dset_type, express_test,
&skips, max_skips, &total_tests, &tests_run,
&tests_skipped, mpi_rank);
test_num++;
@@ -3911,7 +3910,7 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
/* end of case COL_CHUNKED */
default:
- VRFY((FALSE), "unknown test type");
+ VRFY((false), "unknown test type");
break;
} /* end of switch(sstest_type) */
@@ -4188,9 +4187,9 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
/* set Parallel access with communicator */
ret = H5Pset_fapl_mpio(ret_pl, comm, info);
VRFY((ret >= 0), "");
- ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE);
+ ret = H5Pset_all_coll_metadata_ops(ret_pl, true);
VRFY((ret >= 0), "");
- ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
+ ret = H5Pset_coll_metadata_write(ret_pl, true);
VRFY((ret >= 0), "");
return (ret_pl);
}
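create_faccess_plist() above boils down to three property-list calls. A standalone sketch of the same FAPL setup, assuming an MPI-enabled HDF5 build and omitting the VRFY error checks:

#include <stdbool.h>
#include <mpi.h>
#include "hdf5.h"

static hid_t
make_parallel_fapl(MPI_Comm comm, MPI_Info info)
{
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);

    H5Pset_fapl_mpio(fapl, comm, info);       /* MPI-IO file driver */
    H5Pset_all_coll_metadata_ops(fapl, true); /* collective metadata reads */
    H5Pset_coll_metadata_write(fapl, true);   /* collective metadata writes */

    return fapl;
}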
diff --git a/testpar/API/t_span_tree.c b/testpar/API/t_span_tree.c
index 40ccb8f..e2f148c 100644
--- a/testpar/API/t_span_tree.c
+++ b/testpar/API/t_span_tree.c
@@ -21,7 +21,7 @@
one in collective mode,
2) We will read two datasets with the same hyperslab selection settings,
1. independent read to read independent output,
- independent read to read collecive output,
+ independent read to read collective output,
Compare the result,
If the result is the same, then collective write succeeds.
2. collective read to read independent output,
@@ -498,7 +498,7 @@ coll_write_test(int chunk_factor)
For testing collective hyperslab selection write
In this test, we are using independent read to check
- the correctedness of collective write compared with
+ the correctness of collective write compared with
independent write,
In order to thoroughly test this feature, we choose
@@ -593,7 +593,7 @@ coll_write_test(int chunk_factor)
mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL);
/*
- * Select two hyperslabs in memory. Hyperslabs has the same
+ * Select two hyperslabs in memory. Hyperslabs have the same
* size and shape as the selected hyperslabs for the file dataspace
* Only the starting point is different.
* The first selection
@@ -833,7 +833,7 @@ coll_read_test(void)
mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL);
/*
- * Select two hyperslabs in memory. Hyperslabs has the same
+ * Select two hyperslabs in memory. Hyperslabs have the same
* size and shape as the selected hyperslabs for the file dataspace.
* Only the starting point is different.
* The first selection
@@ -967,7 +967,7 @@ coll_read_test(void)
** sel_rank fastest changing indices, with origin (in the
** higher indices) as indicated by the start array.
**
-** Note that this function, is hard coded to presume a
+** Note that this function is hard-coded to presume a
** maximum dataspace rank of 5.
**
** While this maximum is declared as a constant, increasing
@@ -993,7 +993,7 @@ lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t t
#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
const char *fcnName = "lower_dim_size_comp_test__select_checker_board():";
#endif
- hbool_t first_selection = TRUE;
+ bool first_selection = true;
int i, j, k, l, m;
int ds_offset;
int sel_offset;
@@ -1045,7 +1045,7 @@ lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t t
* Note that the following computation depends on the C99
* requirement that integer division discard any fraction
* (truncation towards zero) to function correctly. As we
- * now require C99, this shouldn't be a problem, but noting
+ * now require C99, this shouldn't be a problem, but note
* it may save us some pain if we are ever obliged to support
* pre-C99 compilers again.
*/
@@ -1074,7 +1074,7 @@ lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t t
/* Now set up the stride and block arrays, and portions of the start
* and count arrays that will not be altered during the selection of
- * the checker board.
+ * the checkerboard.
*/
i = 0;
while (i < ds_offset) {
@@ -1215,7 +1215,7 @@ lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t t
if (first_selection) {
- first_selection = FALSE;
+ first_selection = false;
ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_SET, &(start[ds_offset]),
&(stride[ds_offset]), &(count[ds_offset]),
@@ -1291,16 +1291,16 @@ lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t t
** lower_dim_size_comp_test__verify_data():
**
** Examine the supplied buffer to see if it contains the
-** expected data. Return TRUE if it does, and FALSE
+** expected data. Return true if it does, and false
** otherwise.
**
-** The supplied buffer is presumed to this process's slice
+** The supplied buffer is presumed to be this process's slice
** of the target data set. Each such slice will be an
** n-cube of rank (rank -1) and the supplied edge_size with
** origin (mpi_rank, 0, ... , 0) in the target data set.
**
** Further, the buffer is presumed to be the result of reading
-** or writing a checker board selection of an m (1 <= m <
+** or writing a checkerboard selection of an m (1 <= m <
** rank) dimensional slice through this process's slice
** of the target data set. Also, this slice must be parallel
** to the fastest changing indices.
@@ -1311,7 +1311,7 @@ lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t t
** with the natural numbers listed in order from the origin
** along the fastest changing axis.
**
-** Thus for a 20x10x10 dataset, the value stored in location
+** Thus, for a 20x10x10 dataset, the value stored in location
** (x, y, z) (assuming that z is the fastest changing index
** and x the slowest) is assumed to be:
**
@@ -1319,7 +1319,7 @@ lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t t
**
** Further, supposing that this is process 10, this process's
** slice of the dataset would be a 10 x 10 2-cube with origin
-** (10, 0, 0) in the data set, and would be initialize (prior
+** (10, 0, 0) in the data set, and would be initialized (prior
** to the checkerboard selection) as follows:
**
** 1000, 1001, 1002, ... 1008, 1009
@@ -1344,20 +1344,20 @@ lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t t
#define LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG 0
-static hbool_t
+static bool
lower_dim_size_comp_test__verify_data(uint32_t *buf_ptr,
#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
const int mpi_rank,
#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */
const int rank, const int edge_size, const int checker_edge_size,
- uint32_t first_expected_val, hbool_t buf_starts_in_checker)
+ uint32_t first_expected_val, bool buf_starts_in_checker)
{
#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
const char *fcnName = "lower_dim_size_comp_test__verify_data():";
#endif
- hbool_t good_data = TRUE;
- hbool_t in_checker;
- hbool_t start_in_checker[5];
+ bool good_data = true;
+ bool in_checker;
+ bool start_in_checker[5];
uint32_t expected_value;
uint32_t *val_ptr;
int i, j, k, l, m; /* to track position in n-cube */
@@ -1450,7 +1450,7 @@ lower_dim_size_comp_test__verify_data(uint32_t *buf_ptr,
if (*val_ptr != expected_value) {
- good_data = FALSE;
+ good_data = false;
}
/* zero out buffer for reuse */
@@ -1458,7 +1458,7 @@ lower_dim_size_comp_test__verify_data(uint32_t *buf_ptr,
}
else if (*val_ptr != 0) {
- good_data = FALSE;
+ good_data = false;
/* zero out buffer for reuse */
*val_ptr = 0;
@@ -1506,7 +1506,7 @@ lower_dim_size_comp_test__verify_data(uint32_t *buf_ptr,
#define LDSCT_DS_RANK 5
static void
-lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_collective_io,
+lower_dim_size_comp_test__run_test(const int chunk_edge_size, const bool use_collective_io,
const hid_t dset_type)
{
#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
@@ -1516,8 +1516,8 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
hsize_t max_dims[32];
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
const char *filename;
- hbool_t data_ok = FALSE;
- hbool_t mis_match = FALSE;
+ bool data_ok = false;
+ bool mis_match = false;
int i;
int start_index;
int stop_index;
@@ -1859,10 +1859,10 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
}
check = H5Sselect_valid(mem_small_ds_sid);
- VRFY((check == TRUE), "H5Sselect_valid(mem_small_ds_sid) returns TRUE");
+ VRFY((check == true), "H5Sselect_valid(mem_small_ds_sid) returns true");
check = H5Sselect_valid(file_small_ds_sid);
- VRFY((check == TRUE), "H5Sselect_valid(file_small_ds_sid) returns TRUE");
+ VRFY((check == true), "H5Sselect_valid(file_small_ds_sid) returns true");
/* write the initial value of the small data set to file */
#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
@@ -1893,7 +1893,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
* and reset the buffer to zero in passing.
*/
expected_value = 0;
- mis_match = FALSE;
+ mis_match = false;
ptr_1 = small_ds_buf_1;
i = 0;
@@ -1901,7 +1901,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
if (*ptr_1 != expected_value) {
- mis_match = TRUE;
+ mis_match = true;
}
*ptr_1 = (uint32_t)0;
@@ -1909,7 +1909,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
ptr_1++;
expected_value++;
}
- VRFY((mis_match == FALSE), "small ds init data good.");
+ VRFY((mis_match == false), "small ds init data good.");
/* setup selections for writing initial data to the large data set */
start[0] = (hsize_t)(mpi_rank + 1);
@@ -2017,10 +2017,10 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
check = H5Sselect_valid(mem_large_ds_sid);
- VRFY((check == TRUE), "H5Sselect_valid(mem_large_ds_sid) returns TRUE");
+ VRFY((check == true), "H5Sselect_valid(mem_large_ds_sid) returns true");
check = H5Sselect_valid(file_large_ds_sid);
- VRFY((check == TRUE), "H5Sselect_valid(file_large_ds_sid) returns TRUE");
+ VRFY((check == true), "H5Sselect_valid(file_large_ds_sid) returns true");
/* write the initial value of the large data set to file */
#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
@@ -2054,7 +2054,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
* in passing, reset the buffer to zeros
*/
expected_value = 0;
- mis_match = FALSE;
+ mis_match = false;
ptr_1 = large_ds_buf_1;
i = 0;
@@ -2062,7 +2062,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
if (*ptr_1 != expected_value) {
- mis_match = TRUE;
+ mis_match = true;
}
*ptr_1 = (uint32_t)0;
@@ -2070,7 +2070,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
ptr_1++;
expected_value++;
}
- VRFY((mis_match == FALSE), "large ds init data good.");
+ VRFY((mis_match == false), "large ds init data good.");
/***********************************/
/***** INITIALIZATION COMPLETE *****/
@@ -2108,7 +2108,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
* selections as having the same shape.
*/
check = H5Sselect_shape_same(mem_large_ds_sid, file_small_ds_sid);
- VRFY((check == TRUE), "H5Sselect_shape_same passed (1)");
+ VRFY((check == true), "H5Sselect_shape_same passed (1)");
ret = H5Dread(small_dataset, H5T_NATIVE_UINT32, mem_large_ds_sid, file_small_ds_sid, xfer_plist,
large_ds_buf_1);
@@ -2123,7 +2123,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
/* verify that expected data is retrieved */
- data_ok = TRUE;
+ data_ok = true;
start_index = (int)((large_sel_start[0] * large_dims[1] * large_dims[2] * large_dims[3] * large_dims[4]) +
(large_sel_start[1] * large_dims[2] * large_dims[3] * large_dims[4]) +
@@ -2142,14 +2142,14 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
if (*ptr_1 != (uint32_t)0) {
- data_ok = FALSE;
+ data_ok = false;
*ptr_1 = (uint32_t)0;
}
ptr_1++;
}
- VRFY((data_ok == TRUE), "slice read from small ds data good(1).");
+ VRFY((data_ok == true), "slice read from small ds data good(1).");
data_ok = lower_dim_size_comp_test__verify_data(ptr_1,
#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
@@ -2158,11 +2158,11 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
/* rank */ 2,
/* edge_size */ 10,
/* checker_edge_size */ 3, expected_value,
- /* buf_starts_in_checker */ TRUE);
+ /* buf_starts_in_checker */ true);
- VRFY((data_ok == TRUE), "slice read from small ds data good(2).");
+ VRFY((data_ok == true), "slice read from small ds data good(2).");
- data_ok = TRUE;
+ data_ok = true;
ptr_1 += small_ds_slice_size;
@@ -2170,14 +2170,14 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
if (*ptr_1 != (uint32_t)0) {
- data_ok = FALSE;
+ data_ok = false;
*ptr_1 = (uint32_t)0;
}
ptr_1++;
}
- VRFY((data_ok == TRUE), "slice read from small ds data good(3).");
+ VRFY((data_ok == true), "slice read from small ds data good(3).");
/* read a checkerboard selection of a slice of the process slice of
* the large on disk data set into the process slice of the small
@@ -2205,7 +2205,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
* selections as having the same shape.
*/
check = H5Sselect_shape_same(mem_small_ds_sid, file_large_ds_sid);
- VRFY((check == TRUE), "H5Sselect_shape_same passed (2)");
+ VRFY((check == true), "H5Sselect_shape_same passed (2)");
ret = H5Dread(large_dataset, H5T_NATIVE_UINT32, mem_small_ds_sid, file_large_ds_sid, xfer_plist,
small_ds_buf_1);
@@ -2220,7 +2220,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
/* verify that expected data is retrieved */
- data_ok = TRUE;
+ data_ok = true;
expected_value =
(uint32_t)((large_sel_start[0] * large_dims[1] * large_dims[2] * large_dims[3] * large_dims[4]) +
@@ -2242,14 +2242,14 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
if (*ptr_1 != (uint32_t)0) {
- data_ok = FALSE;
+ data_ok = false;
*ptr_1 = (uint32_t)0;
}
ptr_1++;
}
- VRFY((data_ok == TRUE), "slice read from large ds data good(1).");
+ VRFY((data_ok == true), "slice read from large ds data good(1).");
data_ok = lower_dim_size_comp_test__verify_data(ptr_1,
#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
@@ -2258,11 +2258,11 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
/* rank */ 2,
/* edge_size */ 10,
/* checker_edge_size */ 3, expected_value,
- /* buf_starts_in_checker */ TRUE);
+ /* buf_starts_in_checker */ true);
- VRFY((data_ok == TRUE), "slice read from large ds data good(2).");
+ VRFY((data_ok == true), "slice read from large ds data good(2).");
- data_ok = TRUE;
+ data_ok = true;
ptr_1 += small_ds_slice_size;
@@ -2277,14 +2277,14 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */
- data_ok = FALSE;
+ data_ok = false;
*ptr_1 = (uint32_t)0;
}
ptr_1++;
}
- VRFY((data_ok == TRUE), "slice read from large ds data good(3).");
+ VRFY((data_ok == true), "slice read from large ds data good(3).");
/* Close dataspaces */
ret = H5Sclose(full_mem_small_ds_sid);
@@ -2373,10 +2373,10 @@ lower_dim_size_comp_test(void)
HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned));
for (use_collective_io = 0; use_collective_io <= 1; use_collective_io++) {
chunk_edge_size = 0;
- lower_dim_size_comp_test__run_test(chunk_edge_size, (hbool_t)use_collective_io, H5T_NATIVE_UINT);
+ lower_dim_size_comp_test__run_test(chunk_edge_size, (bool)use_collective_io, H5T_NATIVE_UINT);
chunk_edge_size = 5;
- lower_dim_size_comp_test__run_test(chunk_edge_size, (hbool_t)use_collective_io, H5T_NATIVE_UINT);
+ lower_dim_size_comp_test__run_test(chunk_edge_size, (bool)use_collective_io, H5T_NATIVE_UINT);
} /* end for */
return;
@@ -2398,11 +2398,11 @@ lower_dim_size_comp_test(void)
*
* 1) Reads or writes exactly one chunk,
*
- * 2) Has no in memory buffer for any other chunk.
+ * 2) Has no in-memory buffer for any other chunk.
*
- * The test differers from Rob Latham's bug report in
- * that is runs with an arbitrary number of processes,
- * and uses a 1 dimensional dataset.
+ * The test differs from Rob Latham's bug report in
+ * that it runs with an arbitrary number of processes,
+ * and uses a 1-dimensional dataset.
*
* Return: void
*-------------------------------------------------------------------------
@@ -2415,7 +2415,7 @@ link_chunk_collective_io_test(void)
{
/* const char *fcnName = "link_chunk_collective_io_test()"; */
const char *filename;
- hbool_t mis_match = FALSE;
+ bool mis_match = false;
int i;
int mrc;
int mpi_rank;
@@ -2552,7 +2552,7 @@ link_chunk_collective_io_test(void)
VRFY((ret >= 0), "H5Pclose(xfer_plist) succeeded");
/* verify the data */
- mis_match = FALSE;
+ mis_match = false;
for (i = 0; i < LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE; i++) {
diff = local_data_written[i] - local_data_read[i];
@@ -2560,10 +2560,10 @@ link_chunk_collective_io_test(void)
if (diff >= 0.001) {
- mis_match = TRUE;
+ mis_match = true;
}
}
- VRFY((mis_match == FALSE), "dataset data good.");
+ VRFY((mis_match == false), "dataset data good.");
/* Close dataspaces */
ret = H5Sclose(write_mem_ds_sid);
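The checkerboard selection code in this file (and its twin in t_shapesame.c) builds the selection by issuing H5S_SELECT_SET for the first hyperslab and H5S_SELECT_OR for every later one, as the first_selection flag above shows. A simplified 2-D sketch of that pattern follows, assuming sid is an edge x edge simple dataspace and edge is a multiple of 2 * checker; the tests themselves go up to rank 5.

#include <stdbool.h>
#include "hdf5.h"

/* Select the "dark" squares of a 2-D checkerboard: one hyperslab covers the
 * (even, even) block positions, a second OR'd hyperslab covers (odd, odd). */
static void
checkerboard_2d_sketch(hid_t sid, hsize_t edge, hsize_t checker)
{
    bool    first_selection = true;
    hsize_t start[2], stride[2], count[2], block[2];
    int     pass;

    stride[0] = stride[1] = 2 * checker;
    block[0]  = block[1]  = checker;
    count[0]  = count[1]  = edge / (2 * checker);

    for (pass = 0; pass < 2; pass++) {
        start[0] = start[1] = (pass == 0) ? 0 : checker;

        if (first_selection) {
            first_selection = false;
            H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
        }
        else
            H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, stride, count, block);
    }
}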
diff --git a/testpar/API/testphdf5.c b/testpar/API/testphdf5.c
index a341112..1d42c61 100644
--- a/testpar/API/testphdf5.c
+++ b/testpar/API/testphdf5.c
@@ -277,9 +277,9 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
/* set Parallel access with communicator */
ret = H5Pset_fapl_mpio(ret_pl, comm, info);
VRFY((ret >= 0), "");
- ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE);
+ ret = H5Pset_all_coll_metadata_ops(ret_pl, true);
VRFY((ret >= 0), "");
- ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
+ ret = H5Pset_coll_metadata_write(ret_pl, true);
VRFY((ret >= 0), "");
return (ret_pl);
}
diff --git a/testpar/API/testphdf5.h b/testpar/API/testphdf5.h
index 545ab0f..59dd577 100644
--- a/testpar/API/testphdf5.h
+++ b/testpar/API/testphdf5.h
@@ -39,12 +39,12 @@ enum H5TEST_COLL_CHUNK_API {
API_MULTI_IND
};
-#ifndef FALSE
-#define FALSE 0
+#ifndef false
+#define false 0
#endif
-#ifndef TRUE
-#define TRUE 1
+#ifndef true
+#define true 1
#endif
/* Constants definitions */
@@ -94,8 +94,8 @@ enum H5TEST_COLL_CHUNK_API {
#define LINK_TRUE_NUM_CHUNK 2
#define LINK_FALSE_NUM_CHUNK 6
#define MULTI_TRUE_PERCENT 50
-#define LINK_TRUE_CHUNK_NAME "h5_link_chunk_true"
-#define LINK_FALSE_CHUNK_NAME "h5_link_chunk_false"
+#define LINK_TRUE_CHUNK_NAME "h5_link_chunk_TRUE"
+#define LINK_FALSE_CHUNK_NAME "h5_link_chunk_FALSE"
#define LINK_HARD_CHUNK_NAME "h5_link_chunk_hard"
#define MULTI_HARD_CHUNK_NAME "h5_multi_chunk_hard"
#define MULTI_COLL_CHUNK_NAME "h5_multi_chunk_coll"
@@ -217,8 +217,8 @@ enum H5TEST_COLL_CHUNK_API {
/* Collective chunk instrumentation properties */
#define H5D_XFER_COLL_CHUNK_LINK_HARD_NAME "coll_chunk_link_hard"
#define H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME "coll_chunk_multi_hard"
-#define H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME "coll_chunk_link_true"
-#define H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME "coll_chunk_link_false"
+#define H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME "coll_chunk_link_TRUE"
+#define H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME "coll_chunk_link_FALSE"
#define H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME "coll_chunk_multi_coll"
#define H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME "coll_chunk_multi_ind"
@@ -230,8 +230,8 @@ enum H5TEST_COLL_CHUNK_API {
#define H5D_XFER_COLL_RANK0_BCAST_NAME "coll_rank0_bcast"
/* Definitions for general collective I/O instrumentation properties */
-#define H5D_XFER_COLL_RANK0_BCAST_SIZE sizeof(hbool_t)
-#define H5D_XFER_COLL_RANK0_BCAST_DEF FALSE
+#define H5D_XFER_COLL_RANK0_BCAST_SIZE sizeof(bool)
+#define H5D_XFER_COLL_RANK0_BCAST_DEF false
#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
/* type definitions */
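The instrumentation property names defined above are attached to dataset transfer property lists when the library is built with H5_HAVE_INSTRUMENTED_LIBRARY. A rough sketch of how a test might insert one of them and read it back after an I/O call; the callback arguments and lifetime handling in the real tests may differ.

#include <stdbool.h>
#include "hdf5.h"

static void
rank0_bcast_prop_sketch(hid_t dxpl)
{
    bool prop_value = false; /* matches H5D_XFER_COLL_RANK0_BCAST_DEF */

    /* Temporary property on this plist instance; no callbacks needed here. */
    H5Pinsert2(dxpl, "coll_rank0_bcast", sizeof(bool), &prop_value,
               NULL, NULL, NULL, NULL, NULL, NULL);

    /* ... perform the collective read using dxpl ... */

    /* If the library is instrumented, it is expected to update this property
     * to indicate whether the rank-0 broadcast optimization was used. */
    H5Pget(dxpl, "coll_rank0_bcast", &prop_value);
}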
diff --git a/testpar/t_2Gio.c b/testpar/t_2Gio.c
index 24eb3ba..c2aac77 100644
--- a/testpar/t_2Gio.c
+++ b/testpar/t_2Gio.c
@@ -245,7 +245,7 @@ parse_options(int argc, char **argv)
hid_t
create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
{
- hid_t ret_pl = -1;
+ hid_t ret_pl = H5I_INVALID_HID;
herr_t ret; /* generic return value */
int mpi_rank; /* mpi variables */
@@ -262,9 +262,9 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
/* set Parallel access with communicator */
ret = H5Pset_fapl_mpio(ret_pl, comm, info);
VRFY((ret >= 0), "");
- ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE);
+ ret = H5Pset_all_coll_metadata_ops(ret_pl, true);
VRFY((ret >= 0), "");
- ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
+ ret = H5Pset_coll_metadata_write(ret_pl, true);
VRFY((ret >= 0), "");
return (ret_pl);
}
@@ -3344,10 +3344,10 @@ test_actual_io_mode(int selection_mode)
H5D_mpio_actual_io_mode_t actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE;
const char *filename;
const char *test_name;
- hbool_t direct_multi_chunk_io;
- hbool_t multi_chunk_io;
- hbool_t is_chunked;
- hbool_t is_collective;
+ bool direct_multi_chunk_io;
+ bool multi_chunk_io;
+ bool is_chunked;
+ bool is_collective;
int mpi_size = -1;
int mpi_rank = -1;
int length;
@@ -3355,16 +3355,16 @@ test_actual_io_mode(int selection_mode)
int i;
MPI_Comm mpi_comm = MPI_COMM_NULL;
MPI_Info mpi_info = MPI_INFO_NULL;
- hid_t fid = -1;
- hid_t sid = -1;
- hid_t dataset = -1;
+ hid_t fid = H5I_INVALID_HID;
+ hid_t sid = H5I_INVALID_HID;
+ hid_t dataset = H5I_INVALID_HID;
hid_t data_type = H5T_NATIVE_INT;
- hid_t fapl_id = -1;
- hid_t mem_space = -1;
- hid_t file_space = -1;
- hid_t dcpl = -1;
- hid_t dxpl_write = -1;
- hid_t dxpl_read = -1;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t mem_space = H5I_INVALID_HID;
+ hid_t file_space = H5I_INVALID_HID;
+ hid_t dcpl = H5I_INVALID_HID;
+ hid_t dxpl_write = H5I_INVALID_HID;
+ hid_t dxpl_read = H5I_INVALID_HID;
hsize_t dims[MAX_RANK];
hsize_t chunk_dims[MAX_RANK];
hsize_t start[MAX_RANK];
@@ -3693,10 +3693,10 @@ test_actual_io_mode(int selection_mode)
/* Test values */
if (actual_chunk_opt_mode_expected != (H5D_mpio_actual_chunk_opt_mode_t)-1 &&
actual_io_mode_expected != (H5D_mpio_actual_io_mode_t)-1) {
- HDsnprintf(message, sizeof(message), "Actual Chunk Opt Mode has the correct value for %s.\n",
- test_name);
+ snprintf(message, sizeof(message), "Actual Chunk Opt Mode has the correct value for %s.\n",
+ test_name);
VRFY((actual_chunk_opt_mode_write == actual_chunk_opt_mode_expected), message);
- HDsnprintf(message, sizeof(message), "Actual IO Mode has the correct value for %s.\n", test_name);
+ snprintf(message, sizeof(message), "Actual IO Mode has the correct value for %s.\n", test_name);
VRFY((actual_io_mode_write == actual_io_mode_expected), message);
}
else {
@@ -3858,8 +3858,8 @@ test_no_collective_cause_mode(int selection_mode)
const char *filename;
const char *test_name;
- hbool_t is_chunked = 1;
- hbool_t is_independent = 0;
+ bool is_chunked = 1;
+ bool is_independent = 0;
int mpi_size = -1;
int mpi_rank = -1;
int length;
@@ -3867,17 +3867,17 @@ test_no_collective_cause_mode(int selection_mode)
int i;
MPI_Comm mpi_comm;
MPI_Info mpi_info;
- hid_t fid = -1;
- hid_t sid = -1;
- hid_t dataset = -1;
+ hid_t fid = H5I_INVALID_HID;
+ hid_t sid = H5I_INVALID_HID;
+ hid_t dataset = H5I_INVALID_HID;
hid_t data_type = H5T_NATIVE_INT;
- hid_t fapl_id = -1;
- hid_t dcpl = -1;
- hid_t dxpl_write = -1;
- hid_t dxpl_read = -1;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dcpl = H5I_INVALID_HID;
+ hid_t dxpl_write = H5I_INVALID_HID;
+ hid_t dxpl_read = H5I_INVALID_HID;
hsize_t dims[MAX_RANK];
- hid_t mem_space = -1;
- hid_t file_space = -1;
+ hid_t mem_space = H5I_INVALID_HID;
+ hid_t file_space = H5I_INVALID_HID;
hsize_t chunk_dims[MAX_RANK];
herr_t ret;
/* set to global value as default */
@@ -4135,12 +4135,12 @@ test_no_collective_cause_mode(int selection_mode)
/* Test values */
memset(message, 0, sizeof(message));
- HDsnprintf(message, sizeof(message),
- "Local cause of Broken Collective I/O has the correct value for %s.\n", test_name);
+ snprintf(message, sizeof(message), "Local cause of Broken Collective I/O has the correct value for %s.\n",
+ test_name);
VRFY((no_collective_cause_local_write == no_collective_cause_local_expected), message);
memset(message, 0, sizeof(message));
- HDsnprintf(message, sizeof(message),
- "Global cause of Broken Collective I/O has the correct value for %s.\n", test_name);
+ snprintf(message, sizeof(message),
+ "Global cause of Broken Collective I/O has the correct value for %s.\n", test_name);
VRFY((no_collective_cause_global_write == no_collective_cause_global_expected), message);
/* Release some resources */
@@ -4278,6 +4278,8 @@ main(int argc, char **argv)
MPI_Comm_size(test_comm, &mpi_size);
MPI_Comm_rank(test_comm, &mpi_rank);
+ mpi_rank_framework_g = mpi_rank;
+
memset(filenames, 0, sizeof(filenames));
dim0 = BIG_X_FACTOR;
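Two smaller conventions show up throughout the t_2Gio.c hunks: hid_t handles are initialized to H5I_INVALID_HID instead of -1, and VRFY messages are built with plain snprintf. A small sketch of the cleanup idiom this enables; the file name is a placeholder and error reporting is omitted.

#include <stdio.h>
#include "hdf5.h"

static void
handle_cleanup_sketch(const char *filename)
{
    hid_t fapl = H5I_INVALID_HID;
    hid_t fid  = H5I_INVALID_HID;
    char  message[256];

    fapl = H5Pcreate(H5P_FILE_ACCESS);
    fid  = H5Fopen(filename, H5F_ACC_RDONLY, fapl);

    snprintf(message, sizeof(message), "H5Fopen of %s %s", filename,
             (fid >= 0) ? "succeeded" : "failed");
    puts(message);

    /* H5I_INVALID_HID is the library's named "no handle" value, so a single
     * >= 0 test decides whether each handle still needs to be closed. */
    if (fid >= 0)
        H5Fclose(fid);
    if (fapl >= 0)
        H5Pclose(fapl);
}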
diff --git a/testpar/t_bigio.c b/testpar/t_bigio.c
index 3a28022..2726f91 100644
--- a/testpar/t_bigio.c
+++ b/testpar/t_bigio.c
@@ -1107,10 +1107,10 @@ single_rank_independent_io(void)
if (MAIN_PROCESS) {
hsize_t dims[1];
- hid_t file_id = -1;
- hid_t fapl_id = -1;
- hid_t dset_id = -1;
- hid_t fspace_id = -1;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
herr_t ret;
int *data = NULL;
uint64_t i;
@@ -1181,7 +1181,7 @@ single_rank_independent_io(void)
hid_t
create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
{
- hid_t ret_pl = -1;
+ hid_t ret_pl = H5I_INVALID_HID;
herr_t ret; /* generic return value */
int mpi_rank; /* mpi variables */
@@ -1198,9 +1198,9 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
/* set Parallel access with communicator */
ret = H5Pset_fapl_mpio(ret_pl, comm, info);
VRFY_G((ret >= 0), "");
- ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE);
+ ret = H5Pset_all_coll_metadata_ops(ret_pl, true);
VRFY_G((ret >= 0), "");
- ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
+ ret = H5Pset_coll_metadata_write(ret_pl, true);
VRFY_G((ret >= 0), "");
return (ret_pl);
}
diff --git a/testpar/t_cache.c b/testpar/t_cache.c
index 0d18851..0f845ef 100644
--- a/testpar/t_cache.c
+++ b/testpar/t_cache.c
@@ -31,9 +31,9 @@
#define BASE_ADDR (haddr_t)1024
-int nerrors = 0;
-int failures = 0;
-hbool_t verbose = TRUE; /* used to control error messages */
+int nerrors = 0;
+int failures = 0;
+bool verbose = true; /* used to control error messages */
#define NFILENAME 2
const char *FILENAME[NFILENAME] = {"CacheTestDummy", NULL};
@@ -41,15 +41,15 @@ const char *FILENAME[NFILENAME] = {"CacheTestDummy", NULL};
#define PATH_MAX 512
#endif /* !PATH_MAX */
char *filenames[NFILENAME];
-hid_t fapl; /* file access property list */
-haddr_t max_addr = 0; /* used to store the end of
- * the address space used by
- * the data array (see below).
- */
-hbool_t callbacks_verbose = FALSE; /* flag used to control whether
- * the callback functions are in
- * verbose mode.
- */
+hid_t fapl; /* file access property list */
+haddr_t max_addr = 0; /* used to store the end of
+ * the address space used by
+ * the data array (see below).
+ */
+bool callbacks_verbose = false; /* flag used to control whether
+ * the callback functions are in
+ * verbose mode.
+ */
int world_mpi_size = -1;
int world_mpi_rank = -1;
@@ -161,13 +161,13 @@ struct datum {
size_t len;
size_t local_len;
int ver;
- hbool_t dirty;
- hbool_t valid;
- hbool_t locked;
- hbool_t global_pinned;
- hbool_t local_pinned;
- hbool_t cleared;
- hbool_t flushed;
+ bool dirty;
+ bool valid;
+ bool locked;
+ bool global_pinned;
+ bool local_pinned;
+ bool cleared;
+ bool flushed;
int reads;
int writes;
int index;
@@ -234,7 +234,7 @@ int *data_index = NULL;
* reads and writes. Without some such mechanism, the test code contains
* race conditions that will frequently cause spurious failures.
*
- * When set to TRUE, DO_WRITE_REQ_ACK forces the server to send an ack after
+ * When set to true, DO_WRITE_REQ_ACK forces the server to send an ack after
* each write request, and the client to wait until the ack is received
* before proceeding. This was my first solution to the problem, and at
* first glance, it would seem to have a lot of unnecessary overhead.
@@ -251,13 +251,13 @@ int *data_index = NULL;
*
* Thus I have left code supporting the second solution in place.
*
- * Note that while one of these two #defines must be set to TRUE, there
- * should never be any need to set both of them to TRUE (although the
+ * Note that while one of these two #defines must be set to true, there
+ * should never be any need to set both of them to true (although the
* tests will still function with this setting).
*****************************************************************************/
-#define DO_WRITE_REQ_ACK TRUE
-#define DO_SYNC_AFTER_WRITE FALSE
+#define DO_WRITE_REQ_ACK true
+#define DO_SYNC_AFTER_WRITE false
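The comment above describes the handshake that DO_WRITE_REQ_ACK turns on: the client does not proceed until the server has acknowledged the write. A bare-bones sketch of that kind of request/ack exchange with plain MPI point-to-point calls; the tags, payload, and server rank are placeholders rather than the test's actual mssg_t protocol.

#include <mpi.h>

#define WRITE_REQ_TAG 1
#define WRITE_ACK_TAG 2

/* Client side: send the write request, then block until the ack arrives so
 * the write is known to be complete before the next operation starts. */
static void
send_write_and_wait_for_ack(int server_rank, const int *payload, int count)
{
    int ack = 0;

    MPI_Send(payload, count, MPI_INT, server_rank, WRITE_REQ_TAG, MPI_COMM_WORLD);
    MPI_Recv(&ack, 1, MPI_INT, server_rank, WRITE_ACK_TAG, MPI_COMM_WORLD,
             MPI_STATUS_IGNORE);
}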
/*****************************************************************************
* struct mssg
@@ -332,7 +332,7 @@ static void reset_stats(void);
/* MPI setup functions */
-static hbool_t set_up_file_communicator(void);
+static bool set_up_file_communicator(void);
/* data array manipulation functions */
@@ -347,29 +347,29 @@ static int get_max_nerrors(void);
/* mssg xfer related functions */
-static hbool_t recv_mssg(struct mssg_t *mssg_ptr, int mssg_tag_offset);
-static hbool_t send_mssg(struct mssg_t *mssg_ptr, hbool_t add_req_to_tag);
-static hbool_t setup_derived_types(void);
-static hbool_t takedown_derived_types(void);
+static bool recv_mssg(struct mssg_t *mssg_ptr, int mssg_tag_offset);
+static bool send_mssg(struct mssg_t *mssg_ptr, bool add_req_to_tag);
+static bool setup_derived_types(void);
+static bool takedown_derived_types(void);
/* server functions */
-static hbool_t reset_server_counters(void);
-static hbool_t server_main(void);
-static hbool_t serve_read_request(struct mssg_t *mssg_ptr);
-static hbool_t serve_sync_request(struct mssg_t *mssg_ptr);
-static hbool_t serve_write_request(struct mssg_t *mssg_ptr);
-static hbool_t serve_total_writes_request(struct mssg_t *mssg_ptr);
-static hbool_t serve_total_reads_request(struct mssg_t *mssg_ptr);
-static hbool_t serve_entry_writes_request(struct mssg_t *mssg_ptr);
-static hbool_t serve_entry_reads_request(struct mssg_t *mssg_ptr);
-static hbool_t serve_rw_count_reset_request(struct mssg_t *mssg_ptr);
+static bool reset_server_counters(void);
+static bool server_main(void);
+static bool serve_read_request(struct mssg_t *mssg_ptr);
+static bool serve_sync_request(struct mssg_t *mssg_ptr);
+static bool serve_write_request(struct mssg_t *mssg_ptr);
+static bool serve_total_writes_request(struct mssg_t *mssg_ptr);
+static bool serve_total_reads_request(struct mssg_t *mssg_ptr);
+static bool serve_entry_writes_request(struct mssg_t *mssg_ptr);
+static bool serve_entry_reads_request(struct mssg_t *mssg_ptr);
+static bool serve_rw_count_reset_request(struct mssg_t *mssg_ptr);
/* call back functions & related data structures */
static herr_t datum_get_initial_load_size(void *udata_ptr, size_t *image_len_ptr);
-static void *datum_deserialize(const void *image_ptr, size_t len, void *udata_ptr, hbool_t *dirty_ptr);
+static void *datum_deserialize(const void *image_ptr, size_t len, void *udata_ptr, bool *dirty_ptr);
static herr_t datum_image_len(const void *thing, size_t *image_len_ptr);
@@ -417,45 +417,45 @@ const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] = {{
/* test utility functions */
-static void expunge_entry(H5F_t *file_ptr, int32_t idx);
-static void insert_entry(H5C_t *cache_ptr, H5F_t *file_ptr, int32_t idx, unsigned int flags);
-static void local_pin_and_unpin_random_entries(H5F_t *file_ptr, int min_idx, int max_idx, int min_count,
- int max_count);
-static void local_pin_random_entry(H5F_t *file_ptr, int min_idx, int max_idx);
-static void local_unpin_all_entries(H5F_t *file_ptr, hbool_t via_unprotect);
-static int local_unpin_next_pinned_entry(H5F_t *file_ptr, int start_idx, hbool_t via_unprotect);
-static void lock_and_unlock_random_entries(H5F_t *file_ptr, int min_idx, int max_idx, int min_count,
- int max_count);
-static void lock_and_unlock_random_entry(H5F_t *file_ptr, int min_idx, int max_idx);
-static void lock_entry(H5F_t *file_ptr, int32_t idx);
-static void mark_entry_dirty(int32_t idx);
-static void pin_entry(H5F_t *file_ptr, int32_t idx, hbool_t global, hbool_t dirty);
-static void pin_protected_entry(int32_t idx, hbool_t global);
-static void move_entry(H5F_t *file_ptr, int32_t old_idx, int32_t new_idx);
-static hbool_t reset_server_counts(void);
-static void resize_entry(int32_t idx, size_t new_size);
-static hbool_t setup_cache_for_test(hid_t *fid_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr,
- int metadata_write_strategy);
-static void setup_rand(void);
-static hbool_t take_down_cache(hid_t fid, H5C_t *cache_ptr);
-static hbool_t verify_entry_reads(haddr_t addr, int expected_entry_reads);
-static hbool_t verify_entry_writes(haddr_t addr, int expected_entry_writes);
-static hbool_t verify_total_reads(int expected_total_reads);
-static hbool_t verify_total_writes(unsigned expected_total_writes);
-static void verify_writes(unsigned num_writes, haddr_t *written_entries_tbl);
-static void unlock_entry(H5F_t *file_ptr, int32_t type, unsigned int flags);
-static void unpin_entry(H5F_t *file_ptr, int32_t idx, hbool_t global, hbool_t dirty, hbool_t via_unprotect);
+static void expunge_entry(H5F_t *file_ptr, int32_t idx);
+static void insert_entry(H5C_t *cache_ptr, H5F_t *file_ptr, int32_t idx, unsigned int flags);
+static void local_pin_and_unpin_random_entries(H5F_t *file_ptr, int min_idx, int max_idx, int min_count,
+ int max_count);
+static void local_pin_random_entry(H5F_t *file_ptr, int min_idx, int max_idx);
+static void local_unpin_all_entries(H5F_t *file_ptr, bool via_unprotect);
+static int local_unpin_next_pinned_entry(H5F_t *file_ptr, int start_idx, bool via_unprotect);
+static void lock_and_unlock_random_entries(H5F_t *file_ptr, int min_idx, int max_idx, int min_count,
+ int max_count);
+static void lock_and_unlock_random_entry(H5F_t *file_ptr, int min_idx, int max_idx);
+static void lock_entry(H5F_t *file_ptr, int32_t idx);
+static void mark_entry_dirty(int32_t idx);
+static void pin_entry(H5F_t *file_ptr, int32_t idx, bool global, bool dirty);
+static void pin_protected_entry(int32_t idx, bool global);
+static void move_entry(H5F_t *file_ptr, int32_t old_idx, int32_t new_idx);
+static bool reset_server_counts(void);
+static void resize_entry(int32_t idx, size_t new_size);
+static bool setup_cache_for_test(hid_t *fid_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr,
+ int metadata_write_strategy);
+static void setup_rand(void);
+static bool take_down_cache(hid_t fid, H5C_t *cache_ptr);
+static bool verify_entry_reads(haddr_t addr, int expected_entry_reads);
+static bool verify_entry_writes(haddr_t addr, int expected_entry_writes);
+static bool verify_total_reads(int expected_total_reads);
+static bool verify_total_writes(unsigned expected_total_writes);
+static void verify_writes(unsigned num_writes, haddr_t *written_entries_tbl);
+static void unlock_entry(H5F_t *file_ptr, int32_t type, unsigned int flags);
+static void unpin_entry(H5F_t *file_ptr, int32_t idx, bool global, bool dirty, bool via_unprotect);
/* test functions */
-static hbool_t server_smoke_check(void);
-static hbool_t smoke_check_1(int metadata_write_strategy);
-static hbool_t smoke_check_2(int metadata_write_strategy);
-static hbool_t smoke_check_3(int metadata_write_strategy);
-static hbool_t smoke_check_4(int metadata_write_strategy);
-static hbool_t smoke_check_5(int metadata_write_strategy);
-static hbool_t smoke_check_6(int metadata_write_strategy);
-static hbool_t trace_file_check(int metadata_write_strategy);
+static bool server_smoke_check(void);
+static bool smoke_check_1(int metadata_write_strategy);
+static bool smoke_check_2(int metadata_write_strategy);
+static bool smoke_check_3(int metadata_write_strategy);
+static bool smoke_check_4(int metadata_write_strategy);
+static bool smoke_check_5(int metadata_write_strategy);
+static bool smoke_check_6(int metadata_write_strategy);
+static bool trace_file_check(int metadata_write_strategy);
/*****************************************************************************/
/****************************** stats functions ******************************/
@@ -529,16 +529,16 @@ reset_stats(void)
 * Purpose: Create the MPI communicator used to open an HDF5 file.
* In passing, also initialize the file_mpi... globals.
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
-static hbool_t
+static bool
set_up_file_communicator(void)
{
- hbool_t success = TRUE;
+ bool success = true;
int mpi_result;
int num_excluded_ranks;
int excluded_ranks[1];
@@ -552,7 +552,7 @@ set_up_file_communicator(void)
if (mpi_result != MPI_SUCCESS) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: MPI_Comm_group() failed with error %d.\n", world_mpi_rank, __func__,
mpi_result);
@@ -569,7 +569,7 @@ set_up_file_communicator(void)
if (mpi_result != MPI_SUCCESS) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: MPI_Group_excl() failed with error %d.\n", world_mpi_rank, __func__,
mpi_result);
@@ -584,7 +584,7 @@ set_up_file_communicator(void)
if (mpi_result != MPI_SUCCESS) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: MPI_Comm_create() failed with error %d.\n", world_mpi_rank, __func__,
mpi_result);
@@ -597,7 +597,7 @@ set_up_file_communicator(void)
if (file_mpi_comm == MPI_COMM_NULL) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: file_mpi_comm == MPI_COMM_NULL.\n", world_mpi_rank, __func__);
}
@@ -610,7 +610,7 @@ set_up_file_communicator(void)
if (file_mpi_comm != MPI_COMM_NULL) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: file_mpi_comm != MPI_COMM_NULL.\n", world_mpi_rank, __func__);
}
@@ -626,7 +626,7 @@ set_up_file_communicator(void)
if (mpi_result != MPI_SUCCESS) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: MPI_Comm_size() failed with error %d.\n", world_mpi_rank, __func__,
mpi_result);
@@ -641,7 +641,7 @@ set_up_file_communicator(void)
if (mpi_result != MPI_SUCCESS) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: MPI_Comm_rank() failed with error %d.\n", world_mpi_rank, __func__,
mpi_result);
@@ -712,9 +712,9 @@ addr_to_datum_index(haddr_t base_addr)
* Purpose: Initialize the data array, from which cache entries are
* loaded.
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
static void
@@ -743,13 +743,13 @@ init_data(void)
data[i].len = (size_t)(addr_offsets[j]);
data[i].local_len = (size_t)(addr_offsets[j]);
data[i].ver = 0;
- data[i].dirty = FALSE;
- data[i].valid = FALSE;
- data[i].locked = FALSE;
- data[i].global_pinned = FALSE;
- data[i].local_pinned = FALSE;
- data[i].cleared = FALSE;
- data[i].flushed = FALSE;
+ data[i].dirty = false;
+ data[i].valid = false;
+ data[i].locked = false;
+ data[i].global_pinned = false;
+ data[i].local_pinned = false;
+ data[i].cleared = false;
+ data[i].flushed = false;
data[i].reads = 0;
data[i].writes = 0;
data[i].index = i;
@@ -851,7 +851,7 @@ do_sync(void)
mssg.count = 0;
mssg.magic = MSSG_MAGIC;
- if (!send_mssg(&mssg, FALSE)) {
+ if (!send_mssg(&mssg, false)) {
nerrors++;
if (verbose) {
@@ -928,18 +928,18 @@ get_max_nerrors(void)
 * Purpose: Receive a message from any process into the provided instance
 * of struct mssg_t.
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
#define CACHE_TEST_TAG 99 /* different from any used by the library */
-static hbool_t
+static bool
recv_mssg(struct mssg_t *mssg_ptr, int mssg_tag_offset)
{
- hbool_t success = TRUE;
+ bool success = true;
int mssg_tag = CACHE_TEST_TAG;
int result;
MPI_Status status;
@@ -947,7 +947,7 @@ recv_mssg(struct mssg_t *mssg_ptr, int mssg_tag_offset)
if ((mssg_ptr == NULL) || (mssg_tag_offset < 0) || (mssg_tag_offset > MAX_REQ_CODE)) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: bad param(s) on entry.\n", world_mpi_rank, __func__);
}
@@ -964,7 +964,7 @@ recv_mssg(struct mssg_t *mssg_ptr, int mssg_tag_offset)
if (result != MPI_SUCCESS) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: MPI_Recv() failed.\n", world_mpi_rank, __func__);
}
@@ -972,7 +972,7 @@ recv_mssg(struct mssg_t *mssg_ptr, int mssg_tag_offset)
else if (mssg_ptr->magic != MSSG_MAGIC) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: invalid magic.\n", world_mpi_rank, __func__);
}
@@ -980,7 +980,7 @@ recv_mssg(struct mssg_t *mssg_ptr, int mssg_tag_offset)
else if (mssg_ptr->src != status.MPI_SOURCE) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: mssg_ptr->src != status.MPI_SOURCE.\n", world_mpi_rank, __func__);
}
@@ -1000,15 +1000,15 @@ recv_mssg(struct mssg_t *mssg_ptr, int mssg_tag_offset)
* Note that all source and destination ranks are in the
* global communicator.
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
-static hbool_t
-send_mssg(struct mssg_t *mssg_ptr, hbool_t add_req_to_tag)
+static bool
+send_mssg(struct mssg_t *mssg_ptr, bool add_req_to_tag)
{
- hbool_t success = TRUE;
+ bool success = true;
int mssg_tag = CACHE_TEST_TAG;
int result;
static long mssg_num = 0;
@@ -1018,7 +1018,7 @@ send_mssg(struct mssg_t *mssg_ptr, hbool_t add_req_to_tag)
(mssg_ptr->req > MAX_REQ_CODE) || (mssg_ptr->magic != MSSG_MAGIC)) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: Invalid mssg on entry.\n", world_mpi_rank, __func__);
}
@@ -1038,7 +1038,7 @@ send_mssg(struct mssg_t *mssg_ptr, hbool_t add_req_to_tag)
if (result != MPI_SUCCESS) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: MPI_Send() failed.\n", world_mpi_rank, __func__);
}
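/*
 * Illustrative sketch only -- not part of the patch.  The client-side
 * helpers later in this file (reset_server_counts(), verify_total_*(),
 * verify_entry_*()) all follow the same round trip over these two routines:
 * build a request, send it with add_req_to_tag == false, then block on
 * recv_mssg() with the expected reply code as the tag offset.  Only fields
 * whose names appear in this diff are shown; the destination and any
 * remaining mssg_t fields are set the same way as in those helpers.
 */
struct mssg_t mssg;

mssg.req   = REQ_TTL_WRITES_CODE;   /* ask the server for its total write count */
mssg.src   = world_mpi_rank;
mssg.len   = 0;
mssg.ver   = 0;
mssg.count = 0;
mssg.magic = MSSG_MAGIC;
/* destination and remaining fields elided -- see the helpers below */

if (!send_mssg(&mssg, false))       /* false: do not add the request code to the MPI tag */
    nerrors++;
else if (!recv_mssg(&mssg, REQ_TTL_WRITES_RPLY_CODE) || (mssg.magic != MSSG_MAGIC))
    nerrors++;                      /* no valid reply from the server */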
@@ -1056,15 +1056,15 @@ send_mssg(struct mssg_t *mssg_ptr, hbool_t add_req_to_tag)
* Purpose: Set up the derived types used by the test bed. At present,
* only the mpi_mssg derived type is needed.
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
-static hbool_t
+static bool
setup_derived_types(void)
{
- hbool_t success = TRUE;
+ bool success = true;
int i;
int result;
MPI_Datatype mpi_types[9] = {MPI_INT, MPI_INT, MPI_INT, MPI_LONG, HADDR_AS_MPI_TYPE,
@@ -1087,7 +1087,7 @@ setup_derived_types(void)
(MPI_SUCCESS != MPI_Get_address(&sample.magic, &displs[8]))) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: MPI_Get_address() call failed.\n", world_mpi_rank, __func__);
}
@@ -1107,7 +1107,7 @@ setup_derived_types(void)
if (result != MPI_SUCCESS) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: MPI_Type_create_struct() call failed.\n", world_mpi_rank, __func__);
}
@@ -1121,7 +1121,7 @@ setup_derived_types(void)
if (result != MPI_SUCCESS) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: MPI_Type_commit() call failed.\n", world_mpi_rank, __func__);
}
@@ -1139,16 +1139,16 @@ setup_derived_types(void)
 * Purpose: Take down the derived types used by the test bed. At present,
* only the mpi_mssg derived type is needed.
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
-static hbool_t
+static bool
takedown_derived_types(void)
{
- hbool_t success = TRUE;
- int result;
+ bool success = true;
+ int result;
if (mpi_mssg_t == MPI_DATATYPE_NULL)
return (success);
@@ -1158,7 +1158,7 @@ takedown_derived_types(void)
if (result != MPI_SUCCESS) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: MPI_Type_free() call failed.\n", world_mpi_rank, __func__);
}
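/*
 * Illustrative sketch only -- not part of the patch.  It shows the pattern
 * setup_derived_types() uses to register struct mssg_t with MPI, reduced to
 * a hypothetical two-field struct so the steps are easy to follow: take
 * field addresses, convert them to displacements, build the struct
 * datatype, and commit it.
 */
#include <mpi.h>
#include <stdbool.h>

struct pair_t {
    int  tag;
    long value;
};

static MPI_Datatype mpi_pair_t = MPI_DATATYPE_NULL;

static bool
setup_pair_type(void)
{
    struct pair_t sample;
    MPI_Datatype  types[2] = {MPI_INT, MPI_LONG};
    int           lens[2]  = {1, 1};
    MPI_Aint      displs[2];
    MPI_Aint      base;

    /* byte offsets of each field, relative to the start of the struct */
    if ((MPI_Get_address(&sample, &base) != MPI_SUCCESS) ||
        (MPI_Get_address(&sample.tag, &displs[0]) != MPI_SUCCESS) ||
        (MPI_Get_address(&sample.value, &displs[1]) != MPI_SUCCESS))
        return false;

    displs[0] -= base;
    displs[1] -= base;

    /* build the derived type and commit it for use in MPI_Send()/MPI_Recv() */
    if (MPI_Type_create_struct(2, lens, displs, types, &mpi_pair_t) != MPI_SUCCESS)
        return false;

    return (MPI_Type_commit(&mpi_pair_t) == MPI_SUCCESS);
}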
@@ -1179,18 +1179,18 @@ takedown_derived_types(void)
* Purpose: Reset the counters maintained by the server, doing a
* sanity check in passing.
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
-static hbool_t
+static bool
reset_server_counters(void)
{
- hbool_t success = TRUE;
- int i;
- long actual_total_reads = 0;
- long actual_total_writes = 0;
+ bool success = true;
+ int i;
+ long actual_total_reads = 0;
+ long actual_total_writes = 0;
for (i = 0; i < NUM_DATA_ENTRIES; i++) {
if (data[i].reads > 0) {
@@ -1208,7 +1208,7 @@ reset_server_counters(void)
if (actual_total_reads != total_reads) {
- success = FALSE;
+ success = false;
nerrors++;
if (verbose) {
fprintf(stdout, "%d:%s: actual/total reads mismatch (%ld/%d).\n", world_mpi_rank, __func__,
@@ -1218,7 +1218,7 @@ reset_server_counters(void)
if (actual_total_writes != total_writes) {
- success = FALSE;
+ success = false;
nerrors++;
if (verbose) {
fprintf(stdout, "%d:%s: actual/total writes mismatch (%ld/%d).\n", world_mpi_rank, __func__,
@@ -1244,23 +1244,23 @@ reset_server_counters(void)
* the test until the count of done messages received equals
* the number of client processes.
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
-static hbool_t
+static bool
server_main(void)
{
- hbool_t done = FALSE;
- hbool_t success = TRUE;
+ bool done = false;
+ bool success = true;
int done_count = 0;
struct mssg_t mssg;
if (world_mpi_rank != world_server_mpi_rank) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: This isn't the server process?!?!?\n", world_mpi_rank, __func__);
}
@@ -1277,7 +1277,7 @@ server_main(void)
break;
case WRITE_REQ_ACK_CODE:
- success = FALSE;
+ success = false;
if (verbose)
fprintf(stdout, "%s: Received write ack?!?.\n", __func__);
break;
@@ -1287,7 +1287,7 @@ server_main(void)
break;
case READ_REQ_REPLY_CODE:
- success = FALSE;
+ success = false;
if (verbose)
fprintf(stdout, "%s: Received read req reply?!?.\n", __func__);
break;
@@ -1297,7 +1297,7 @@ server_main(void)
break;
case SYNC_ACK_CODE:
- success = FALSE;
+ success = false;
if (verbose)
fprintf(stdout, "%s: Received sync ack?!?.\n", __func__);
break;
@@ -1307,7 +1307,7 @@ server_main(void)
break;
case REQ_TTL_WRITES_RPLY_CODE:
- success = FALSE;
+ success = false;
if (verbose)
fprintf(stdout, "%s: Received total writes reply?!?.\n", __func__);
break;
@@ -1317,7 +1317,7 @@ server_main(void)
break;
case REQ_TTL_READS_RPLY_CODE:
- success = FALSE;
+ success = false;
if (verbose)
fprintf(stdout, "%s: Received total reads reply?!?.\n", __func__);
break;
@@ -1327,7 +1327,7 @@ server_main(void)
break;
case REQ_ENTRY_WRITES_RPLY_CODE:
- success = FALSE;
+ success = false;
if (verbose)
fprintf(stdout, "%s: Received entry writes reply?!?.\n", __func__);
break;
@@ -1337,7 +1337,7 @@ server_main(void)
break;
case REQ_ENTRY_READS_RPLY_CODE:
- success = FALSE;
+ success = false;
if (verbose)
fprintf(stdout, "%s: Received entry reads reply?!?.\n", __func__);
break;
@@ -1347,7 +1347,7 @@ server_main(void)
break;
case REQ_RW_COUNT_RESET_RPLY_CODE:
- success = FALSE;
+ success = false;
if (verbose)
fprintf(stdout, "%s: Received RW count reset reply?!?.\n", __func__);
break;
@@ -1355,12 +1355,12 @@ server_main(void)
case DONE_REQ_CODE:
done_count++;
if (done_count >= file_mpi_size)
- done = TRUE;
+ done = true;
break;
default:
nerrors++;
- success = FALSE;
+ success = false;
if (verbose)
fprintf(stdout, "%d:%s: Unknown request code.\n", world_mpi_rank, __func__);
break;
@@ -1383,16 +1383,16 @@ server_main(void)
* a copy of the indicated datum from the data array to
* the requesting process.
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
-static hbool_t
+static bool
serve_read_request(struct mssg_t *mssg_ptr)
{
- hbool_t report_mssg = FALSE;
- hbool_t success = TRUE;
+ bool report_mssg = false;
+ bool success = true;
int target_index;
haddr_t target_addr;
struct mssg_t reply;
@@ -1400,7 +1400,7 @@ serve_read_request(struct mssg_t *mssg_ptr)
if ((mssg_ptr == NULL) || (mssg_ptr->req != READ_REQ_CODE) || (mssg_ptr->magic != MSSG_MAGIC)) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, __func__);
}
@@ -1414,7 +1414,7 @@ serve_read_request(struct mssg_t *mssg_ptr)
if (target_index < 0) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: addr lookup failed for %" PRIuHADDR ".\n", world_mpi_rank, __func__,
target_addr);
@@ -1423,7 +1423,7 @@ serve_read_request(struct mssg_t *mssg_ptr)
else if (data[target_index].len != mssg_ptr->len) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: data[i].len = %zu != mssg->len = %d.\n", world_mpi_rank, __func__,
data[target_index].len, mssg_ptr->len);
@@ -1432,7 +1432,7 @@ serve_read_request(struct mssg_t *mssg_ptr)
else if (!(data[target_index].valid)) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout,
"%d:%s: proc %d read invalid entry. "
@@ -1461,7 +1461,7 @@ serve_read_request(struct mssg_t *mssg_ptr)
if (success) {
- success = send_mssg(&reply, TRUE);
+ success = send_mssg(&reply, true);
}
if (report_mssg) {
@@ -1498,22 +1498,22 @@ serve_read_request(struct mssg_t *mssg_ptr)
* that all previous messages have been processed before
* proceeding.
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
-static hbool_t
+static bool
serve_sync_request(struct mssg_t *mssg_ptr)
{
- hbool_t report_mssg = FALSE;
- hbool_t success = TRUE;
+ bool report_mssg = false;
+ bool success = true;
struct mssg_t reply;
if ((mssg_ptr == NULL) || (mssg_ptr->req != SYNC_REQ_CODE) || (mssg_ptr->magic != MSSG_MAGIC)) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, __func__);
}
@@ -1535,7 +1535,7 @@ serve_sync_request(struct mssg_t *mssg_ptr)
if (success) {
- success = send_mssg(&reply, TRUE);
+ success = send_mssg(&reply, true);
}
if (report_mssg) {
@@ -1565,16 +1565,16 @@ serve_sync_request(struct mssg_t *mssg_ptr)
* the version number of the target data array entry as
* specified in the message.
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
-static hbool_t
+static bool
serve_write_request(struct mssg_t *mssg_ptr)
{
- hbool_t report_mssg = FALSE;
- hbool_t success = TRUE;
+ bool report_mssg = false;
+ bool success = true;
int target_index;
int new_ver_num = 0;
haddr_t target_addr;
@@ -1585,7 +1585,7 @@ serve_write_request(struct mssg_t *mssg_ptr)
if ((mssg_ptr == NULL) || (mssg_ptr->req != WRITE_REQ_CODE) || (mssg_ptr->magic != MSSG_MAGIC)) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, __func__);
}
@@ -1599,7 +1599,7 @@ serve_write_request(struct mssg_t *mssg_ptr)
if (target_index < 0) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: addr lookup failed for %" PRIuHADDR ".\n", world_mpi_rank, __func__,
target_addr);
@@ -1608,7 +1608,7 @@ serve_write_request(struct mssg_t *mssg_ptr)
else if (data[target_index].len != mssg_ptr->len) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: data[i].len = %zu != mssg->len = %d.\n", world_mpi_rank, __func__,
data[target_index].len, mssg_ptr->len);
@@ -1624,7 +1624,7 @@ serve_write_request(struct mssg_t *mssg_ptr)
if (new_ver_num <= data[target_index].ver) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: new ver = %d <= old ver = %d.\n", world_mpi_rank, __func__,
new_ver_num, data[target_index].ver);
@@ -1636,7 +1636,7 @@ serve_write_request(struct mssg_t *mssg_ptr)
/* process the write */
data[target_index].ver = new_ver_num;
- data[target_index].valid = TRUE;
+ data[target_index].valid = true;
/* and update the counters */
total_writes++;
@@ -1656,7 +1656,7 @@ serve_write_request(struct mssg_t *mssg_ptr)
reply.magic = MSSG_MAGIC;
/* and send it */
- success = send_mssg(&reply, TRUE);
+ success = send_mssg(&reply, true);
#endif /* DO_WRITE_REQ_ACK */
}
@@ -1693,22 +1693,22 @@ serve_write_request(struct mssg_t *mssg_ptr)
* the current value of the total_writes global variable to
* the requesting process.
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
-static hbool_t
+static bool
serve_total_writes_request(struct mssg_t *mssg_ptr)
{
- hbool_t report_mssg = FALSE;
- hbool_t success = TRUE;
+ bool report_mssg = false;
+ bool success = true;
struct mssg_t reply;
if ((mssg_ptr == NULL) || (mssg_ptr->req != REQ_TTL_WRITES_CODE) || (mssg_ptr->magic != MSSG_MAGIC)) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, __func__);
}
@@ -1730,7 +1730,7 @@ serve_total_writes_request(struct mssg_t *mssg_ptr)
if (success) {
- success = send_mssg(&reply, TRUE);
+ success = send_mssg(&reply, true);
}
if (report_mssg) {
@@ -1761,22 +1761,22 @@ serve_total_writes_request(struct mssg_t *mssg_ptr)
* the current value of the total_reads global variable to
* the requesting process.
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
-static hbool_t
+static bool
serve_total_reads_request(struct mssg_t *mssg_ptr)
{
- hbool_t report_mssg = FALSE;
- hbool_t success = TRUE;
+ bool report_mssg = false;
+ bool success = true;
struct mssg_t reply;
if ((mssg_ptr == NULL) || (mssg_ptr->req != REQ_TTL_READS_CODE) || (mssg_ptr->magic != MSSG_MAGIC)) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, __func__);
}
@@ -1798,7 +1798,7 @@ serve_total_reads_request(struct mssg_t *mssg_ptr)
if (success) {
- success = send_mssg(&reply, TRUE);
+ success = send_mssg(&reply, true);
}
if (report_mssg) {
@@ -1829,16 +1829,16 @@ serve_total_reads_request(struct mssg_t *mssg_ptr)
* written since the last counter reset to the requesting
* process.
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
-static hbool_t
+static bool
serve_entry_writes_request(struct mssg_t *mssg_ptr)
{
- hbool_t report_mssg = FALSE;
- hbool_t success = TRUE;
+ bool report_mssg = false;
+ bool success = true;
int target_index;
haddr_t target_addr;
struct mssg_t reply;
@@ -1846,7 +1846,7 @@ serve_entry_writes_request(struct mssg_t *mssg_ptr)
if ((mssg_ptr == NULL) || (mssg_ptr->req != REQ_ENTRY_WRITES_CODE) || (mssg_ptr->magic != MSSG_MAGIC)) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, __func__);
}
@@ -1860,7 +1860,7 @@ serve_entry_writes_request(struct mssg_t *mssg_ptr)
if (target_index < 0) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: addr lookup failed for %" PRIuHADDR ".\n", world_mpi_rank, __func__,
target_addr);
@@ -1883,7 +1883,7 @@ serve_entry_writes_request(struct mssg_t *mssg_ptr)
if (success) {
- success = send_mssg(&reply, TRUE);
+ success = send_mssg(&reply, true);
}
if (report_mssg) {
@@ -1916,16 +1916,16 @@ serve_entry_writes_request(struct mssg_t *mssg_ptr)
* read since the last counter reset to the requesting
* process.
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
-static hbool_t
+static bool
serve_entry_reads_request(struct mssg_t *mssg_ptr)
{
- hbool_t report_mssg = FALSE;
- hbool_t success = TRUE;
+ bool report_mssg = false;
+ bool success = true;
int target_index;
haddr_t target_addr;
struct mssg_t reply;
@@ -1933,7 +1933,7 @@ serve_entry_reads_request(struct mssg_t *mssg_ptr)
if ((mssg_ptr == NULL) || (mssg_ptr->req != REQ_ENTRY_READS_CODE) || (mssg_ptr->magic != MSSG_MAGIC)) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, __func__);
}
@@ -1947,7 +1947,7 @@ serve_entry_reads_request(struct mssg_t *mssg_ptr)
if (target_index < 0) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: addr lookup failed for %" PRIuHADDR ".\n", world_mpi_rank, __func__,
target_addr);
@@ -1970,7 +1970,7 @@ serve_entry_reads_request(struct mssg_t *mssg_ptr)
if (success) {
- success = send_mssg(&reply, TRUE);
+ success = send_mssg(&reply, true);
}
if (report_mssg) {
@@ -2002,22 +2002,22 @@ serve_entry_reads_request(struct mssg_t *mssg_ptr)
* read/write counters, and sends a confirmation message to
* the calling process.
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
-static hbool_t
+static bool
serve_rw_count_reset_request(struct mssg_t *mssg_ptr)
{
- hbool_t report_mssg = FALSE;
- hbool_t success = TRUE;
+ bool report_mssg = false;
+ bool success = true;
struct mssg_t reply;
if ((mssg_ptr == NULL) || (mssg_ptr->req != REQ_RW_COUNT_RESET_CODE) || (mssg_ptr->magic != MSSG_MAGIC)) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, __func__);
}
@@ -2044,7 +2044,7 @@ serve_rw_count_reset_request(struct mssg_t *mssg_ptr)
if (success) {
- success = send_mssg(&reply, TRUE);
+ success = send_mssg(&reply, true);
}
if (report_mssg) {
@@ -2122,10 +2122,10 @@ datum_get_initial_load_size(void *udata_ptr, size_t *image_len_ptr)
*/
static void *
datum_deserialize(const void H5_ATTR_NDEBUG_UNUSED *image_ptr, H5_ATTR_UNUSED size_t len, void *udata_ptr,
- hbool_t *dirty_ptr)
+ bool *dirty_ptr)
{
haddr_t addr = *(haddr_t *)udata_ptr;
- hbool_t success = TRUE;
+ bool success = true;
int idx;
struct datum *entry_ptr = NULL;
@@ -2152,7 +2152,7 @@ datum_deserialize(const void H5_ATTR_NDEBUG_UNUSED *image_ptr, H5_ATTR_UNUSED si
fflush(stdout);
}
- *dirty_ptr = FALSE;
+ *dirty_ptr = false;
if (!success) {
@@ -2285,7 +2285,7 @@ datum_serialize(const H5F_t *f, void H5_ATTR_NDEBUG_UNUSED *image_ptr, size_t le
static herr_t
datum_notify(H5C_notify_action_t action, void *thing)
{
- hbool_t was_dirty = FALSE;
+ bool was_dirty = false;
herr_t ret_value = SUCCEED;
struct datum *entry_ptr;
struct H5AC_aux_t *aux_ptr;
@@ -2349,7 +2349,7 @@ datum_notify(H5C_notify_action_t action, void *thing)
mssg.count = 0; /* not used */
mssg.magic = MSSG_MAGIC;
- if (!send_mssg(&mssg, FALSE)) {
+ if (!send_mssg(&mssg, false)) {
nerrors++;
ret_value = FAIL;
@@ -2446,7 +2446,7 @@ datum_notify(H5C_notify_action_t action, void *thing)
else {
entry_ptr->ver = mssg.ver;
- entry_ptr->dirty = FALSE;
+ entry_ptr->dirty = false;
datum_loads++;
}
}
@@ -2478,7 +2478,7 @@ datum_notify(H5C_notify_action_t action, void *thing)
if (entry_ptr->header.is_dirty) {
- was_dirty = TRUE; /* so we will receive the ack
+ was_dirty = true; /* so we will receive the ack
* if requested
*/
@@ -2493,7 +2493,7 @@ datum_notify(H5C_notify_action_t action, void *thing)
mssg.count = 0;
mssg.magic = MSSG_MAGIC;
- if (!send_mssg(&mssg, FALSE)) {
+ if (!send_mssg(&mssg, false)) {
nerrors++;
ret_value = FAIL;
@@ -2502,8 +2502,8 @@ datum_notify(H5C_notify_action_t action, void *thing)
}
}
else {
- entry_ptr->dirty = FALSE;
- entry_ptr->flushed = TRUE;
+ entry_ptr->dirty = false;
+ entry_ptr->flushed = true;
}
}
}
@@ -2574,8 +2574,8 @@ datum_notify(H5C_notify_action_t action, void *thing)
fflush(stdout);
}
- entry_ptr->cleared = TRUE;
- entry_ptr->dirty = FALSE;
+ entry_ptr->cleared = true;
+ entry_ptr->dirty = false;
datum_clears++;
@@ -2711,7 +2711,7 @@ datum_free_icr(void *thing)
static void
expunge_entry(H5F_t *file_ptr, int32_t idx)
{
- hbool_t in_cache;
+ bool in_cache;
herr_t result;
struct datum *entry_ptr;
@@ -2725,7 +2725,7 @@ expunge_entry(H5F_t *file_ptr, int32_t idx)
assert(!(entry_ptr->global_pinned));
assert(!(entry_ptr->local_pinned));
- entry_ptr->dirty = FALSE;
+ entry_ptr->dirty = false;
if (nerrors == 0) {
@@ -2779,7 +2779,7 @@ expunge_entry(H5F_t *file_ptr, int32_t idx)
static void
insert_entry(H5C_t *cache_ptr, H5F_t *file_ptr, int32_t idx, unsigned int flags)
{
- hbool_t insert_pinned;
+ bool insert_pinned;
herr_t result;
struct datum *entry_ptr;
@@ -2797,7 +2797,7 @@ insert_entry(H5C_t *cache_ptr, H5F_t *file_ptr, int32_t idx, unsigned int flags)
if (nerrors == 0) {
(entry_ptr->ver)++;
- entry_ptr->dirty = TRUE;
+ entry_ptr->dirty = true;
result = H5AC_insert_entry(file_ptr, &(types[0]), entry_ptr->base_addr,
(void *)(&(entry_ptr->header)), flags);
@@ -2836,13 +2836,13 @@ insert_entry(H5C_t *cache_ptr, H5F_t *file_ptr, int32_t idx, unsigned int flags)
if (insert_pinned) {
assert(entry_ptr->header.is_pinned);
- entry_ptr->global_pinned = TRUE;
+ entry_ptr->global_pinned = true;
global_pins++;
}
else {
assert(!(entry_ptr->header.is_pinned));
- entry_ptr->global_pinned = FALSE;
+ entry_ptr->global_pinned = false;
}
/* assert( entry_ptr->header.is_dirty ); */
@@ -2870,10 +2870,10 @@ local_pin_and_unpin_random_entries(H5F_t *file_ptr, int min_idx, int max_idx, in
if (nerrors == 0) {
- hbool_t via_unprotect;
- int count;
- int i;
- int idx;
+ bool via_unprotect;
+ int count;
+ int i;
+ int idx;
assert(file_ptr);
assert(0 <= min_idx);
@@ -2943,7 +2943,7 @@ local_pin_random_entry(H5F_t *file_ptr, int min_idx, int max_idx)
assert(idx <= max_idx);
} while (data[idx].global_pinned || data[idx].local_pinned);
- pin_entry(file_ptr, idx, FALSE, FALSE);
+ pin_entry(file_ptr, idx, false, false);
}
return;
@@ -2961,7 +2961,7 @@ local_pin_random_entry(H5F_t *file_ptr, int min_idx, int max_idx)
*
*****************************************************************************/
static void
-local_unpin_all_entries(H5F_t *file_ptr, hbool_t via_unprotect)
+local_unpin_all_entries(H5F_t *file_ptr, bool via_unprotect)
{
if (nerrors == 0) {
@@ -2995,7 +2995,7 @@ local_unpin_all_entries(H5F_t *file_ptr, hbool_t via_unprotect)
*
*****************************************************************************/
static int
-local_unpin_next_pinned_entry(H5F_t *file_ptr, int start_idx, hbool_t via_unprotect)
+local_unpin_next_pinned_entry(H5F_t *file_ptr, int start_idx, bool via_unprotect)
{
int i = 0;
int idx = -1;
@@ -3019,7 +3019,7 @@ local_unpin_next_pinned_entry(H5F_t *file_ptr, int start_idx, hbool_t via_unprot
if (data[idx].local_pinned) {
- unpin_entry(file_ptr, idx, FALSE, FALSE, via_unprotect);
+ unpin_entry(file_ptr, idx, false, false, via_unprotect);
}
else {
@@ -3146,7 +3146,7 @@ lock_entry(H5F_t *file_ptr, int32_t idx)
}
else {
- entry_ptr->locked = TRUE;
+ entry_ptr->locked = true;
}
assert(((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE);
@@ -3183,7 +3183,7 @@ mark_entry_dirty(int32_t idx)
assert(!(entry_ptr->local_pinned));
(entry_ptr->ver)++;
- entry_ptr->dirty = TRUE;
+ entry_ptr->dirty = true;
result = H5AC_mark_entry_dirty((void *)entry_ptr);
@@ -3214,7 +3214,7 @@ mark_entry_dirty(int32_t idx)
*
*****************************************************************************/
static void
-pin_entry(H5F_t *file_ptr, int32_t idx, hbool_t global, hbool_t dirty)
+pin_entry(H5F_t *file_ptr, int32_t idx, bool global, bool dirty)
{
unsigned int flags = H5AC__PIN_ENTRY_FLAG;
struct datum *entry_ptr;
@@ -3245,13 +3245,13 @@ pin_entry(H5F_t *file_ptr, int32_t idx, hbool_t global, hbool_t dirty)
if (global) {
- entry_ptr->global_pinned = TRUE;
+ entry_ptr->global_pinned = true;
global_pins++;
}
else {
- entry_ptr->local_pinned = TRUE;
+ entry_ptr->local_pinned = true;
local_pins++;
}
@@ -3273,7 +3273,7 @@ pin_entry(H5F_t *file_ptr, int32_t idx, hbool_t global, hbool_t dirty)
*
*****************************************************************************/
static void
-pin_protected_entry(int32_t idx, hbool_t global)
+pin_protected_entry(int32_t idx, bool global)
{
herr_t result;
struct datum *entry_ptr;
@@ -3302,13 +3302,13 @@ pin_protected_entry(int32_t idx, hbool_t global)
if (global) {
- entry_ptr->global_pinned = TRUE;
+ entry_ptr->global_pinned = true;
global_pins++;
}
else {
- entry_ptr->local_pinned = TRUE;
+ entry_ptr->local_pinned = true;
local_pins++;
}
@@ -3364,7 +3364,7 @@ move_entry(H5F_t *file_ptr, int32_t old_idx, int32_t new_idx)
new_addr = new_entry_ptr->base_addr;
/* Moving will mark the entry dirty if it is not already */
- old_entry_ptr->dirty = TRUE;
+ old_entry_ptr->dirty = true;
/* touch up versions, base_addrs, and data_index. Do this
* now as it is possible that the rename will trigger a
@@ -3441,15 +3441,15 @@ move_entry(H5F_t *file_ptr, int32_t old_idx, int32_t new_idx)
* Purpose: Send a message to the server process requesting it to reset
* its counters. Await confirmation message.
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
-static hbool_t
+static bool
reset_server_counts(void)
{
- hbool_t success = TRUE; /* will set to FALSE if appropriate. */
+ bool success = true; /* will set to false if appropriate. */
struct mssg_t mssg;
if (success) {
@@ -3465,10 +3465,10 @@ reset_server_counts(void)
mssg.count = 0;
mssg.magic = MSSG_MAGIC;
- if (!send_mssg(&mssg, FALSE)) {
+ if (!send_mssg(&mssg, false)) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, __func__);
}
@@ -3480,7 +3480,7 @@ reset_server_counts(void)
if (!recv_mssg(&mssg, REQ_RW_COUNT_RESET_RPLY_CODE)) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, __func__);
}
@@ -3490,7 +3490,7 @@ reset_server_counts(void)
(mssg.ver != 0) || (mssg.count != 0) || (mssg.magic != MSSG_MAGIC)) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: Bad data in req r/w counter reset reply.\n", world_mpi_rank,
__func__);
@@ -3550,7 +3550,7 @@ resize_entry(int32_t idx, size_t new_size)
assert(entry_ptr->header.is_dirty);
assert(entry_ptr->header.size == new_size);
- entry_ptr->dirty = TRUE;
+ entry_ptr->dirty = true;
entry_ptr->local_len = new_size;
/* touch up version. */
@@ -3575,17 +3575,17 @@ resize_entry(int32_t idx, size_t new_size)
* look up the address of the metadata cache, and then instruct
* the cache to omit sanity checks on dxpl IDs.
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
-static hbool_t
+static bool
setup_cache_for_test(hid_t *fid_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr, int metadata_write_strategy)
{
- hbool_t success = FALSE; /* will set to TRUE if appropriate. */
- hbool_t enable_rpt_fcn = FALSE;
- hid_t fid = -1;
+ bool success = false; /* will set to true if appropriate. */
+ bool enable_rpt_fcn = false;
+ hid_t fid = H5I_INVALID_HID;
H5AC_cache_config_t config;
H5AC_cache_config_t test_config;
H5F_t *file_ptr = NULL;
@@ -3634,12 +3634,12 @@ setup_cache_for_test(hid_t *fid_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr
}
}
else {
- cache_ptr->ignore_tags = TRUE;
+ cache_ptr->ignore_tags = true;
*fid_ptr = fid;
*file_ptr_ptr = file_ptr;
*cache_ptr_ptr = cache_ptr;
H5C_stats__reset(cache_ptr);
- success = TRUE;
+ success = true;
}
if (success) {
@@ -3727,7 +3727,7 @@ setup_cache_for_test(hid_t *fid_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr
if (actual_base_addr == HADDR_UNDEF) {
- success = FALSE;
+ success = false;
nerrors++;
if (verbose) {
@@ -3740,7 +3740,7 @@ setup_cache_for_test(hid_t *fid_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr
* actual_base_addr is <= BASE_ADDR. This should only happen
 * if the size of the superblock is increased.
*/
- success = FALSE;
+ success = false;
nerrors++;
if (verbose) {
@@ -3819,9 +3819,9 @@ setup_cache_for_test(hid_t *fid_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr
static void
verify_writes(unsigned num_writes, haddr_t *written_entries_tbl)
{
- const hbool_t report = FALSE;
- hbool_t proceed = TRUE;
- unsigned u = 0;
+ const bool report = false;
+ bool proceed = true;
+ unsigned u = 0;
assert(world_mpi_rank != world_server_mpi_rank);
assert((num_writes == 0) || (written_entries_tbl != NULL));
@@ -3833,7 +3833,7 @@ verify_writes(unsigned num_writes, haddr_t *written_entries_tbl)
if (MPI_SUCCESS != MPI_Barrier(file_mpi_comm)) {
- proceed = FALSE;
+ proceed = false;
nerrors++;
if (verbose) {
fprintf(stdout, "%d:%s: barrier 1 failed.\n", world_mpi_rank, __func__);
@@ -3856,7 +3856,7 @@ verify_writes(unsigned num_writes, haddr_t *written_entries_tbl)
if (MPI_SUCCESS != MPI_Barrier(file_mpi_comm)) {
- proceed = FALSE;
+ proceed = false;
nerrors++;
if (verbose) {
fprintf(stdout, "%d:%s: barrier 2 failed.\n", world_mpi_rank, __func__);
@@ -3892,7 +3892,7 @@ verify_writes(unsigned num_writes, haddr_t *written_entries_tbl)
if (MPI_SUCCESS != MPI_Barrier(file_mpi_comm)) {
- proceed = FALSE;
+ proceed = false;
nerrors++;
if (verbose) {
fprintf(stdout, "%d:%s: barrier 3 failed.\n", world_mpi_rank, __func__);
@@ -3919,7 +3919,7 @@ verify_writes(unsigned num_writes, haddr_t *written_entries_tbl)
static void
setup_rand(void)
{
- hbool_t use_predefined_seeds = FALSE;
+ bool use_predefined_seeds = false;
int num_predefined_seeds = 3;
unsigned predefined_seeds[3] = {18669, 89925, 12577};
unsigned seed;
@@ -3967,22 +3967,22 @@ setup_rand(void)
 * To do this, we must close the file, and delete it if
* possible.
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
-static hbool_t
+static bool
take_down_cache(hid_t fid, H5C_t *cache_ptr)
{
- hbool_t success = TRUE; /* will set to FALSE if appropriate. */
+ bool success = true; /* will set to false if appropriate. */
/* flush the file -- this should write out any remaining test
* entries in the cache.
*/
if ((success) && (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)) {
- success = FALSE;
+ success = false;
nerrors++;
if (verbose) {
fprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, __func__);
@@ -3998,7 +3998,7 @@ take_down_cache(hid_t fid, H5C_t *cache_ptr)
if (H5AC__set_sync_point_done_callback(cache_ptr, NULL) != SUCCEED) {
- success = FALSE;
+ success = false;
nerrors++;
if (verbose) {
fprintf(stdout, "%d:%s: H5AC__set_sync_point_done_callback failed.\n", world_mpi_rank,
@@ -4010,7 +4010,7 @@ take_down_cache(hid_t fid, H5C_t *cache_ptr)
/* close the file */
if ((success) && (H5Fclose(fid) < 0)) {
- success = FALSE;
+ success = false;
nerrors++;
if (verbose) {
fprintf(stdout, "%d:%s: H5Fclose() failed.\n", world_mpi_rank, __func__);
@@ -4018,7 +4018,7 @@ take_down_cache(hid_t fid, H5C_t *cache_ptr)
}
/* Pop API context */
- H5CX_pop(FALSE);
+ H5CX_pop(false);
if (success) {
@@ -4026,7 +4026,7 @@ take_down_cache(hid_t fid, H5C_t *cache_ptr)
if (HDremove(filenames[0]) < 0) {
- success = FALSE;
+ success = false;
nerrors++;
if (verbose) {
fprintf(stdout, "%d:%s: HDremove() failed.\n", world_mpi_rank, __func__);
@@ -4053,20 +4053,20 @@ take_down_cache(hid_t fid, H5C_t *cache_ptr)
* indicated entry has been read since the last time the
* server counters were reset.
*
- * Return TRUE if successful, and if the supplied expected
+ * Return true if successful, and if the supplied expected
* number of reads matches the number of reads reported by
* the server process.
*
- * Return FALSE and flag an error otherwise.
+ * Return false and flag an error otherwise.
*
- * Return: TRUE if successful, FALSE otherwise.
+ * Return: true if successful, false otherwise.
*
*-------------------------------------------------------------------------
*/
-static hbool_t
+static bool
verify_entry_reads(haddr_t addr, int expected_entry_reads)
{
- hbool_t success = TRUE;
+ bool success = true;
int reported_entry_reads = 0;
struct mssg_t mssg;
@@ -4083,10 +4083,10 @@ verify_entry_reads(haddr_t addr, int expected_entry_reads)
mssg.count = 0; /* not used */
mssg.magic = MSSG_MAGIC;
- if (!send_mssg(&mssg, FALSE)) {
+ if (!send_mssg(&mssg, false)) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, __func__);
}
@@ -4098,7 +4098,7 @@ verify_entry_reads(haddr_t addr, int expected_entry_reads)
if (!recv_mssg(&mssg, REQ_ENTRY_READS_RPLY_CODE)) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, __func__);
}
@@ -4112,7 +4112,7 @@ verify_entry_reads(haddr_t addr, int expected_entry_reads)
(mssg.magic != MSSG_MAGIC)) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: Bad data in req entry reads reply.\n", world_mpi_rank, __func__);
}
@@ -4128,7 +4128,7 @@ verify_entry_reads(haddr_t addr, int expected_entry_reads)
if (reported_entry_reads != expected_entry_reads) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: rep/exp entry 0x%" PRIxHADDR " reads mismatch (%d/%d).\n",
world_mpi_rank, __func__, addr, reported_entry_reads, expected_entry_reads);
@@ -4147,20 +4147,20 @@ verify_entry_reads(haddr_t addr, int expected_entry_reads)
* indicated entry has been written since the last time the
* server counters were reset.
*
- * Return TRUE if successful, and if the supplied expected
+ * Return true if successful, and if the supplied expected
 * number of writes matches the number of writes reported by
* the server process.
*
- * Return FALSE and flag an error otherwise.
+ * Return false and flag an error otherwise.
*
- * Return: TRUE if successful, FALSE otherwise.
+ * Return: true if successful, false otherwise.
*
*-------------------------------------------------------------------------
*/
-static hbool_t
+static bool
verify_entry_writes(haddr_t addr, int expected_entry_writes)
{
- hbool_t success = TRUE;
+ bool success = true;
int reported_entry_writes = 0;
struct mssg_t mssg;
@@ -4177,10 +4177,10 @@ verify_entry_writes(haddr_t addr, int expected_entry_writes)
mssg.count = 0; /* not used */
mssg.magic = MSSG_MAGIC;
- if (!send_mssg(&mssg, FALSE)) {
+ if (!send_mssg(&mssg, false)) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, __func__);
}
@@ -4192,7 +4192,7 @@ verify_entry_writes(haddr_t addr, int expected_entry_writes)
if (!recv_mssg(&mssg, REQ_ENTRY_WRITES_RPLY_CODE)) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, __func__);
}
@@ -4206,7 +4206,7 @@ verify_entry_writes(haddr_t addr, int expected_entry_writes)
(mssg.magic != MSSG_MAGIC)) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: Bad data in req entry writes reply.\n", world_mpi_rank, __func__);
}
@@ -4222,7 +4222,7 @@ verify_entry_writes(haddr_t addr, int expected_entry_writes)
if (reported_entry_writes != expected_entry_writes) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: rep/exp entry 0x%llx writes mismatch (%d/%d).\n", world_mpi_rank,
__func__, (long long)addr, reported_entry_writes, expected_entry_writes);
@@ -4242,19 +4242,19 @@ verify_entry_writes(haddr_t addr, int expected_entry_writes)
* server counter reset, and compare this value with the supplied
* expected value.
*
- * If the values match, return TRUE.
+ * If the values match, return true.
*
- * If the values don't match, flag an error and return FALSE.
+ * If the values don't match, flag an error and return false.
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
-static hbool_t
+static bool
verify_total_reads(int expected_total_reads)
{
- hbool_t success = TRUE; /* will set to FALSE if appropriate. */
+ bool success = true; /* will set to false if appropriate. */
long reported_total_reads;
struct mssg_t mssg;
@@ -4271,10 +4271,10 @@ verify_total_reads(int expected_total_reads)
mssg.count = 0;
mssg.magic = MSSG_MAGIC;
- if (!send_mssg(&mssg, FALSE)) {
+ if (!send_mssg(&mssg, false)) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, __func__);
}
@@ -4286,7 +4286,7 @@ verify_total_reads(int expected_total_reads)
if (!recv_mssg(&mssg, REQ_TTL_READS_RPLY_CODE)) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, __func__);
}
@@ -4296,7 +4296,7 @@ verify_total_reads(int expected_total_reads)
(mssg.ver != 0) || (mssg.magic != MSSG_MAGIC)) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: Bad data in req total reads reply.\n", world_mpi_rank, __func__);
}
@@ -4312,7 +4312,7 @@ verify_total_reads(int expected_total_reads)
if (reported_total_reads != expected_total_reads) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: reported/expected total reads mismatch (%ld/%d).\n", world_mpi_rank,
__func__, reported_total_reads, expected_total_reads);
@@ -4332,19 +4332,19 @@ verify_total_reads(int expected_total_reads)
* server counter reset, and compare this value with the supplied
* expected value.
*
- * If the values match, return TRUE.
+ * If the values match, return true.
*
- * If the values don't match, flag an error and return FALSE.
+ * If the values don't match, flag an error and return false.
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
-static hbool_t
+static bool
verify_total_writes(unsigned expected_total_writes)
{
- hbool_t success = TRUE; /* will set to FALSE if appropriate. */
+ bool success = true; /* will set to false if appropriate. */
unsigned reported_total_writes;
struct mssg_t mssg;
@@ -4361,10 +4361,10 @@ verify_total_writes(unsigned expected_total_writes)
mssg.count = 0;
mssg.magic = MSSG_MAGIC;
- if (!send_mssg(&mssg, FALSE)) {
+ if (!send_mssg(&mssg, false)) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, __func__);
}
@@ -4376,7 +4376,7 @@ verify_total_writes(unsigned expected_total_writes)
if (!recv_mssg(&mssg, REQ_TTL_WRITES_RPLY_CODE)) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, __func__);
}
@@ -4386,7 +4386,7 @@ verify_total_writes(unsigned expected_total_writes)
(mssg.ver != 0) || (mssg.magic != MSSG_MAGIC)) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: Bad data in req total reads reply.\n", world_mpi_rank, __func__);
}
@@ -4402,7 +4402,7 @@ verify_total_writes(unsigned expected_total_writes)
if (reported_total_writes != expected_total_writes) {
nerrors++;
- success = FALSE;
+ success = false;
if (verbose) {
fprintf(stdout, "%d:%s: reported/expected total writes mismatch (%u/%u).\n", world_mpi_rank,
__func__, reported_total_writes, expected_total_writes);
@@ -4446,7 +4446,7 @@ unlock_entry(H5F_t *file_ptr, int32_t idx, unsigned int flags)
if (dirtied) {
(entry_ptr->ver)++;
- entry_ptr->dirty = TRUE;
+ entry_ptr->dirty = true;
}
result = H5AC_unprotect(file_ptr, &(types[0]), entry_ptr->base_addr, (void *)(&(entry_ptr->header)),
@@ -4464,7 +4464,7 @@ unlock_entry(H5F_t *file_ptr, int32_t idx, unsigned int flags)
}
else {
- entry_ptr->locked = FALSE;
+ entry_ptr->locked = false;
}
assert(((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE);
@@ -4492,7 +4492,7 @@ unlock_entry(H5F_t *file_ptr, int32_t idx, unsigned int flags)
*
*****************************************************************************/
static void
-unpin_entry(H5F_t *file_ptr, int32_t idx, hbool_t global, hbool_t dirty, hbool_t via_unprotect)
+unpin_entry(H5F_t *file_ptr, int32_t idx, bool global, bool dirty, bool via_unprotect)
{
herr_t result;
unsigned int flags = H5AC__UNPIN_ENTRY_FLAG;
@@ -4544,11 +4544,11 @@ unpin_entry(H5F_t *file_ptr, int32_t idx, hbool_t global, hbool_t dirty, hbool_t
if (global) {
- entry_ptr->global_pinned = FALSE;
+ entry_ptr->global_pinned = false;
}
else {
- entry_ptr->local_pinned = FALSE;
+ entry_ptr->local_pinned = false;
}
}
@@ -4566,15 +4566,15 @@ unpin_entry(H5F_t *file_ptr, int32_t idx, hbool_t global, hbool_t dirty, hbool_t
*
* Purpose: Quick smoke check for the server process.
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
-static hbool_t
+static bool
server_smoke_check(void)
{
- hbool_t success = TRUE;
+ bool success = true;
int max_nerrors;
struct mssg_t mssg;
@@ -4611,7 +4611,7 @@ server_smoke_check(void)
mssg.count = 0;
mssg.magic = MSSG_MAGIC;
- if (!(success = send_mssg(&mssg, FALSE))) {
+ if (!(success = send_mssg(&mssg, false))) {
nerrors++;
if (verbose) {
@@ -4643,7 +4643,7 @@ server_smoke_check(void)
(mssg.len != data[world_mpi_rank].len) || (mssg.ver != data[world_mpi_rank].ver) ||
(mssg.magic != MSSG_MAGIC)) {
- success = FALSE;
+ success = false;
nerrors++;
if (verbose) {
fprintf(stdout, "%d:%s: Bad data in write req ack.\n", world_mpi_rank, __func__);
@@ -4658,7 +4658,7 @@ server_smoke_check(void)
/* barrier to allow all writes to complete */
if (MPI_SUCCESS != MPI_Barrier(file_mpi_comm)) {
- success = FALSE;
+ success = false;
nerrors++;
if (verbose) {
fprintf(stdout, "%d:%s: barrier 1 failed.\n", world_mpi_rank, __func__);
@@ -4689,7 +4689,7 @@ server_smoke_check(void)
/* barrier to allow all writes to complete */
if (MPI_SUCCESS != MPI_Barrier(file_mpi_comm)) {
- success = FALSE;
+ success = false;
nerrors++;
if (verbose) {
@@ -4710,7 +4710,7 @@ server_smoke_check(void)
if (success) {
- success = send_mssg(&mssg, FALSE);
+ success = send_mssg(&mssg, false);
if (!success) {
@@ -4743,7 +4743,7 @@ server_smoke_check(void)
(mssg.len != data[world_mpi_rank].len) || (mssg.ver != data[world_mpi_rank].ver) ||
(mssg.magic != MSSG_MAGIC)) {
- success = FALSE;
+ success = false;
nerrors++;
if (verbose) {
fprintf(stdout, "%d:%s: Bad data in read req reply.\n", world_mpi_rank, __func__);
@@ -4754,7 +4754,7 @@ server_smoke_check(void)
/* barrier to allow all writes to complete */
if (MPI_SUCCESS != MPI_Barrier(file_mpi_comm)) {
- success = FALSE;
+ success = false;
nerrors++;
if (verbose) {
fprintf(stdout, "%d:%s: barrier 3 failed.\n", world_mpi_rank, __func__);
@@ -4784,7 +4784,7 @@ server_smoke_check(void)
if (MPI_SUCCESS != MPI_Barrier(file_mpi_comm)) {
- success = FALSE;
+ success = false;
nerrors++;
if (verbose) {
@@ -4800,7 +4800,7 @@ server_smoke_check(void)
if (MPI_SUCCESS != MPI_Barrier(file_mpi_comm)) {
- success = FALSE;
+ success = false;
nerrors++;
if (verbose) {
@@ -4831,7 +4831,7 @@ server_smoke_check(void)
if (MPI_SUCCESS != MPI_Barrier(file_mpi_comm)) {
- success = FALSE;
+ success = false;
nerrors++;
if (verbose) {
@@ -4852,7 +4852,7 @@ server_smoke_check(void)
if (success) {
- success = send_mssg(&mssg, FALSE);
+ success = send_mssg(&mssg, false);
if (!success) {
@@ -4891,18 +4891,18 @@ server_smoke_check(void)
*
* Purpose: First smoke check for the parallel cache.
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
-static hbool_t
+static bool
smoke_check_1(int metadata_write_strategy)
{
- hbool_t success = TRUE;
+ bool success = true;
int i;
int max_nerrors;
- hid_t fid = -1;
+ hid_t fid = H5I_INVALID_HID;
H5F_t *file_ptr = NULL;
H5C_t *cache_ptr = NULL;
struct mssg_t mssg;
@@ -5011,7 +5011,7 @@ smoke_check_1(int metadata_write_strategy)
if (success) {
- success = send_mssg(&mssg, FALSE);
+ success = send_mssg(&mssg, false);
if (!success) {
@@ -5053,18 +5053,18 @@ smoke_check_1(int metadata_write_strategy)
* Introduce random reads, but keep all processes with roughly
* the same work load.
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
-static hbool_t
+static bool
smoke_check_2(int metadata_write_strategy)
{
- hbool_t success = TRUE;
+ bool success = true;
int i;
int max_nerrors;
- hid_t fid = -1;
+ hid_t fid = H5I_INVALID_HID;
H5F_t *file_ptr = NULL;
H5C_t *cache_ptr = NULL;
struct mssg_t mssg;
@@ -5129,10 +5129,10 @@ smoke_check_2(int metadata_write_strategy)
for (i = 0; i < (virt_num_data_entries / 2); i += 61) {
/* Make sure we don't step on any locally pinned entries */
if (data[i].local_pinned) {
- unpin_entry(file_ptr, i, FALSE, FALSE, FALSE);
+ unpin_entry(file_ptr, i, false, false, false);
}
- pin_entry(file_ptr, i, TRUE, FALSE);
+ pin_entry(file_ptr, i, true, false);
}
for (i = (virt_num_data_entries / 2) - 1; i >= 0; i -= 2) {
@@ -5149,7 +5149,7 @@ smoke_check_2(int metadata_write_strategy)
}
/* we can't move pinned entries, so release any local pins now. */
- local_unpin_all_entries(file_ptr, FALSE);
+ local_unpin_all_entries(file_ptr, false);
/* Move the first half of the entries... */
for (i = 0; i < (virt_num_data_entries / 2); i++) {
@@ -5168,10 +5168,10 @@ smoke_check_2(int metadata_write_strategy)
}
for (i = 0; i < (virt_num_data_entries / 2); i += 61) {
- hbool_t via_unprotect = ((((unsigned)i) & 0x01) == 0);
- hbool_t dirty = ((((unsigned)i) & 0x02) == 0);
+ bool via_unprotect = ((((unsigned)i) & 0x01) == 0);
+ bool dirty = ((((unsigned)i) & 0x02) == 0);
- unpin_entry(file_ptr, i, TRUE, dirty, via_unprotect);
+ unpin_entry(file_ptr, i, true, dirty, via_unprotect);
}
if (fid >= 0) {
@@ -5207,7 +5207,7 @@ smoke_check_2(int metadata_write_strategy)
if (success) {
- success = send_mssg(&mssg, FALSE);
+ success = send_mssg(&mssg, false);
if (!success) {
@@ -5252,22 +5252,22 @@ smoke_check_2(int metadata_write_strategy)
* In this test, load process 0 heavily, and the other
* processes lightly.
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
-static hbool_t
+static bool
smoke_check_3(int metadata_write_strategy)
{
- hbool_t success = TRUE;
+ bool success = true;
int i;
int max_nerrors;
int min_count;
int max_count;
int min_idx;
int max_idx;
- hid_t fid = -1;
+ hid_t fid = H5I_INVALID_HID;
H5F_t *file_ptr = NULL;
H5C_t *cache_ptr = NULL;
struct mssg_t mssg;
@@ -5341,13 +5341,13 @@ smoke_check_3(int metadata_write_strategy)
if (i % 59 == 0) {
- hbool_t dirty = ((i % 2) == 0);
+ bool dirty = ((i % 2) == 0);
if (data[i].local_pinned) {
- unpin_entry(file_ptr, i, FALSE, FALSE, FALSE);
+ unpin_entry(file_ptr, i, false, false, false);
}
- pin_entry(file_ptr, i, TRUE, dirty);
+ pin_entry(file_ptr, i, true, dirty);
assert(!dirty || data[i].header.is_dirty);
assert(data[i].header.is_pinned);
@@ -5384,13 +5384,13 @@ smoke_check_3(int metadata_write_strategy)
for (i = (virt_num_data_entries / 2) - 1; i >= 0; i--) {
if ((i >= (virt_num_data_entries / 4)) && (i % 59 == 0)) {
- hbool_t via_unprotect = ((((unsigned)i) & 0x02) == 0);
- hbool_t dirty = ((((unsigned)i) & 0x04) == 0);
+ bool via_unprotect = ((((unsigned)i) & 0x02) == 0);
+ bool dirty = ((((unsigned)i) & 0x04) == 0);
assert(data[i].global_pinned);
assert(!data[i].local_pinned);
- unpin_entry(file_ptr, i, TRUE, dirty, via_unprotect);
+ unpin_entry(file_ptr, i, true, dirty, via_unprotect);
}
if (i % 2 == 0) {
@@ -5415,7 +5415,7 @@ smoke_check_3(int metadata_write_strategy)
}
/* we can't move pinned entries, so release any local pins now. */
- local_unpin_all_entries(file_ptr, FALSE);
+ local_unpin_all_entries(file_ptr, false);
min_count = 10 / (file_mpi_rank + 1);
max_count = min_count + 100;
@@ -5455,7 +5455,7 @@ smoke_check_3(int metadata_write_strategy)
}
/* release any local pins before we take down the cache. */
- local_unpin_all_entries(file_ptr, FALSE);
+ local_unpin_all_entries(file_ptr, false);
if (fid >= 0) {
@@ -5490,7 +5490,7 @@ smoke_check_3(int metadata_write_strategy)
if (success) {
- success = send_mssg(&mssg, FALSE);
+ success = send_mssg(&mssg, false);
if (!success) {
@@ -5535,22 +5535,22 @@ smoke_check_3(int metadata_write_strategy)
* In this test, load process 0 lightly, and the other
* processes heavily.
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
-static hbool_t
+static bool
smoke_check_4(int metadata_write_strategy)
{
- hbool_t success = TRUE;
+ bool success = true;
int i;
int max_nerrors;
int min_count;
int max_count;
int min_idx;
int max_idx;
- hid_t fid = -1;
+ hid_t fid = H5I_INVALID_HID;
H5F_t *file_ptr = NULL;
H5C_t *cache_ptr = NULL;
struct mssg_t mssg;
@@ -5631,18 +5631,18 @@ smoke_check_4(int metadata_write_strategy)
* entries are in fact pinned (which unpin_entry() should do).
*/
insert_entry(cache_ptr, file_ptr, i, H5C__PIN_ENTRY_FLAG);
- unpin_entry(file_ptr, i, TRUE, FALSE, FALSE);
+ unpin_entry(file_ptr, i, true, false, false);
}
if (i % 59 == 0) {
- hbool_t dirty = ((i % 2) == 0);
+ bool dirty = ((i % 2) == 0);
if (data[i].local_pinned) {
- unpin_entry(file_ptr, i, FALSE, FALSE, FALSE);
+ unpin_entry(file_ptr, i, false, false, false);
}
- pin_entry(file_ptr, i, TRUE, dirty);
+ pin_entry(file_ptr, i, true, dirty);
assert(!dirty || data[i].header.is_dirty);
assert(data[i].header.is_pinned);
@@ -5675,13 +5675,13 @@ smoke_check_4(int metadata_write_strategy)
for (i = (virt_num_data_entries / 2) - 1; i >= 0; i--) {
if ((i >= (virt_num_data_entries / 4)) && (i % 59 == 0)) {
- hbool_t via_unprotect = ((((unsigned)i) & 0x02) == 0);
- hbool_t dirty = ((((unsigned)i) & 0x04) == 0);
+ bool via_unprotect = ((((unsigned)i) & 0x02) == 0);
+ bool dirty = ((((unsigned)i) & 0x04) == 0);
assert(data[i].global_pinned);
assert(!data[i].local_pinned);
- unpin_entry(file_ptr, i, TRUE, dirty, via_unprotect);
+ unpin_entry(file_ptr, i, true, dirty, via_unprotect);
}
if (i % 2 == 0) {
@@ -5702,7 +5702,7 @@ smoke_check_4(int metadata_write_strategy)
}
/* we can't move pinned entries, so release any local pins now. */
- local_unpin_all_entries(file_ptr, FALSE);
+ local_unpin_all_entries(file_ptr, false);
min_count = 10 * (file_mpi_rank % 4);
max_count = min_count + 100;
@@ -5772,7 +5772,7 @@ smoke_check_4(int metadata_write_strategy)
if (success) {
- success = send_mssg(&mssg, FALSE);
+ success = send_mssg(&mssg, false);
if (!success) {
@@ -5812,18 +5812,18 @@ smoke_check_4(int metadata_write_strategy)
* Purpose: Similar to smoke check 1, but modified to verify that
* H5AC_mark_entry_dirty() works in the parallel case.
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
-static hbool_t
+static bool
smoke_check_5(int metadata_write_strategy)
{
- hbool_t success = TRUE;
+ bool success = true;
int i;
int max_nerrors;
- hid_t fid = -1;
+ hid_t fid = H5I_INVALID_HID;
H5F_t *file_ptr = NULL;
H5C_t *cache_ptr = NULL;
struct mssg_t mssg;
@@ -5910,7 +5910,7 @@ smoke_check_5(int metadata_write_strategy)
}
for (i = (virt_num_data_entries / 2) - 1; i >= (virt_num_data_entries / 4); i--) {
- pin_entry(file_ptr, i, TRUE, FALSE);
+ pin_entry(file_ptr, i, true, false);
if (i % 2 == 0) {
if (i % 8 <= 4) {
@@ -5926,7 +5926,7 @@ smoke_check_5(int metadata_write_strategy)
}
}
- unpin_entry(file_ptr, i, TRUE, FALSE, FALSE);
+ unpin_entry(file_ptr, i, true, false, false);
}
if (fid >= 0) {
@@ -5962,7 +5962,7 @@ smoke_check_5(int metadata_write_strategy)
if (success) {
- success = send_mssg(&mssg, FALSE);
+ success = send_mssg(&mssg, false);
if (!success) {
@@ -6025,15 +6025,15 @@ smoke_check_5(int metadata_write_strategy)
* - H5AC_expunge_entry()
* - H5AC_resize_entry()
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
-static hbool_t
+static bool
trace_file_check(int metadata_write_strategy)
{
- hbool_t success = TRUE;
+ bool success = true;
const char *((*expected_output)[]) = NULL;
const char *expected_output_0[] = {"### HDF5 metadata cache trace file version 1 ###\n",
@@ -6090,12 +6090,12 @@ trace_file_check(int metadata_write_strategy)
NULL};
char buffer[256];
char trace_file_name[64];
- hbool_t done = FALSE;
+ bool done = false;
int i;
int max_nerrors;
size_t expected_line_len;
size_t actual_line_len;
- hid_t fid = -1;
+ hid_t fid = H5I_INVALID_HID;
H5F_t *file_ptr = NULL;
H5C_t *cache_ptr = NULL;
FILE *trace_file_ptr = NULL;
@@ -6168,7 +6168,7 @@ trace_file_check(int metadata_write_strategy)
__func__);
}
else {
- config.open_trace_file = TRUE;
+ config.open_trace_file = true;
strcpy(config.trace_file_name, "t_cache_trace.txt");
if (H5AC_set_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) {
@@ -6189,19 +6189,19 @@ trace_file_check(int metadata_write_strategy)
unlock_entry(file_ptr, 0, H5AC__NO_FLAGS_SET);
lock_entry(file_ptr, 1);
- pin_protected_entry(1, TRUE);
+ pin_protected_entry(1, true);
unlock_entry(file_ptr, 1, H5AC__NO_FLAGS_SET);
- unpin_entry(file_ptr, 1, TRUE, FALSE, FALSE);
+ unpin_entry(file_ptr, 1, true, false, false);
expunge_entry(file_ptr, 1);
lock_entry(file_ptr, 2);
- pin_protected_entry(2, TRUE);
+ pin_protected_entry(2, true);
unlock_entry(file_ptr, 2, H5AC__NO_FLAGS_SET);
mark_entry_dirty(2);
resize_entry(2, data[2].len / 2);
resize_entry(2, data[2].len);
- unpin_entry(file_ptr, 2, TRUE, FALSE, FALSE);
+ unpin_entry(file_ptr, 2, true, false, false);
move_entry(file_ptr, 0, 20);
move_entry(file_ptr, 0, 20);
@@ -6221,8 +6221,8 @@ trace_file_check(int metadata_write_strategy)
__func__);
}
else {
- config.open_trace_file = FALSE;
- config.close_trace_file = TRUE;
+ config.open_trace_file = false;
+ config.close_trace_file = true;
config.trace_file_name[0] = '\0';
if (H5AC_set_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) {
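For reference, a minimal sketch of the public counterpart of the trace-file toggling above: the test drives the cache pointer directly, while applications would normally go through H5Fget_mdc_config()/H5Fset_mdc_config(). The helper name and trace file name are illustrative, and error handling is omitted.

#include <stdbool.h>
#include <string.h>
#include "hdf5.h"

/* Illustrative helper (not taken from the test): enable or disable the
 * metadata cache trace file through the public MDC config API. */
static void
toggle_trace_file(hid_t file_id, bool enable)
{
    H5AC_cache_config_t config;

    config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
    H5Fget_mdc_config(file_id, &config); /* start from the current settings */

    if (enable) {
        config.open_trace_file = true;
        strcpy(config.trace_file_name, "t_cache_trace.txt");
    }
    else {
        config.open_trace_file    = false;
        config.close_trace_file   = true;
        config.trace_file_name[0] = '\0';
    }

    H5Fset_mdc_config(file_id, &config);
}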
@@ -6263,7 +6263,7 @@ trace_file_check(int metadata_write_strategy)
mssg.magic = MSSG_MAGIC;
if (success) {
- success = send_mssg(&mssg, FALSE);
+ success = send_mssg(&mssg, false);
if (!success) {
nerrors++;
@@ -6273,7 +6273,7 @@ trace_file_check(int metadata_write_strategy)
} /* end if */
if (nerrors == 0) {
- HDsnprintf(trace_file_name, sizeof(trace_file_name), "t_cache_trace.txt.%d", (int)file_mpi_rank);
+ snprintf(trace_file_name, sizeof(trace_file_name), "t_cache_trace.txt.%d", (int)file_mpi_rank);
if ((trace_file_ptr = fopen(trace_file_name, "r")) == NULL) {
@@ -6289,10 +6289,10 @@ trace_file_check(int metadata_write_strategy)
if ((*expected_output)[i] == NULL)
expected_line_len = (size_t)0;
else
- expected_line_len = HDstrlen((*expected_output)[i]);
+ expected_line_len = strlen((*expected_output)[i]);
- if (HDfgets(buffer, 255, trace_file_ptr) != NULL)
- actual_line_len = HDstrlen(buffer);
+ if (fgets(buffer, 255, trace_file_ptr) != NULL)
+ actual_line_len = strlen(buffer);
else
actual_line_len = (size_t)0;
@@ -6301,7 +6301,7 @@ trace_file_check(int metadata_write_strategy)
if ((actual_line_len == 0) || (expected_line_len == 0)) {
if ((actual_line_len == 0) && (expected_line_len == 0)) {
/* Both ran out at the same time - we're done */
- done = TRUE;
+ done = true;
}
else {
/* One ran out before the other - BADNESS */
@@ -6327,8 +6327,7 @@ trace_file_check(int metadata_write_strategy)
}
/* We directly compare the header line (line 0) */
else if (0 == i) {
- if ((actual_line_len != expected_line_len) ||
- (HDstrcmp(buffer, (*expected_output)[i]) != 0)) {
+ if ((actual_line_len != expected_line_len) || (strcmp(buffer, (*expected_output)[i]) != 0)) {
nerrors++;
if (verbose) {
@@ -6347,9 +6346,9 @@ trace_file_check(int metadata_write_strategy)
else {
char *tok = NULL; /* token for actual line */
- tok = HDstrtok(buffer, " ");
+ tok = strtok(buffer, " ");
- if (HDstrcmp(tok, (*expected_output)[i]) != 0) {
+ if (strcmp(tok, (*expected_output)[i]) != 0) {
nerrors++;
if (verbose) {
@@ -6398,20 +6397,20 @@ trace_file_check(int metadata_write_strategy)
*
* Purpose: Sixth smoke check for the parallel cache.
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
-static hbool_t
+static bool
smoke_check_6(int metadata_write_strategy)
{
H5P_coll_md_read_flag_t md_reads_file_flag;
- hbool_t md_reads_context_flag;
- hbool_t success = TRUE;
+ bool md_reads_context_flag;
+ bool success = true;
int i;
int max_nerrors;
- hid_t fid = -1;
+ hid_t fid = H5I_INVALID_HID;
H5F_t *file_ptr = NULL;
H5C_t *cache_ptr = NULL;
struct mssg_t mssg;
@@ -6471,7 +6470,7 @@ smoke_check_6(int metadata_write_strategy)
/* insert the first half collectively */
md_reads_file_flag = H5P_USER_TRUE;
- md_reads_context_flag = TRUE;
+ md_reads_context_flag = true;
H5F_set_coll_metadata_reads(file_ptr, &md_reads_file_flag, &md_reads_context_flag);
for (i = 0; i < virt_num_data_entries / 2; i++) {
struct datum *entry_ptr;
@@ -6479,7 +6478,7 @@ smoke_check_6(int metadata_write_strategy)
insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET);
- if (TRUE != entry_ptr->header.coll_access) {
+ if (true != entry_ptr->header.coll_access) {
nerrors++;
if (verbose) {
fprintf(stdout, "%d:%s: Entry inserted not marked as collective.\n", world_mpi_rank,
@@ -6496,7 +6495,7 @@ smoke_check_6(int metadata_write_strategy)
/* insert the other half independently */
md_reads_file_flag = H5P_USER_FALSE;
- md_reads_context_flag = FALSE;
+ md_reads_context_flag = false;
H5F_set_coll_metadata_reads(file_ptr, &md_reads_file_flag, &md_reads_context_flag);
for (i = virt_num_data_entries / 2; i < virt_num_data_entries; i++) {
struct datum *entry_ptr;
@@ -6504,7 +6503,7 @@ smoke_check_6(int metadata_write_strategy)
insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET);
- if (FALSE != entry_ptr->header.coll_access) {
+ if (false != entry_ptr->header.coll_access) {
nerrors++;
if (verbose) {
fprintf(stdout, "%d:%s: Entry inserted independently marked as collective.\n",
@@ -6528,7 +6527,7 @@ smoke_check_6(int metadata_write_strategy)
/* Protect the first half of the entries collectively */
md_reads_file_flag = H5P_USER_TRUE;
- md_reads_context_flag = TRUE;
+ md_reads_context_flag = true;
H5F_set_coll_metadata_reads(file_ptr, &md_reads_file_flag, &md_reads_context_flag);
for (i = 0; i < (virt_num_data_entries / 2); i++) {
struct datum *entry_ptr;
@@ -6536,7 +6535,7 @@ smoke_check_6(int metadata_write_strategy)
lock_entry(file_ptr, i);
- if (TRUE != entry_ptr->header.coll_access) {
+ if (true != entry_ptr->header.coll_access) {
nerrors++;
if (verbose) {
fprintf(stdout, "%d:%s: Entry protected not marked as collective.\n", world_mpi_rank,
@@ -6552,7 +6551,7 @@ smoke_check_6(int metadata_write_strategy)
/* protect the other half independently */
md_reads_file_flag = H5P_USER_FALSE;
- md_reads_context_flag = FALSE;
+ md_reads_context_flag = false;
H5F_set_coll_metadata_reads(file_ptr, &md_reads_file_flag, &md_reads_context_flag);
for (i = virt_num_data_entries / 2; i < virt_num_data_entries; i++) {
struct datum *entry_ptr;
@@ -6560,7 +6559,7 @@ smoke_check_6(int metadata_write_strategy)
lock_entry(file_ptr, i);
- if (FALSE != entry_ptr->header.coll_access) {
+ if (false != entry_ptr->header.coll_access) {
nerrors++;
if (verbose) {
fprintf(stdout, "%d:%s: Entry inserted independently marked as collective.\n",
@@ -6611,7 +6610,7 @@ smoke_check_6(int metadata_write_strategy)
if (success) {
- success = send_mssg(&mssg, FALSE);
+ success = send_mssg(&mssg, false);
if (!success) {
@@ -6816,7 +6815,7 @@ main(int argc, char **argv)
#endif
/* enable the collective metadata read property */
if (world_mpi_rank != world_server_mpi_rank) {
- if (H5Pset_all_coll_metadata_ops(fapl, TRUE) < 0) {
+ if (H5Pset_all_coll_metadata_ops(fapl, true) < 0) {
nerrors++;
if (verbose) {
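For reference, a minimal sketch of the FAPL setup this hunk relies on, assuming a parallel HDF5 build; the helper name, communicator, and error handling are illustrative only.

#include <mpi.h>
#include "hdf5.h"

/* Illustrative helper (not from the test): open an existing file with
 * the MPI-IO driver and collective metadata reads/writes enabled. */
static hid_t
open_with_coll_metadata(const char *filename, MPI_Comm comm, MPI_Info info)
{
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    hid_t fid;

    H5Pset_fapl_mpio(fapl, comm, info);       /* MPI-IO file driver */
    H5Pset_all_coll_metadata_ops(fapl, true); /* collective metadata reads */
    H5Pset_coll_metadata_write(fapl, true);   /* collective metadata writes */

    fid = H5Fopen(filename, H5F_ACC_RDWR, fapl);
    H5Pclose(fapl);

    return fid; /* negative on failure */
}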
diff --git a/testpar/t_cache_image.c b/testpar/t_cache_image.c
index 49d1017..5de6150 100644
--- a/testpar/t_cache_image.c
+++ b/testpar/t_cache_image.c
@@ -39,12 +39,11 @@ static void create_data_sets(hid_t file_id, int min_dset, int max_dset);
static void delete_data_sets(hid_t file_id, int min_dset, int max_dset);
#endif
-static void open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected,
- const hbool_t read_only, const hbool_t set_mdci_fapl, const hbool_t config_fsm,
- const hbool_t enable_page_buffer, const char *hdf_file_name,
- const unsigned cache_image_flags, hid_t *file_id_ptr, H5F_t **file_ptr_ptr,
- H5C_t **cache_ptr_ptr, MPI_Comm comm, MPI_Info info, int l_facc_type,
- const hbool_t all_coll_metadata_ops, const hbool_t coll_metadata_write,
+static void open_hdf5_file(const bool create_file, const bool mdci_sbem_expected, const bool read_only,
+ const bool set_mdci_fapl, const bool config_fsm, const bool enable_page_buffer,
+ const char *hdf_file_name, const unsigned cache_image_flags, hid_t *file_id_ptr,
+ H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr, MPI_Comm comm, MPI_Info info,
+ int l_facc_type, const bool all_coll_metadata_ops, const bool coll_metadata_write,
const int md_write_strat);
static void verify_data_sets(hid_t file_id, int min_dset, int max_dset);
@@ -56,14 +55,14 @@ static void par_create_dataset(int dset_num, hid_t file_id, int mpi_rank, in
static void par_delete_dataset(int dset_num, hid_t file_id, int mpi_rank);
static void par_verify_dataset(int dset_num, hid_t file_id, int mpi_rank);
-static hbool_t serial_insert_cache_image(int file_name_idx, int mpi_size);
-static void serial_verify_dataset(int dset_num, hid_t file_id, int mpi_size);
+static bool serial_insert_cache_image(int file_name_idx, int mpi_size);
+static void serial_verify_dataset(int dset_num, hid_t file_id, int mpi_size);
/* top level test function declarations */
static unsigned verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank);
static unsigned verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank);
-static hbool_t smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size);
+static bool smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size);
/****************************************************************************/
/***************************** Utility Functions ****************************/
@@ -129,8 +128,8 @@ construct_test_file(int test_file_index)
{
const char *fcn_name = "construct_test_file()";
char filename[512];
- hbool_t show_progress = FALSE;
- hid_t file_id = -1;
+ bool show_progress = false;
+ hid_t file_id = H5I_INVALID_HID;
H5F_t *file_ptr = NULL;
H5C_t *cache_ptr = NULL;
int cp = 0;
@@ -139,7 +138,7 @@ construct_test_file(int test_file_index)
MPI_Comm dummy_comm = MPI_COMM_WORLD;
MPI_Info dummy_info = MPI_INFO_NULL;
- pass = TRUE;
+ pass = true;
if (show_progress)
fprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -151,7 +150,7 @@ construct_test_file(int test_file_index)
if (h5_fixname(FILENAMES[test_file_index], H5P_DEFAULT, filename, sizeof(filename)) == NULL) {
- pass = FALSE;
+ pass = false;
failure_mssg = "h5_fixname() failed.\n";
}
}
@@ -168,12 +167,12 @@ construct_test_file(int test_file_index)
if (pass) {
- open_hdf5_file(/* create_file */ TRUE,
- /* mdci_sbem_expected */ FALSE,
- /* read_only */ FALSE,
- /* set_mdci_fapl */ TRUE,
- /* config_fsm */ TRUE,
- /* enable_page_buffer */ FALSE,
+ open_hdf5_file(/* create_file */ true,
+ /* mdci_sbem_expected */ false,
+ /* read_only */ false,
+ /* set_mdci_fapl */ true,
+ /* config_fsm */ true,
+ /* enable_page_buffer */ false,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
/* file_id_ptr */ &file_id,
@@ -182,8 +181,8 @@ construct_test_file(int test_file_index)
/* comm */ dummy_comm,
/* info */ dummy_info,
/* l_facc_type */ 0,
- /* all_coll_metadata_ops */ FALSE,
- /* coll_metadata_write */ FALSE,
+ /* all_coll_metadata_ops */ false,
+ /* coll_metadata_write */ false,
/* md_write_strat */ 0);
}
@@ -202,7 +201,7 @@ construct_test_file(int test_file_index)
if (cache_ptr->images_loaded != 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "metadata cache image block loaded(1).";
}
}
@@ -217,7 +216,7 @@ construct_test_file(int test_file_index)
if (H5Fclose(file_id) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Fclose() failed.\n";
}
}
@@ -235,12 +234,12 @@ construct_test_file(int test_file_index)
if (pass) {
- open_hdf5_file(/* create_file */ FALSE,
- /* mdci_sbem_expected */ TRUE,
- /* read_only */ FALSE,
- /* set_mdci_fapl */ TRUE,
- /* config_fsm */ FALSE,
- /* enable_page_buffer */ FALSE,
+ open_hdf5_file(/* create_file */ false,
+ /* mdci_sbem_expected */ true,
+ /* read_only */ false,
+ /* set_mdci_fapl */ true,
+ /* config_fsm */ false,
+ /* enable_page_buffer */ false,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
/* file_id_ptr */ &file_id,
@@ -249,8 +248,8 @@ construct_test_file(int test_file_index)
/* comm */ dummy_comm,
/* info */ dummy_info,
/* l_facc_type */ 0,
- /* all_coll_metadata_ops */ FALSE,
- /* coll_metadata_write */ FALSE,
+ /* all_coll_metadata_ops */ false,
+ /* coll_metadata_write */ false,
/* md_write_strat */ 0);
}
@@ -269,7 +268,7 @@ construct_test_file(int test_file_index)
if (cache_ptr->images_loaded == 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "metadata cache image block not loaded(1).";
}
}
@@ -284,7 +283,7 @@ construct_test_file(int test_file_index)
if (H5Fclose(file_id) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Fclose() failed.\n";
}
}
@@ -302,12 +301,12 @@ construct_test_file(int test_file_index)
if (pass) {
- open_hdf5_file(/* create_file */ FALSE,
- /* mdci_sbem_expected */ TRUE,
- /* read_only */ TRUE,
- /* set_mdci_fapl */ FALSE,
- /* config_fsm */ FALSE,
- /* enable_page_buffer */ FALSE,
+ open_hdf5_file(/* create_file */ false,
+ /* mdci_sbem_expected */ true,
+ /* read_only */ true,
+ /* set_mdci_fapl */ false,
+ /* config_fsm */ false,
+ /* enable_page_buffer */ false,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
/* file_id_ptr */ &file_id,
@@ -316,8 +315,8 @@ construct_test_file(int test_file_index)
/* comm */ dummy_comm,
/* info */ dummy_info,
/* l_facc_type */ 0,
- /* all_coll_metadata_ops */ FALSE,
- /* coll_metadata_write */ FALSE,
+ /* all_coll_metadata_ops */ false,
+ /* coll_metadata_write */ false,
/* md_write_strat */ 0);
}
@@ -339,7 +338,7 @@ construct_test_file(int test_file_index)
if (cache_ptr->images_loaded == 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "metadata cache image block not loaded(2).";
}
}
@@ -354,7 +353,7 @@ construct_test_file(int test_file_index)
if (H5Fclose(file_id) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Fclose() failed.\n";
}
}
@@ -366,16 +365,16 @@ construct_test_file(int test_file_index)
/*-------------------------------------------------------------------------
* Function: create_data_sets()
*
- * Purpose: If pass is TRUE on entry, create the specified data sets
+ * Purpose: If pass is true on entry, create the specified data sets
* in the indicated file.
*
 * Data sets and their contents must be well known, as we
* will verify that they contain the expected data later.
*
- * On failure, set pass to FALSE, and set failure_mssg
+ * On failure, set pass to false, and set failure_mssg
* to point to an appropriate failure message.
*
- * Do nothing if pass is FALSE on entry.
+ * Do nothing if pass is false on entry.
*
* Return: void
*
@@ -387,18 +386,18 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
{
const char *fcn_name = "create_data_sets()";
char dset_name[64];
- hbool_t show_progress = FALSE;
- hbool_t valid_chunk;
- hbool_t verbose = FALSE;
+ bool show_progress = false;
+ bool valid_chunk;
+ bool verbose = false;
int cp = 0;
int i, j, k, l, m;
int data_chunk[CHUNK_SIZE][CHUNK_SIZE];
herr_t status;
- hid_t dataspace_id = -1;
+ hid_t dataspace_id = H5I_INVALID_HID;
hid_t filespace_ids[MAX_NUM_DSETS];
- hid_t memspace_id = -1;
+ hid_t memspace_id = H5I_INVALID_HID;
hid_t dataset_ids[MAX_NUM_DSETS];
- hid_t properties = -1;
+ hid_t properties = H5I_INVALID_HID;
hsize_t dims[2];
hsize_t a_size[2];
hsize_t offset[2];
@@ -425,7 +424,7 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
if (dataspace_id < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Screate_simple() failed.";
}
@@ -441,7 +440,7 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
if (properties < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pcreate() failed.";
}
}
@@ -450,7 +449,7 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
if (H5Pset_chunk(properties, 2, chunk_size) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pset_chunk() failed.";
}
}
@@ -458,13 +457,13 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
/* create the dataset */
if (pass) {
- HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", i);
+ snprintf(dset_name, sizeof(dset_name), "/dset%03d", i);
dataset_ids[i] = H5Dcreate2(file_id, dset_name, H5T_STD_I32BE, dataspace_id, H5P_DEFAULT,
properties, H5P_DEFAULT);
if (dataset_ids[i] < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dcreate() failed.";
}
}
@@ -476,7 +475,7 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
if (filespace_ids[i] < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dget_space() failed.";
}
}
@@ -497,7 +496,7 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
if (memspace_id < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Screate_simple() failed.";
}
}
@@ -516,7 +515,7 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
if (status < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Sselect_hyperslab() failed.";
}
}
@@ -547,7 +546,7 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
if (status < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "disk H5Sselect_hyperslab() failed.";
}
@@ -557,7 +556,7 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
if (status < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dwrite() failed.";
}
m++;
@@ -588,7 +587,7 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
if (status < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "disk hyperslab create failed.";
}
@@ -600,7 +599,7 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
if (status < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "disk hyperslab create failed.";
}
}
@@ -608,13 +607,13 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
/* validate the slab */
if (pass) {
- valid_chunk = TRUE;
+ valid_chunk = true;
for (k = 0; k < CHUNK_SIZE; k++) {
for (l = 0; l < CHUNK_SIZE; l++) {
if (data_chunk[k][l] !=
((DSET_SIZE * DSET_SIZE * m) + (DSET_SIZE * (i + k)) + j + l)) {
- valid_chunk = FALSE;
+ valid_chunk = false;
if (verbose) {
@@ -630,7 +629,7 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
if (!valid_chunk) {
- pass = FALSE;
+ pass = false;
failure_mssg = "slab validation failed.";
if (verbose) {
@@ -654,7 +653,7 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
while ((pass) && (i <= max_dset)) {
if (H5Sclose(filespace_ids[i]) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Sclose() failed.";
}
i++;
@@ -665,7 +664,7 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
while ((pass) && (i <= max_dset)) {
if (H5Dclose(dataset_ids[i]) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dclose() failed.";
}
i++;
@@ -676,7 +675,7 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
if (H5Sclose(memspace_id) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Sclose(memspace_id) failed.";
}
}
@@ -688,17 +687,17 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
/*-------------------------------------------------------------------------
* Function: delete_data_sets()
*
- * Purpose: If pass is TRUE on entry, verify and then delete the
+ * Purpose: If pass is true on entry, verify and then delete the
* dataset(s) indicated by min_dset and max_dset in the
* indicated file.
*
 * Data sets and their contents must be well known, as we
* will verify that they contain the expected data later.
*
- * On failure, set pass to FALSE, and set failure_mssg
+ * On failure, set pass to false, and set failure_mssg
* to point to an appropriate failure message.
*
- * Do nothing if pass is FALSE on entry.
+ * Do nothing if pass is false on entry.
*
* Return: void
*
@@ -715,7 +714,7 @@ delete_data_sets(hid_t file_id, int min_dset, int max_dset)
{
const char * fcn_name = "delete_data_sets()";
char dset_name[64];
- hbool_t show_progress = FALSE;
+ bool show_progress = false;
int cp = 0;
int i;
@@ -739,11 +738,11 @@ delete_data_sets(hid_t file_id, int min_dset, int max_dset)
while ( ( pass ) && ( i <= max_dset ) )
{
- HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", i);
+ snprintf(dset_name, sizeof(dset_name), "/dset%03d", i);
if ( H5Ldelete(file_id, dset_name, H5P_DEFAULT) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Ldelete() failed.";
}
@@ -765,32 +764,32 @@ delete_data_sets(hid_t file_id, int min_dset, int max_dset)
* and test to see if it has a metadata cache image superblock
* extension message.
*
- * Set pass to FALSE and issue a suitable failure
+ * Set pass to false and issue a suitable failure
* message if either the file contains a metadata cache image
- * superblock extension and mdci_sbem_expected is TRUE, or
+ * superblock extension and mdci_sbem_expected is true, or
* vice versa.
*
- * If mdci_sbem_expected is TRUE, also verify that the metadata
+ * If mdci_sbem_expected is true, also verify that the metadata
* cache has been advised of this.
*
- * If read_only is TRUE, open the file read only. Otherwise
+ * If read_only is true, open the file read only. Otherwise
* open the file read/write.
*
- * If set_mdci_fapl is TRUE, set the metadata cache image
+ * If set_mdci_fapl is true, set the metadata cache image
* FAPL entry when opening the file, and verify that the
* metadata cache is notified.
*
- * If config_fsm is TRUE, setup the persistent free space
+ * If config_fsm is true, setup the persistent free space
* manager. Note that this flag may only be set if
- * create_file is also TRUE.
+ * create_file is also true.
*
* Return pointers to the cache data structure and file data
* structures.
*
- * On failure, set pass to FALSE, and set failure_mssg
+ * On failure, set pass to false, and set failure_mssg
* to point to an appropriate failure message.
*
- * Do nothing if pass is FALSE on entry.
+ * Do nothing if pass is false on entry.
*
* Return: void
*
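The set_mdci_fapl path described above maps onto the public cache-image API; a minimal sketch, using the same field values the test sets, follows. The helper name is hypothetical and error handling is omitted.

#include "hdf5.h"

/* Illustrative helper (not from the test): ask the library to write a
 * metadata cache image on file close, the FAPL setting that
 * set_mdci_fapl selects in open_hdf5_file(). */
static void
request_cache_image(hid_t fapl_id)
{
    H5AC_cache_image_config_t config;

    config.version            = H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION;
    config.generate_image     = true;  /* generate an image on close */
    config.save_resize_status = false;
    config.entry_ageout       = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE;

    H5Pset_mdc_image_config(fapl_id, &config);
}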
@@ -798,25 +797,24 @@ delete_data_sets(hid_t file_id, int min_dset, int max_dset)
*/
static void
-open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, const hbool_t read_only,
- const hbool_t set_mdci_fapl, const hbool_t config_fsm, const hbool_t enable_page_buffer,
+open_hdf5_file(const bool create_file, const bool mdci_sbem_expected, const bool read_only,
+ const bool set_mdci_fapl, const bool config_fsm, const bool enable_page_buffer,
const char *hdf_file_name, const unsigned cache_image_flags, hid_t *file_id_ptr,
H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr, MPI_Comm comm, MPI_Info info, int l_facc_type,
- const hbool_t all_coll_metadata_ops, const hbool_t coll_metadata_write,
- const int md_write_strat)
+ const bool all_coll_metadata_ops, const bool coll_metadata_write, const int md_write_strat)
{
const char *fcn_name = "open_hdf5_file()";
- hbool_t show_progress = FALSE;
- hbool_t verbose = FALSE;
+ bool show_progress = false;
+ bool verbose = false;
int cp = 0;
- hid_t fapl_id = -1;
- hid_t fcpl_id = -1;
- hid_t file_id = -1;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t fcpl_id = H5I_INVALID_HID;
+ hid_t file_id = H5I_INVALID_HID;
herr_t result;
H5F_t *file_ptr = NULL;
H5C_t *cache_ptr = NULL;
H5C_cache_image_ctl_t image_ctl;
- H5AC_cache_image_config_t cache_image_config = {H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION, TRUE, FALSE,
+ H5AC_cache_image_config_t cache_image_config = {H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION, true, false,
H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE};
assert(!create_file || config_fsm);
@@ -835,7 +833,7 @@ open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, cons
failure_mssg = "Bad param(s) on entry to open_hdf5_file().\n";
- pass = FALSE;
+ pass = false;
}
else if (verbose) {
@@ -853,7 +851,7 @@ open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, cons
if (fapl_id < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pcreate() failed.\n";
}
}
@@ -866,7 +864,7 @@ open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, cons
if (H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pset_libver_bounds() failed.\n";
}
}
@@ -881,16 +879,16 @@ open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, cons
if (result < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pget_mdc_image_config() failed.\n";
}
if ((cache_image_config.version != H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION) ||
- (cache_image_config.generate_image != FALSE) ||
- (cache_image_config.save_resize_status != FALSE) ||
+ (cache_image_config.generate_image != false) ||
+ (cache_image_config.save_resize_status != false) ||
(cache_image_config.entry_ageout != H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Unexpected default cache image config.\n";
}
}
@@ -902,15 +900,15 @@ open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, cons
if ((pass) && (set_mdci_fapl)) {
/* set cache image config fields to taste */
- cache_image_config.generate_image = TRUE;
- cache_image_config.save_resize_status = FALSE;
+ cache_image_config.generate_image = true;
+ cache_image_config.save_resize_status = false;
cache_image_config.entry_ageout = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE;
result = H5Pset_mdc_image_config(fapl_id, &cache_image_config);
if (result < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pset_mdc_image_config() failed.\n";
}
}
@@ -925,15 +923,15 @@ open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, cons
if (fcpl_id <= 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pcreate(H5P_FILE_CREATE) failed.";
}
}
if ((pass) && (config_fsm)) {
- if (H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE, TRUE, (hsize_t)1) == FAIL) {
- pass = FALSE;
+ if (H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE, true, (hsize_t)1) == FAIL) {
+ pass = false;
failure_mssg = "H5Pset_file_space_strategy() failed.\n";
}
}
@@ -942,7 +940,7 @@ open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, cons
if (H5Pset_file_space_page_size(fcpl_id, PAGE_SIZE) == FAIL) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pset_file_space_page_size() failed.\n";
}
}
@@ -955,7 +953,7 @@ open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, cons
if (H5Pset_page_buffer_size(fapl_id, PB_SIZE, 0, 0) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pset_page_buffer_size() failed.\n";
}
}
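A minimal sketch of the property-list calls behind config_fsm and enable_page_buffer; the page and buffer sizes below are stand-in values, not the test's constants, and error checks are omitted. The page buffer is not used with MPI-IO file access, which the page_buf checks later in this file's diff reflect.

#include "hdf5.h"

#define EX_PAGE_SIZE ((hsize_t)(4 * 1024))     /* assumed value */
#define EX_PB_SIZE   ((size_t)(64 * 4 * 1024)) /* assumed value */

/* Illustrative helper: persistent, paged free-space management on the
 * FCPL plus a page buffer on the FAPL. */
static void
configure_paged_io(hid_t fcpl_id, hid_t fapl_id)
{
    H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE, true, (hsize_t)1);
    H5Pset_file_space_page_size(fcpl_id, EX_PAGE_SIZE);
    H5Pset_page_buffer_size(fapl_id, EX_PB_SIZE, 0, 0);
}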
@@ -968,7 +966,7 @@ open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, cons
/* set Parallel access with communicator */
if (H5Pset_fapl_mpio(fapl_id, comm, info) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pset_fapl_mpio() failed.\n";
}
}
@@ -980,7 +978,7 @@ open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, cons
if (H5Pset_all_coll_metadata_ops(fapl_id, all_coll_metadata_ops) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pset_all_coll_metadata_ops() failed.\n";
}
}
@@ -992,7 +990,7 @@ open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, cons
if (H5Pset_coll_metadata_write(fapl_id, coll_metadata_write) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pset_coll_metadata_write() failed.\n";
}
}
@@ -1009,7 +1007,7 @@ open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, cons
if (H5Pget_mdc_config(fapl_id, &mdc_config) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pget_mdc_config() failed.\n";
}
@@ -1017,7 +1015,7 @@ open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, cons
if (H5Pset_mdc_config(fapl_id, &mdc_config) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pset_mdc_config() failed.\n";
}
}
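The lines elided between the two hunks above presumably adjust the FAPL's metadata cache configuration (open_hdf5_file() takes an md_write_strat argument); the sketch below shows only the get/modify/set pattern, and the field it updates is an assumption, not read from the diff.

#include "hdf5.h"

/* Illustrative helper: tune the FAPL-level metadata cache config.
 * The updated field is assumed; md_write_strat would be, e.g.,
 * H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED. */
static void
set_md_write_strategy(hid_t fapl_id, int md_write_strat)
{
    H5AC_cache_config_t mdc_config;

    mdc_config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
    H5Pget_mdc_config(fapl_id, &mdc_config); /* read the current/default config */

    mdc_config.metadata_write_strategy = md_write_strat; /* assumed field */

    H5Pset_mdc_config(fapl_id, &mdc_config);
}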
@@ -1050,7 +1048,7 @@ open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, cons
if (file_id < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Fcreate() or H5Fopen() failed.\n";
}
else {
@@ -1059,7 +1057,7 @@ open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, cons
if (file_ptr == NULL) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Can't get file_ptr.";
if (verbose) {
@@ -1079,7 +1077,7 @@ open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, cons
if (file_ptr->shared->cache == NULL) {
- pass = FALSE;
+ pass = false;
failure_mssg = "can't get cache pointer(1).\n";
}
else {
@@ -1099,13 +1097,13 @@ open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, cons
if ((file_ptr->shared->page_buf) && ((!enable_page_buffer) || (l_facc_type == FACC_MPIO))) {
- pass = FALSE;
+ pass = false;
failure_mssg = "page buffer unexpectedly enabled.";
}
else if ((file_ptr->shared->page_buf != NULL) &&
((enable_page_buffer) || (l_facc_type != FACC_MPIO))) {
- pass = FALSE;
+ pass = false;
failure_mssg = "page buffer unexpectedly disabled.";
}
}
@@ -1124,7 +1122,7 @@ open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, cons
if (H5C__get_cache_image_config(cache_ptr, &image_ctl) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "error returned by H5C__get_cache_image_config().";
}
}
@@ -1139,22 +1137,22 @@ open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, cons
if (read_only) {
if ((image_ctl.version != H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION) ||
- (image_ctl.generate_image != FALSE) || (image_ctl.save_resize_status != FALSE) ||
+ (image_ctl.generate_image != false) || (image_ctl.save_resize_status != false) ||
(image_ctl.entry_ageout != H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE) ||
(image_ctl.flags != H5C_CI__ALL_FLAGS)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Unexpected image_ctl values(1).\n";
}
}
else {
if ((image_ctl.version != H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION) ||
- (image_ctl.generate_image != TRUE) || (image_ctl.save_resize_status != FALSE) ||
+ (image_ctl.generate_image != true) || (image_ctl.save_resize_status != false) ||
(image_ctl.entry_ageout != H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE) ||
(image_ctl.flags != H5C_CI__ALL_FLAGS)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Unexpected image_ctl values(2).\n";
}
}
@@ -1162,11 +1160,11 @@ open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, cons
else {
if ((image_ctl.version != H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION) ||
- (image_ctl.generate_image != FALSE) || (image_ctl.save_resize_status != FALSE) ||
+ (image_ctl.generate_image != false) || (image_ctl.save_resize_status != false) ||
(image_ctl.entry_ageout != H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE) ||
(image_ctl.flags != H5C_CI__ALL_FLAGS)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Unexpected image_ctl values(3).\n";
}
}
@@ -1181,7 +1179,7 @@ open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, cons
if (H5C_set_cache_image_config(file_ptr, cache_ptr, &image_ctl) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "error returned by H5C_set_cache_image_config().";
}
}
@@ -1191,9 +1189,9 @@ open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, cons
if (pass) {
- if (cache_ptr->close_warning_received == TRUE) {
+ if (cache_ptr->close_warning_received == true) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Unexpected value of close_warning_received.\n";
}
@@ -1201,26 +1199,26 @@ open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, cons
if (read_only) {
- if ((cache_ptr->load_image != TRUE) || (cache_ptr->delete_image != FALSE)) {
+ if ((cache_ptr->load_image != true) || (cache_ptr->delete_image != false)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "mdci sb extension message not present?\n";
}
}
else {
- if ((cache_ptr->load_image != TRUE) || (cache_ptr->delete_image != TRUE)) {
+ if ((cache_ptr->load_image != true) || (cache_ptr->delete_image != true)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "mdci sb extension message not present?\n";
}
}
}
else {
- if ((cache_ptr->load_image == TRUE) || (cache_ptr->delete_image == TRUE)) {
+ if ((cache_ptr->load_image == true) || (cache_ptr->delete_image == true)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "mdci sb extension message present?\n";
}
}
@@ -1253,10 +1251,10 @@ open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, cons
* Purpose: Collectively create a chunked dataset, and fill it with
* known values.
*
- * On failure, set pass to FALSE, and set failure_mssg
+ * On failure, set pass to false, and set failure_mssg
* to point to an appropriate failure message.
*
- * Do nothing if pass is FALSE on entry.
+ * Do nothing if pass is false on entry.
*
* Return: void
*
@@ -1268,9 +1266,9 @@ par_create_dataset(int dset_num, hid_t file_id, int mpi_rank, int mpi_size)
{
const char *fcn_name = "par_create_dataset()";
char dset_name[256];
- hbool_t show_progress = FALSE;
- hbool_t valid_chunk;
- hbool_t verbose = FALSE;
+ bool show_progress = false;
+ bool valid_chunk;
+ bool verbose = false;
int cp = 0;
int i, j, k, l;
int data_chunk[1][CHUNK_SIZE][CHUNK_SIZE];
@@ -1279,17 +1277,17 @@ par_create_dataset(int dset_num, hid_t file_id, int mpi_rank, int mpi_size)
hsize_t offset[3];
hsize_t chunk_size[3];
hid_t status;
- hid_t dataspace_id = -1;
- hid_t memspace_id = -1;
- hid_t dset_id = -1;
- hid_t filespace_id = -1;
- hid_t dcpl_id = -1;
- hid_t dxpl_id = -1;
+ hid_t dataspace_id = H5I_INVALID_HID;
+ hid_t memspace_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t filespace_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
show_progress = (show_progress && (mpi_rank == 0));
verbose = (verbose && (mpi_rank == 0));
- HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", dset_num);
+ snprintf(dset_name, sizeof(dset_name), "/dset%03d", dset_num);
if (show_progress) {
fprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
@@ -1306,7 +1304,7 @@ par_create_dataset(int dset_num, hid_t file_id, int mpi_rank, int mpi_size)
if (dataspace_id < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Screate_simple() failed.";
}
}
@@ -1324,7 +1322,7 @@ par_create_dataset(int dset_num, hid_t file_id, int mpi_rank, int mpi_size)
if (dcpl_id < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pcreate(H5P_DATASET_CREATE) failed.";
}
}
@@ -1340,7 +1338,7 @@ par_create_dataset(int dset_num, hid_t file_id, int mpi_rank, int mpi_size)
if (H5Pset_chunk(dcpl_id, 3, chunk_size) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pset_chunk() failed.";
}
}
@@ -1356,7 +1354,7 @@ par_create_dataset(int dset_num, hid_t file_id, int mpi_rank, int mpi_size)
if (dset_id < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dcreate() failed.";
}
}
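A minimal, self-contained sketch of the chunked-dataset creation pattern used here; the 3-D extents and helper name are illustrative, not the test's exact values, and error handling is omitted.

#include "hdf5.h"

/* Illustrative helper: create a 3-D chunked dataset of big-endian
 * 32-bit integers. */
static hid_t
create_chunked_dset(hid_t file_id, const char *dset_name)
{
    hsize_t dims[3]       = {2, 8, 8};
    hsize_t chunk_dims[3] = {1, 4, 4};
    hid_t   space, dcpl, dset;

    space = H5Screate_simple(3, dims, NULL);
    dcpl  = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(dcpl, 3, chunk_dims); /* chunked layout */

    dset = H5Dcreate2(file_id, dset_name, H5T_STD_I32BE, space,
                      H5P_DEFAULT, dcpl, H5P_DEFAULT);

    H5Pclose(dcpl);
    H5Sclose(space);

    return dset; /* negative on failure */
}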
@@ -1371,7 +1369,7 @@ par_create_dataset(int dset_num, hid_t file_id, int mpi_rank, int mpi_size)
if (filespace_id < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dget_space() failed.";
}
}
@@ -1389,7 +1387,7 @@ par_create_dataset(int dset_num, hid_t file_id, int mpi_rank, int mpi_size)
if (memspace_id < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Screate_simple() failed.";
}
}
@@ -1410,7 +1408,7 @@ par_create_dataset(int dset_num, hid_t file_id, int mpi_rank, int mpi_size)
if (status < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Sselect_hyperslab() failed.";
}
}
@@ -1425,7 +1423,7 @@ par_create_dataset(int dset_num, hid_t file_id, int mpi_rank, int mpi_size)
if (dxpl_id < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pcreate(H5P_DATASET_XFER) failed.";
}
}
@@ -1437,7 +1435,7 @@ par_create_dataset(int dset_num, hid_t file_id, int mpi_rank, int mpi_size)
if (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pset_dxpl_mpio() failed.";
}
}
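A minimal sketch of the collective transfer setup this hunk checks; dset_id and the two dataspaces (with selections already made) are assumed to be valid, and error handling is left out.

#include "hdf5.h"

/* Illustrative helper: write one slab collectively. */
static herr_t
write_collective(hid_t dset_id, hid_t memspace_id, hid_t filespace_id, const int *buf)
{
    hid_t  dxpl = H5Pcreate(H5P_DATASET_XFER);
    herr_t status;

    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); /* all ranks must participate */
    status = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace_id, filespace_id, dxpl, buf);

    H5Pclose(dxpl);
    return status;
}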
@@ -1476,7 +1474,7 @@ par_create_dataset(int dset_num, hid_t file_id, int mpi_rank, int mpi_size)
if (status < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "disk H5Sselect_hyperslab() failed.";
}
@@ -1488,7 +1486,7 @@ par_create_dataset(int dset_num, hid_t file_id, int mpi_rank, int mpi_size)
if (status < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dwrite() failed.";
}
@@ -1522,7 +1520,7 @@ par_create_dataset(int dset_num, hid_t file_id, int mpi_rank, int mpi_size)
if (status < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "disk hyperslab create failed.";
}
@@ -1533,7 +1531,7 @@ par_create_dataset(int dset_num, hid_t file_id, int mpi_rank, int mpi_size)
if (status < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "chunk read failed.";
}
}
@@ -1541,13 +1539,13 @@ par_create_dataset(int dset_num, hid_t file_id, int mpi_rank, int mpi_size)
/* validate the slab */
if (pass) {
- valid_chunk = TRUE;
+ valid_chunk = true;
for (k = 0; k < CHUNK_SIZE; k++) {
for (l = 0; l < CHUNK_SIZE; l++) {
if (data_chunk[0][k][l] !=
((DSET_SIZE * DSET_SIZE * mpi_rank) + (DSET_SIZE * (i + k)) + j + l + dset_num)) {
- valid_chunk = FALSE;
+ valid_chunk = false;
if (verbose) {
@@ -1564,7 +1562,7 @@ par_create_dataset(int dset_num, hid_t file_id, int mpi_rank, int mpi_size)
if (!valid_chunk) {
- pass = FALSE;
+ pass = false;
failure_mssg = "slab validation failed.";
if (verbose) {
@@ -1584,42 +1582,42 @@ par_create_dataset(int dset_num, hid_t file_id, int mpi_rank, int mpi_size)
/* close the data space */
if ((pass) && (H5Sclose(dataspace_id) < 0)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Sclose(dataspace_id) failed.";
}
/* close the file space */
if ((pass) && (H5Sclose(filespace_id) < 0)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Sclose(filespace_id) failed.";
}
/* close the dataset */
if ((pass) && (H5Dclose(dset_id) < 0)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dclose(dset_id) failed.";
}
/* close the mem space */
if ((pass) && (H5Sclose(memspace_id) < 0)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Sclose(memspace_id) failed.";
}
/* close the dataset creation property list */
if ((pass) && (H5Pclose(dcpl_id) < 0)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pclose(dcpl) failed.";
}
/* close the data access property list */
if ((pass) && (H5Pclose(dxpl_id) < 0)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pclose(dxpl) failed.";
}
@@ -1635,10 +1633,10 @@ par_create_dataset(int dset_num, hid_t file_id, int mpi_rank, int mpi_size)
*
* Purpose: Collectively delete the specified dataset.
*
- * On failure, set pass to FALSE, and set failure_mssg
+ * On failure, set pass to false, and set failure_mssg
* to point to an appropriate failure message.
*
- * Do nothing if pass is FALSE on entry.
+ * Do nothing if pass is false on entry.
*
* Return: void
*
@@ -1650,12 +1648,12 @@ par_delete_dataset(int dset_num, hid_t file_id, int mpi_rank)
{
const char *fcn_name = "par_delete_dataset()";
char dset_name[256];
- hbool_t show_progress = FALSE;
+ bool show_progress = false;
int cp = 0;
show_progress = (show_progress && (mpi_rank == 0));
- HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", dset_num);
+ snprintf(dset_name, sizeof(dset_name), "/dset%03d", dset_num);
if (show_progress) {
fprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
@@ -1676,7 +1674,7 @@ par_delete_dataset(int dset_num, hid_t file_id, int mpi_rank)
if (H5Ldelete(file_id, dset_name, H5P_DEFAULT) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Ldelete() failed.";
}
}
@@ -1699,10 +1697,10 @@ par_delete_dataset(int dset_num, hid_t file_id, int mpi_rank)
* All processes wait until the child process completes, and
* then return.
*
- * On failure, set pass to FALSE, and set failure_mssg
+ * On failure, set pass to false, and set failure_mssg
* to point to an appropriate failure message.
*
- * Do nothing if pass is FALSE on entry.
+ * Do nothing if pass is false on entry.
*
* Return: void
*
@@ -1741,10 +1739,10 @@ par_insert_cache_image(int file_name_idx, int mpi_rank, int mpi_size)
*
* Purpose: Collectively verify the contents of a chunked dataset.
*
- * On failure, set pass to FALSE, and set failure_mssg
+ * On failure, set pass to false, and set failure_mssg
* to point to an appropriate failure message.
*
- * Do nothing if pass is FALSE on entry.
+ * Do nothing if pass is false on entry.
*
* Return: void
*
@@ -1756,9 +1754,9 @@ par_verify_dataset(int dset_num, hid_t file_id, int mpi_rank)
{
const char *fcn_name = "par_verify_dataset()";
char dset_name[256];
- hbool_t show_progress = FALSE;
- hbool_t valid_chunk;
- hbool_t verbose = FALSE;
+ bool show_progress = false;
+ bool valid_chunk;
+ bool verbose = false;
int cp = 0;
int i, j, k, l;
int data_chunk[1][CHUNK_SIZE][CHUNK_SIZE];
@@ -1766,15 +1764,15 @@ par_verify_dataset(int dset_num, hid_t file_id, int mpi_rank)
hsize_t a_size[3];
hsize_t offset[3];
hid_t status;
- hid_t memspace_id = -1;
- hid_t dset_id = -1;
- hid_t filespace_id = -1;
- hid_t dxpl_id = -1;
+ hid_t memspace_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t filespace_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
show_progress = (show_progress && (mpi_rank == 0));
verbose = (verbose && (mpi_rank == 0));
- HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", dset_num);
+ snprintf(dset_name, sizeof(dset_name), "/dset%03d", dset_num);
if (show_progress) {
fprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
@@ -1789,7 +1787,7 @@ par_verify_dataset(int dset_num, hid_t file_id, int mpi_rank)
if (dset_id < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dopen2() failed.";
}
}
@@ -1801,7 +1799,7 @@ par_verify_dataset(int dset_num, hid_t file_id, int mpi_rank)
if (filespace_id < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dget_space() failed.";
}
}
@@ -1819,7 +1817,7 @@ par_verify_dataset(int dset_num, hid_t file_id, int mpi_rank)
if (memspace_id < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Screate_simple() failed.";
}
}
@@ -1840,7 +1838,7 @@ par_verify_dataset(int dset_num, hid_t file_id, int mpi_rank)
if (status < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Sselect_hyperslab() failed.";
}
}
@@ -1855,7 +1853,7 @@ par_verify_dataset(int dset_num, hid_t file_id, int mpi_rank)
if (dxpl_id < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pcreate(H5P_DATASET_XFER) failed.";
}
}
@@ -1867,7 +1865,7 @@ par_verify_dataset(int dset_num, hid_t file_id, int mpi_rank)
if (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pset_dxpl_mpio() failed.";
}
}
@@ -1892,7 +1890,7 @@ par_verify_dataset(int dset_num, hid_t file_id, int mpi_rank)
if (status < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "disk hyperslab create failed.";
}
@@ -1903,7 +1901,7 @@ par_verify_dataset(int dset_num, hid_t file_id, int mpi_rank)
if (status < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "chunk read failed.";
}
}
@@ -1911,13 +1909,13 @@ par_verify_dataset(int dset_num, hid_t file_id, int mpi_rank)
/* validate the slab */
if (pass) {
- valid_chunk = TRUE;
+ valid_chunk = true;
for (k = 0; k < CHUNK_SIZE; k++) {
for (l = 0; l < CHUNK_SIZE; l++) {
if (data_chunk[0][k][l] !=
((DSET_SIZE * DSET_SIZE * mpi_rank) + (DSET_SIZE * (i + k)) + j + l + dset_num)) {
- valid_chunk = FALSE;
+ valid_chunk = false;
if (verbose) {
@@ -1934,7 +1932,7 @@ par_verify_dataset(int dset_num, hid_t file_id, int mpi_rank)
if (!valid_chunk) {
- pass = FALSE;
+ pass = false;
failure_mssg = "slab validation failed.";
if (verbose) {
@@ -1954,28 +1952,28 @@ par_verify_dataset(int dset_num, hid_t file_id, int mpi_rank)
/* close the file space */
if ((pass) && (H5Sclose(filespace_id) < 0)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Sclose(filespace_id) failed.";
}
/* close the dataset */
if ((pass) && (H5Dclose(dset_id) < 0)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dclose(dset_id) failed.";
}
/* close the mem space */
if ((pass) && (H5Sclose(memspace_id) < 0)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Sclose(memspace_id) failed.";
}
/* close the data access property list */
if ((pass) && (H5Pclose(dxpl_id) < 0)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pclose(dxpl) failed.";
}
@@ -1995,29 +1993,29 @@ par_verify_dataset(int dset_num, hid_t file_id, int mpi_rank)
* of the file before closing.
*
* On failure, print an appropriate error message and
- * return FALSE.
+ * return false.
*
- * Return: TRUE if successful, FALSE otherwise.
+ * Return: true if successful, false otherwise.
*
*-------------------------------------------------------------------------
*/
-static hbool_t
+static bool
serial_insert_cache_image(int file_name_idx, int mpi_size)
{
const char *fcn_name = "serial_insert_cache_image()";
char filename[512];
- hbool_t show_progress = FALSE;
+ bool show_progress = false;
int cp = 0;
int i;
int num_dsets = PAR_NUM_DSETS;
- hid_t file_id = -1;
+ hid_t file_id = H5I_INVALID_HID;
H5F_t *file_ptr = NULL;
H5C_t *cache_ptr = NULL;
MPI_Comm dummy_comm = MPI_COMM_WORLD;
MPI_Info dummy_info = MPI_INFO_NULL;
- pass = TRUE;
+ pass = true;
if (show_progress)
fprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -2029,7 +2027,7 @@ serial_insert_cache_image(int file_name_idx, int mpi_size)
if (h5_fixname(FILENAMES[file_name_idx], H5P_DEFAULT, filename, sizeof(filename)) == NULL) {
- pass = FALSE;
+ pass = false;
fprintf(stdout, "h5_fixname() failed.\n");
}
}
@@ -2042,12 +2040,12 @@ serial_insert_cache_image(int file_name_idx, int mpi_size)
if (pass) {
- open_hdf5_file(/* create_file */ FALSE,
- /* mdci_sbem_expected */ FALSE,
- /* read_only */ FALSE,
- /* set_mdci_fapl */ TRUE,
- /* config_fsm */ FALSE,
- /* enable_page_buffer */ FALSE,
+ open_hdf5_file(/* create_file */ false,
+ /* mdci_sbem_expected */ false,
+ /* read_only */ false,
+ /* set_mdci_fapl */ true,
+ /* config_fsm */ false,
+ /* enable_page_buffer */ false,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
/* file_id_ptr */ &file_id,
@@ -2056,8 +2054,8 @@ serial_insert_cache_image(int file_name_idx, int mpi_size)
/* comm */ dummy_comm,
/* info */ dummy_info,
/* l_facc_type */ 0,
- /* all_coll_metadata_ops */ FALSE,
- /* coll_metadata_write */ FALSE,
+ /* all_coll_metadata_ops */ false,
+ /* coll_metadata_write */ false,
/* md_write_strat */ 1);
}
@@ -2082,7 +2080,7 @@ serial_insert_cache_image(int file_name_idx, int mpi_size)
if (H5Fclose(file_id) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Fclose() failed.\n";
}
}
@@ -2099,10 +2097,10 @@ serial_insert_cache_image(int file_name_idx, int mpi_size)
*
* Purpose: Verify the contents of a chunked dataset.
*
- * On failure, set pass to FALSE, and set failure_mssg
+ * On failure, set pass to false, and set failure_mssg
* to point to an appropriate failure message.
*
- * Do nothing if pass is FALSE on entry.
+ * Do nothing if pass is false on entry.
*
* Return: void
*
@@ -2114,9 +2112,9 @@ serial_verify_dataset(int dset_num, hid_t file_id, int mpi_size)
{
const char *fcn_name = "serial_verify_dataset()";
char dset_name[256];
- hbool_t show_progress = FALSE;
- hbool_t valid_chunk;
- hbool_t verbose = FALSE;
+ bool show_progress = false;
+ bool valid_chunk;
+ bool verbose = false;
int cp = 0;
int i, j, k, l, m;
int data_chunk[1][CHUNK_SIZE][CHUNK_SIZE];
@@ -2124,11 +2122,11 @@ serial_verify_dataset(int dset_num, hid_t file_id, int mpi_size)
hsize_t a_size[3];
hsize_t offset[3];
hid_t status;
- hid_t memspace_id = -1;
- hid_t dset_id = -1;
- hid_t filespace_id = -1;
+ hid_t memspace_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t filespace_id = H5I_INVALID_HID;
- HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", dset_num);
+ snprintf(dset_name, sizeof(dset_name), "/dset%03d", dset_num);
if (show_progress) {
fprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
@@ -2143,7 +2141,7 @@ serial_verify_dataset(int dset_num, hid_t file_id, int mpi_size)
if (dset_id < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dopen2() failed.";
}
}
@@ -2155,7 +2153,7 @@ serial_verify_dataset(int dset_num, hid_t file_id, int mpi_size)
if (filespace_id < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dget_space() failed.";
}
}
@@ -2173,7 +2171,7 @@ serial_verify_dataset(int dset_num, hid_t file_id, int mpi_size)
if (memspace_id < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Screate_simple() failed.";
}
}
@@ -2194,7 +2192,7 @@ serial_verify_dataset(int dset_num, hid_t file_id, int mpi_size)
if (status < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Sselect_hyperslab() failed.";
}
}
@@ -2221,7 +2219,7 @@ serial_verify_dataset(int dset_num, hid_t file_id, int mpi_size)
if (status < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "disk hyperslab create failed.";
}
@@ -2233,7 +2231,7 @@ serial_verify_dataset(int dset_num, hid_t file_id, int mpi_size)
if (status < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "chunk read failed.";
}
}
@@ -2241,14 +2239,14 @@ serial_verify_dataset(int dset_num, hid_t file_id, int mpi_size)
/* validate the slab */
if (pass) {
- valid_chunk = TRUE;
+ valid_chunk = true;
for (l = 0; l < CHUNK_SIZE; l++) {
for (m = 0; m < CHUNK_SIZE; m++) {
if (data_chunk[0][l][m] !=
((DSET_SIZE * DSET_SIZE * i) + (DSET_SIZE * (j + l)) + k + m + dset_num)) {
- valid_chunk = FALSE;
+ valid_chunk = false;
if (verbose) {
@@ -2265,7 +2263,7 @@ serial_verify_dataset(int dset_num, hid_t file_id, int mpi_size)
if (!valid_chunk) {
- pass = FALSE;
+ pass = false;
failure_mssg = "slab validation failed.";
if (verbose) {
@@ -2287,21 +2285,21 @@ serial_verify_dataset(int dset_num, hid_t file_id, int mpi_size)
/* close the file space */
if ((pass) && (H5Sclose(filespace_id) < 0)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Sclose(filespace_id) failed.";
}
/* close the dataset */
if ((pass) && (H5Dclose(dset_id) < 0)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dclose(dset_id) failed.";
}
/* close the mem space */
if ((pass) && (H5Sclose(memspace_id) < 0)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Sclose(memspace_id) failed.";
}
@@ -2315,7 +2313,7 @@ serial_verify_dataset(int dset_num, hid_t file_id, int mpi_size)
/*-------------------------------------------------------------------------
* Function: verify_data_sets()
*
- * Purpose: If pass is TRUE on entry, verify that the data sets in the
+ * Purpose: If pass is true on entry, verify that the data sets in the
* file exist and contain the expected data.
*
* Note that these data sets were created by
@@ -2323,10 +2321,10 @@ serial_verify_dataset(int dset_num, hid_t file_id, int mpi_size)
* function must be reflected in this function, and
 * vice versa.
*
- * On failure, set pass to FALSE, and set failure_mssg
+ * On failure, set pass to false, and set failure_mssg
* to point to an appropriate failure message.
*
- * Do nothing if pass is FALSE on entry.
+ * Do nothing if pass is false on entry.
*
* Return: void
*
@@ -2338,15 +2336,15 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
{
const char *fcn_name = "verify_data_sets()";
char dset_name[64];
- hbool_t show_progress = FALSE;
- hbool_t valid_chunk;
- hbool_t verbose = FALSE;
+ bool show_progress = false;
+ bool valid_chunk;
+ bool verbose = false;
int cp = 0;
int i, j, k, l, m;
int data_chunk[CHUNK_SIZE][CHUNK_SIZE];
herr_t status;
hid_t filespace_ids[MAX_NUM_DSETS];
- hid_t memspace_id = -1;
+ hid_t memspace_id = H5I_INVALID_HID;
hid_t dataset_ids[MAX_NUM_DSETS];
hsize_t dims[2];
hsize_t a_size[2];
@@ -2369,12 +2367,12 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
/* open the dataset */
if (pass) {
- HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", i);
+ snprintf(dset_name, sizeof(dset_name), "/dset%03d", i);
dataset_ids[i] = H5Dopen2(file_id, dset_name, H5P_DEFAULT);
if (dataset_ids[i] < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dopen2() failed.";
}
}
@@ -2386,7 +2384,7 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
if (filespace_ids[i] < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dget_space() failed.";
}
}
@@ -2407,7 +2405,7 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
if (memspace_id < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Screate_simple() failed.";
}
}
@@ -2426,7 +2424,7 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
if (status < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Sselect_hyperslab() failed.";
}
}
@@ -2451,7 +2449,7 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
if (status < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "disk hyperslab create failed.";
}
@@ -2463,7 +2461,7 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
if (status < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "disk hyperslab create failed.";
}
}
@@ -2471,13 +2469,13 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
/* validate the slab */
if (pass) {
- valid_chunk = TRUE;
+ valid_chunk = true;
for (k = 0; k < CHUNK_SIZE; k++) {
for (l = 0; l < CHUNK_SIZE; l++) {
if (data_chunk[k][l] !=
((DSET_SIZE * DSET_SIZE * m) + (DSET_SIZE * (i + k)) + j + l)) {
- valid_chunk = FALSE;
+ valid_chunk = false;
if (verbose) {
@@ -2493,7 +2491,7 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
if (!valid_chunk) {
- pass = FALSE;
+ pass = false;
failure_mssg = "slab validation failed.";
if (verbose) {
@@ -2517,7 +2515,7 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
while ((pass) && (i <= max_dset)) {
if (H5Sclose(filespace_ids[i]) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Sclose() failed.";
}
i++;
@@ -2528,7 +2526,7 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
while ((pass) && (i <= max_dset)) {
if (H5Dclose(dataset_ids[i]) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dclose() failed.";
}
i++;
@@ -2539,7 +2537,7 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
if (H5Sclose(memspace_id) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Sclose(memspace_id) failed.";
}
}
@@ -2592,13 +2590,13 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
{
const char *fcn_name = "verify_cache_image_RO()";
char filename[512];
- hbool_t show_progress = FALSE;
- hid_t file_id = -1;
+ bool show_progress = false;
+ hid_t file_id = H5I_INVALID_HID;
H5F_t *file_ptr = NULL;
H5C_t *cache_ptr = NULL;
int cp = 0;
- pass = TRUE;
+ pass = true;
if (mpi_rank == 0) {
@@ -2614,7 +2612,7 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
default:
TESTING("parallel CI load test -- unknown md write -- R/o");
- pass = FALSE;
+ pass = false;
break;
}
}
@@ -2629,7 +2627,7 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
if (h5_fixname(FILENAMES[file_name_id], H5P_DEFAULT, filename, sizeof(filename)) == NULL) {
- pass = FALSE;
+ pass = false;
failure_mssg = "h5_fixname() failed.\n";
}
}
@@ -2644,12 +2642,12 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
if (pass) {
- open_hdf5_file(/* create_file */ FALSE,
- /* mdci_sbem_expected */ TRUE,
- /* read_only */ TRUE,
- /* set_mdci_fapl */ FALSE,
- /* config_fsm */ FALSE,
- /* enable_page_buffer */ FALSE,
+ open_hdf5_file(/* create_file */ false,
+ /* mdci_sbem_expected */ true,
+ /* read_only */ true,
+ /* set_mdci_fapl */ false,
+ /* config_fsm */ false,
+ /* enable_page_buffer */ false,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
/* file_id_ptr */ &file_id,
@@ -2658,8 +2656,8 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
/* comm */ MPI_COMM_WORLD,
/* info */ MPI_INFO_NULL,
/* l_facc_type */ FACC_MPIO,
- /* all_coll_metadata_ops */ FALSE,
- /* coll_metadata_write */ FALSE,
+ /* all_coll_metadata_ops */ false,
+ /* coll_metadata_write */ false,
/* md_write_strat */ md_write_strat);
}
@@ -2686,7 +2684,7 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
if (((mpi_rank == 0) && (cache_ptr->images_read != 1)) ||
((mpi_rank > 0) && (cache_ptr->images_read != 0))) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected images_read.";
}
}
@@ -2707,7 +2705,7 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
if (cache_ptr->images_loaded != 1) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Image not loaded?.";
}
}
@@ -2722,7 +2720,7 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
if (H5Fclose(file_id) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Fclose() failed.\n";
}
}
@@ -2734,12 +2732,12 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
if (pass) {
- open_hdf5_file(/* create_file */ FALSE,
- /* mdci_sbem_expected */ TRUE,
- /* read_only */ TRUE,
- /* set_mdci_fapl */ FALSE,
- /* config_fsm */ FALSE,
- /* enable_page_buffer */ FALSE,
+ open_hdf5_file(/* create_file */ false,
+ /* mdci_sbem_expected */ true,
+ /* read_only */ true,
+ /* set_mdci_fapl */ false,
+ /* config_fsm */ false,
+ /* enable_page_buffer */ false,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
/* file_id_ptr */ &file_id,
@@ -2748,8 +2746,8 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
/* comm */ MPI_COMM_WORLD,
/* info */ MPI_INFO_NULL,
/* l_facc_type */ FACC_MPIO,
- /* all_coll_metadata_ops */ FALSE,
- /* coll_metadata_write */ FALSE,
+ /* all_coll_metadata_ops */ false,
+ /* coll_metadata_write */ false,
/* md_write_strat */ md_write_strat);
}
@@ -2768,7 +2766,7 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
if (cache_ptr->images_loaded != 1) {
- pass = FALSE;
+ pass = false;
failure_mssg = "metadata cache image block not loaded(2).";
}
}
@@ -2780,7 +2778,7 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
if (H5Fclose(file_id) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Fclose() failed.\n";
}
}
@@ -2852,13 +2850,13 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
{
const char *fcn_name = "verify_cache_imageRW()";
char filename[512];
- hbool_t show_progress = FALSE;
- hid_t file_id = -1;
+ bool show_progress = false;
+ hid_t file_id = H5I_INVALID_HID;
H5F_t *file_ptr = NULL;
H5C_t *cache_ptr = NULL;
int cp = 0;
- pass = TRUE;
+ pass = true;
if (mpi_rank == 0) {
@@ -2874,7 +2872,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
default:
TESTING("parallel CI load test -- unknown md write -- R/W");
- pass = FALSE;
+ pass = false;
break;
}
}
@@ -2889,7 +2887,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
if (h5_fixname(FILENAMES[file_name_id], H5P_DEFAULT, filename, sizeof(filename)) == NULL) {
- pass = FALSE;
+ pass = false;
failure_mssg = "h5_fixname() failed.\n";
}
}
@@ -2909,12 +2907,12 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
if (pass) {
- open_hdf5_file(/* create_file */ FALSE,
- /* mdci_sbem_expected */ TRUE,
- /* read_only */ FALSE,
- /* set_mdci_fapl */ FALSE,
- /* config_fsm */ FALSE,
- /* enable_page_buffer */ FALSE,
+ open_hdf5_file(/* create_file */ false,
+ /* mdci_sbem_expected */ true,
+ /* read_only */ false,
+ /* set_mdci_fapl */ false,
+ /* config_fsm */ false,
+ /* enable_page_buffer */ false,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
/* file_id_ptr */ &file_id,
@@ -2923,8 +2921,8 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
/* comm */ MPI_COMM_WORLD,
/* info */ MPI_INFO_NULL,
/* l_facc_type */ FACC_MPIO,
- /* all_coll_metadata_ops */ FALSE,
- /* coll_metadata_write */ FALSE,
+ /* all_coll_metadata_ops */ false,
+ /* coll_metadata_write */ false,
/* md_write_strat */ md_write_strat);
}
@@ -2950,7 +2948,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
if (((mpi_rank == 0) && (cache_ptr->images_read != 1)) ||
((mpi_rank > 0) && (cache_ptr->images_read != 0))) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected images_read.";
}
}
@@ -2971,7 +2969,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
if (cache_ptr->images_loaded != 1) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Image not loaded?.";
}
}
@@ -2986,7 +2984,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
if (H5Fclose(file_id) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Fclose() failed.\n";
}
}
@@ -2998,12 +2996,12 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
if (pass) {
- open_hdf5_file(/* create_file */ FALSE,
- /* mdci_sbem_expected */ FALSE,
- /* read_only */ FALSE,
- /* set_mdci_fapl */ FALSE,
- /* config_fsm */ FALSE,
- /* enable_page_buffer */ FALSE,
+ open_hdf5_file(/* create_file */ false,
+ /* mdci_sbem_expected */ false,
+ /* read_only */ false,
+ /* set_mdci_fapl */ false,
+ /* config_fsm */ false,
+ /* enable_page_buffer */ false,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
/* file_id_ptr */ &file_id,
@@ -3012,8 +3010,8 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
/* comm */ MPI_COMM_WORLD,
/* info */ MPI_INFO_NULL,
/* l_facc_type */ FACC_MPIO,
- /* all_coll_metadata_ops */ FALSE,
- /* coll_metadata_write */ FALSE,
+ /* all_coll_metadata_ops */ false,
+ /* coll_metadata_write */ false,
/* md_write_strat */ md_write_strat);
}
@@ -3032,7 +3030,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
if (cache_ptr->images_loaded != 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "metadata cache image block loaded(1).";
}
}
@@ -3044,7 +3042,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
if (H5Fclose(file_id) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Fclose() failed.\n";
}
}
@@ -3061,7 +3059,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
if ((mpi_rank == 0) && (HDremove(filename) < 0)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "HDremove() failed.\n";
}
}
@@ -3104,18 +3102,18 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
* of the test will have to be re-worked when and if page
* buffering is supported in parallel.
*
- * Return: Success: TRUE
+ * Return: Success: true
*
- * Failure: FALSE
+ * Failure: false
*
*****************************************************************************/
-static hbool_t
+static bool
smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
{
const char *fcn_name = "smoke_check_1()";
char filename[512];
- hbool_t show_progress = FALSE;
- hid_t file_id = -1;
+ bool show_progress = false;
+ hid_t file_id = H5I_INVALID_HID;
H5F_t *file_ptr = NULL;
H5C_t *cache_ptr = NULL;
int cp = 0;
@@ -3124,7 +3122,7 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
int test_file_index = 2;
h5_stat_size_t file_size;
- pass = TRUE;
+ pass = true;
if (mpi_rank == 0) {
@@ -3141,7 +3139,7 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
if (h5_fixname(FILENAMES[test_file_index], H5P_DEFAULT, filename, sizeof(filename)) == NULL) {
- pass = FALSE;
+ pass = false;
failure_mssg = "h5_fixname() failed.\n";
}
}
@@ -3156,12 +3154,12 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
if (pass) {
- open_hdf5_file(/* create_file */ TRUE,
- /* mdci_sbem_expected */ FALSE,
- /* read_only */ FALSE,
- /* set_mdci_fapl */ FALSE,
- /* config_fsm */ TRUE,
- /* enable_page_buffer */ FALSE,
+ open_hdf5_file(/* create_file */ true,
+ /* mdci_sbem_expected */ false,
+ /* read_only */ false,
+ /* set_mdci_fapl */ false,
+ /* config_fsm */ true,
+ /* enable_page_buffer */ false,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
/* file_id_ptr */ &file_id,
@@ -3170,8 +3168,8 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
/* comm */ mpi_comm,
/* info */ mpi_info,
/* l_facc_type */ FACC_MPIO,
- /* all_coll_metadata_ops */ FALSE,
- /* coll_metadata_write */ TRUE,
+ /* all_coll_metadata_ops */ false,
+ /* coll_metadata_write */ true,
/* md_write_strat */ 1);
}
@@ -3205,7 +3203,7 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
if (H5Fclose(file_id) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Fclose() failed.\n";
}
}
@@ -3227,12 +3225,12 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
if (pass) {
- open_hdf5_file(/* create_file */ FALSE,
- /* mdci_sbem_expected */ TRUE,
- /* read_only */ TRUE,
- /* set_mdci_fapl */ FALSE,
- /* config_fsm */ FALSE,
- /* enable_page_buffer */ FALSE,
+ open_hdf5_file(/* create_file */ false,
+ /* mdci_sbem_expected */ true,
+ /* read_only */ true,
+ /* set_mdci_fapl */ false,
+ /* config_fsm */ false,
+ /* enable_page_buffer */ false,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
/* file_id_ptr */ &file_id,
@@ -3241,8 +3239,8 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
/* comm */ mpi_comm,
/* info */ mpi_info,
/* l_facc_type */ FACC_MPIO,
- /* all_coll_metadata_ops */ FALSE,
- /* coll_metadata_write */ TRUE,
+ /* all_coll_metadata_ops */ false,
+ /* coll_metadata_write */ true,
/* md_write_strat */ 1);
}
@@ -3274,7 +3272,7 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
if (((mpi_rank == 0) && (cache_ptr->images_read != 1)) ||
((mpi_rank > 0) && (cache_ptr->images_read != 0))) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected images_read.";
}
}
@@ -3295,7 +3293,7 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
if (cache_ptr->images_loaded != 1) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Image not loaded?.";
}
}
@@ -3310,7 +3308,7 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
if (H5Fclose(file_id) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Fclose() failed.";
}
}
@@ -3322,12 +3320,12 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
if (pass) {
- open_hdf5_file(/* create_file */ FALSE,
- /* mdci_sbem_expected */ TRUE,
- /* read_only */ FALSE,
- /* set_mdci_fapl */ FALSE,
- /* config_fsm */ FALSE,
- /* enable_page_buffer */ FALSE,
+ open_hdf5_file(/* create_file */ false,
+ /* mdci_sbem_expected */ true,
+ /* read_only */ false,
+ /* set_mdci_fapl */ false,
+ /* config_fsm */ false,
+ /* enable_page_buffer */ false,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
/* file_id_ptr */ &file_id,
@@ -3336,8 +3334,8 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
/* comm */ mpi_comm,
/* info */ mpi_info,
/* l_facc_type */ FACC_MPIO,
- /* all_coll_metadata_ops */ FALSE,
- /* coll_metadata_write */ TRUE,
+ /* all_coll_metadata_ops */ false,
+ /* coll_metadata_write */ true,
/* md_write_strat */ 1);
}
@@ -3369,7 +3367,7 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
if (((mpi_rank == 0) && (cache_ptr->images_read != 1)) ||
((mpi_rank > 0) && (cache_ptr->images_read != 0))) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected images_read.";
}
}
@@ -3390,7 +3388,7 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
if (cache_ptr->images_loaded != 1) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Image not loaded?.";
}
}
@@ -3417,7 +3415,7 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
if (H5Fclose(file_id) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Fclose() failed.";
}
}
@@ -3438,12 +3436,12 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
if ((file_size = h5_get_file_size(filename, H5P_DEFAULT)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "h5_get_file_size() failed.";
}
else if (file_size > 20 * 1024) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpectedly large file size.";
}
}
@@ -3460,7 +3458,7 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
if ((mpi_rank == 0) && (HDremove(filename) < 0)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "HDremove() failed.\n";
}
}
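
The hunks above are a mechanical hbool_t/TRUE/FALSE-to-bool/true/false migration of the cache-image tests, which all share the pass/failure_mssg idiom. A minimal sketch of that idiom with the new types (illustrative helper, not part of the patch):

#include <stdbool.h>
#include <stdio.h>
#include "hdf5.h"

static bool        pass         = true;
static const char *failure_mssg = NULL;

static void
close_objects(hid_t filespace_id, hid_t dset_id)
{
    /* Each step runs only while 'pass' is still true; the first failure
     * flips it to false and records a message for the caller to report. */
    if (pass && (H5Sclose(filespace_id) < 0)) {
        pass         = false;
        failure_mssg = "H5Sclose(filespace_id) failed.";
    }
    if (pass && (H5Dclose(dset_id) < 0)) {
        pass         = false;
        failure_mssg = "H5Dclose(dset_id) failed.";
    }
    if (!pass)
        fprintf(stderr, "FAILED: %s\n", failure_mssg);
}
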
diff --git a/testpar/t_chunk_alloc.c b/testpar/t_chunk_alloc.c
index 0ffe695..d02951d 100644
--- a/testpar/t_chunk_alloc.c
+++ b/testpar/t_chunk_alloc.c
@@ -166,7 +166,6 @@ static void
parallel_access_dataset(const char *filename, int chunk_factor, access_type action, hid_t *file_id,
hid_t *dataset)
{
- /* HDF5 gubbins */
hid_t memspace, dataspace; /* HDF5 file identifier */
hid_t access_plist; /* HDF5 ID for file access property list */
herr_t hrc; /* HDF5 return code */
@@ -306,7 +305,6 @@ static void
verify_data(const char *filename, int chunk_factor, write_type write_pattern, int vclose, hid_t *file_id,
hid_t *dataset)
{
- /* HDF5 gubbins */
hid_t dataspace, memspace; /* HDF5 file identifier */
hid_t access_plist; /* HDF5 ID for file access property list */
herr_t hrc; /* HDF5 return code */
diff --git a/testpar/t_dset.c b/testpar/t_dset.c
index f82410f..83d7511 100644
--- a/testpar/t_dset.c
+++ b/testpar/t_dset.c
@@ -2902,10 +2902,10 @@ test_actual_io_mode(int selection_mode)
H5D_mpio_actual_io_mode_t actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE;
const char *filename;
const char *test_name;
- hbool_t direct_multi_chunk_io;
- hbool_t multi_chunk_io;
- hbool_t is_chunked;
- hbool_t is_collective;
+ bool direct_multi_chunk_io;
+ bool multi_chunk_io;
+ bool is_chunked;
+ bool is_collective;
int mpi_size = -1;
int mpi_rank = -1;
int length;
@@ -2913,16 +2913,16 @@ test_actual_io_mode(int selection_mode)
int i;
MPI_Comm mpi_comm = MPI_COMM_NULL;
MPI_Info mpi_info = MPI_INFO_NULL;
- hid_t fid = -1;
- hid_t sid = -1;
- hid_t dataset = -1;
+ hid_t fid = H5I_INVALID_HID;
+ hid_t sid = H5I_INVALID_HID;
+ hid_t dataset = H5I_INVALID_HID;
hid_t data_type = H5T_NATIVE_INT;
- hid_t fapl = -1;
- hid_t mem_space = -1;
- hid_t file_space = -1;
- hid_t dcpl = -1;
- hid_t dxpl_write = -1;
- hid_t dxpl_read = -1;
+ hid_t fapl = H5I_INVALID_HID;
+ hid_t mem_space = H5I_INVALID_HID;
+ hid_t file_space = H5I_INVALID_HID;
+ hid_t dcpl = H5I_INVALID_HID;
+ hid_t dxpl_write = H5I_INVALID_HID;
+ hid_t dxpl_read = H5I_INVALID_HID;
hsize_t dims[RANK];
hsize_t chunk_dims[RANK];
hsize_t start[RANK];
@@ -3251,10 +3251,10 @@ test_actual_io_mode(int selection_mode)
/* Test values */
if (actual_chunk_opt_mode_expected != (H5D_mpio_actual_chunk_opt_mode_t)-1 &&
actual_io_mode_expected != (H5D_mpio_actual_io_mode_t)-1) {
- HDsnprintf(message, sizeof(message), "Actual Chunk Opt Mode has the correct value for %s.\n",
- test_name);
+ snprintf(message, sizeof(message), "Actual Chunk Opt Mode has the correct value for %s.\n",
+ test_name);
VRFY((actual_chunk_opt_mode_write == actual_chunk_opt_mode_expected), message);
- HDsnprintf(message, sizeof(message), "Actual IO Mode has the correct value for %s.\n", test_name);
+ snprintf(message, sizeof(message), "Actual IO Mode has the correct value for %s.\n", test_name);
VRFY((actual_io_mode_write == actual_io_mode_expected), message);
}
else {
@@ -3424,7 +3424,7 @@ actual_io_mode_tests(void)
* Test for Compact layout as the cause of breaking collective I/O.
*
* TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL:
- * Test for Externl-File storage as the cause of breaking collective I/O.
+ * Test for External-File storage as the cause of breaking collective I/O.
*
*/
#define FILE_EXTERNAL "nocolcause_extern.data"
@@ -3444,8 +3444,8 @@ test_no_collective_cause_mode(int selection_mode)
const char *filename;
const char *test_name;
- hbool_t is_chunked = 1;
- hbool_t is_independent = 0;
+ bool is_chunked = 1;
+ bool is_independent = 0;
int mpi_size = -1;
int mpi_rank = -1;
int length;
@@ -3453,17 +3453,17 @@ test_no_collective_cause_mode(int selection_mode)
int i;
MPI_Comm mpi_comm;
MPI_Info mpi_info;
- hid_t fid = -1;
- hid_t sid = -1;
- hid_t dataset = -1;
+ hid_t fid = H5I_INVALID_HID;
+ hid_t sid = H5I_INVALID_HID;
+ hid_t dataset = H5I_INVALID_HID;
hid_t data_type = H5T_NATIVE_INT;
- hid_t fapl = -1;
- hid_t dcpl = -1;
- hid_t dxpl_write = -1;
- hid_t dxpl_read = -1;
+ hid_t fapl = H5I_INVALID_HID;
+ hid_t dcpl = H5I_INVALID_HID;
+ hid_t dxpl_write = H5I_INVALID_HID;
+ hid_t dxpl_read = H5I_INVALID_HID;
hsize_t dims[RANK];
- hid_t mem_space = -1;
- hid_t file_space = -1;
+ hid_t mem_space = H5I_INVALID_HID;
+ hid_t file_space = H5I_INVALID_HID;
hsize_t chunk_dims[RANK];
herr_t ret;
/* set to global value as default */
@@ -3728,12 +3728,12 @@ test_no_collective_cause_mode(int selection_mode)
/* Test values */
memset(message, 0, sizeof(message));
- HDsnprintf(message, sizeof(message),
- "Local cause of Broken Collective I/O has the correct value for %s.\n", test_name);
+ snprintf(message, sizeof(message), "Local cause of Broken Collective I/O has the correct value for %s.\n",
+ test_name);
VRFY((no_collective_cause_local_write == no_collective_cause_local_expected), message);
memset(message, 0, sizeof(message));
- HDsnprintf(message, sizeof(message),
- "Global cause of Broken Collective I/O has the correct value for %s.\n", test_name);
+ snprintf(message, sizeof(message),
+ "Global cause of Broken Collective I/O has the correct value for %s.\n", test_name);
VRFY((no_collective_cause_global_write == no_collective_cause_global_expected), message);
/* Release some resources */
@@ -3827,7 +3827,7 @@ dataset_atomicity(void)
herr_t ret; /* Generic return value */
int mpi_size, mpi_rank;
int i, j, k;
- hbool_t atomicity = FALSE;
+ bool atomicity = false;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
@@ -3907,7 +3907,7 @@ dataset_atomicity(void)
/* should fail */
H5E_BEGIN_TRY
{
- ret = H5Fset_mpi_atomicity(fid, TRUE);
+ ret = H5Fset_mpi_atomicity(fid, true);
}
H5E_END_TRY
VRFY((ret == FAIL), "H5Fset_mpi_atomicity failed");
@@ -3930,7 +3930,7 @@ dataset_atomicity(void)
ret = H5Pclose(acc_tpl);
VRFY((ret >= 0), "H5Pclose succeeded");
- ret = H5Fset_mpi_atomicity(fid, TRUE);
+ ret = H5Fset_mpi_atomicity(fid, true);
VRFY((ret >= 0), "H5Fset_mpi_atomicity succeeded");
/* open dataset1 (contiguous case) */
@@ -3951,7 +3951,7 @@ dataset_atomicity(void)
/* check that the atomicity flag is set */
ret = H5Fget_mpi_atomicity(fid, &atomicity);
VRFY((ret >= 0), "atomcity get failed");
- VRFY((atomicity == TRUE), "atomcity set failed");
+ VRFY((atomicity == true), "atomcity set failed");
MPI_Barrier(comm);
@@ -4021,11 +4021,11 @@ dataset_atomicity(void)
read_buf[i] = 8;
}
- atomicity = FALSE;
+ atomicity = false;
/* check that the atomicity flag is set */
ret = H5Fget_mpi_atomicity(fid, &atomicity);
VRFY((ret >= 0), "atomcity get failed");
- VRFY((atomicity == TRUE), "atomcity set failed");
+ VRFY((atomicity == true), "atomcity set failed");
block[0] = (hsize_t)(dim0 / mpi_size - 1);
block[1] = (hsize_t)(dim1 / mpi_size - 1);
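
dataset_atomicity() above now passes plain true to H5Fset_mpi_atomicity() and reads the flag back into a bool. A self-contained sketch of that round trip (hypothetical helper; assumes a parallel HDF5 build and that MPI is already initialized):

#include <stdbool.h>
#include <mpi.h>
#include "hdf5.h"

static herr_t
check_atomicity(const char *filename)
{
    hid_t  fapl_id   = H5I_INVALID_HID; /* invalid-ID sentinel, as in the diff */
    hid_t  file_id   = H5I_INVALID_HID;
    bool   atomicity = false;
    herr_t status    = -1;

    if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0)
        goto done;
    if (H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL) < 0)
        goto done;

    /* Atomicity can only be toggled on a file opened with the MPI-IO driver */
    if ((file_id = H5Fopen(filename, H5F_ACC_RDWR, fapl_id)) < 0)
        goto done;
    if (H5Fset_mpi_atomicity(file_id, true) < 0)
        goto done;
    if (H5Fget_mpi_atomicity(file_id, &atomicity) < 0 || !atomicity)
        goto done;

    status = 0;
done:
    if (file_id >= 0)
        H5Fclose(file_id);
    if (fapl_id >= 0)
        H5Pclose(fapl_id);
    return status;
}
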
diff --git a/testpar/t_file.c b/testpar/t_file.c
index 9338344..a6a541b 100644
--- a/testpar/t_file.c
+++ b/testpar/t_file.c
@@ -120,7 +120,7 @@ test_split_comm_access(void)
void
test_page_buffer_access(void)
{
- hid_t file_id = -1; /* File ID */
+ hid_t file_id = H5I_INVALID_HID; /* File ID */
hid_t fcpl, fapl;
size_t page_count = 0;
int i, num_elements = 200;
@@ -129,7 +129,7 @@ test_page_buffer_access(void)
H5F_t *f = NULL;
herr_t ret; /* generic return value */
const char *filename;
- hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */
+ bool api_ctx_pushed = false; /* Whether API context pushed */
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
@@ -160,7 +160,7 @@ test_page_buffer_access(void)
VRFY((file_id < 0), "H5Fcreate failed");
/* disable collective metadata writes for page buffering to work */
- ret = H5Pset_coll_metadata_write(fapl, FALSE);
+ ret = H5Pset_coll_metadata_write(fapl, false);
VRFY((ret >= 0), "");
ret = create_file(filename, fcpl, fapl, H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
@@ -190,7 +190,7 @@ test_page_buffer_access(void)
ret = H5Pset_page_buffer_size(fapl_self, sizeof(int) * 1000, 0, 0);
VRFY((ret == 0), "");
/* collective metadata writes do not work with page buffering */
- ret = H5Pset_coll_metadata_write(fapl_self, FALSE);
+ ret = H5Pset_coll_metadata_write(fapl_self, false);
VRFY((ret >= 0), "");
file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl_self);
@@ -199,7 +199,7 @@ test_page_buffer_access(void)
/* Push API context */
ret = H5CX_push();
VRFY((ret == 0), "H5CX_push()");
- api_ctx_pushed = TRUE;
+ api_ctx_pushed = true;
/* Get a pointer to the internal file object */
f = (H5F_t *)H5I_object(file_id);
@@ -288,9 +288,9 @@ test_page_buffer_access(void)
/* Pop API context */
if (api_ctx_pushed) {
- ret = H5CX_pop(FALSE);
+ ret = H5CX_pop(false);
VRFY((ret == 0), "H5CX_pop()");
- api_ctx_pushed = FALSE;
+ api_ctx_pushed = false;
}
}
@@ -300,7 +300,7 @@ test_page_buffer_access(void)
ret = H5Pset_page_buffer_size(fapl, sizeof(int) * 1000, 0, 0);
VRFY((ret == 0), "");
/* collective metadata writes do not work with page buffering */
- ret = H5Pset_coll_metadata_write(fapl, FALSE);
+ ret = H5Pset_coll_metadata_write(fapl, false);
VRFY((ret >= 0), "");
file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
@@ -309,7 +309,7 @@ test_page_buffer_access(void)
/* Push API context */
ret = H5CX_push();
VRFY((ret == 0), "H5CX_push()");
- api_ctx_pushed = TRUE;
+ api_ctx_pushed = true;
/* Get a pointer to the internal file object */
f = (H5F_t *)H5I_object(file_id);
@@ -422,9 +422,9 @@ test_page_buffer_access(void)
/* Pop API context */
if (api_ctx_pushed) {
- ret = H5CX_pop(FALSE);
+ ret = H5CX_pop(false);
VRFY((ret == 0), "H5CX_pop()");
- api_ctx_pushed = FALSE;
+ api_ctx_pushed = false;
}
free(data);
@@ -449,7 +449,7 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str
H5F_t *f = NULL;
H5C_t *cache_ptr = NULL;
H5AC_cache_config_t config;
- hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */
+ bool api_ctx_pushed = false; /* Whether API context pushed */
herr_t ret;
file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
@@ -461,14 +461,14 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str
/* Push API context */
ret = H5CX_push();
VRFY((ret == 0), "H5CX_push()");
- api_ctx_pushed = TRUE;
+ api_ctx_pushed = true;
f = (H5F_t *)H5I_object(file_id);
VRFY((f != NULL), "");
cache_ptr = f->shared->cache;
- cache_ptr->ignore_tags = TRUE;
+ cache_ptr->ignore_tags = true;
H5C_stats__reset(cache_ptr);
config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
@@ -514,25 +514,25 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str
VRFY((mem_dataspace >= 0), "");
for (k = 0; k < NUM_DSETS; k++) {
- HDsnprintf(dset_name, sizeof(dset_name), "D1dset%d", k);
+ snprintf(dset_name, sizeof(dset_name), "D1dset%d", k);
dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
ret = H5Dclose(dset_id);
VRFY((ret == 0), "");
- HDsnprintf(dset_name, sizeof(dset_name), "D2dset%d", k);
+ snprintf(dset_name, sizeof(dset_name), "D2dset%d", k);
dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
ret = H5Dclose(dset_id);
VRFY((ret == 0), "");
- HDsnprintf(dset_name, sizeof(dset_name), "D3dset%d", k);
+ snprintf(dset_name, sizeof(dset_name), "D3dset%d", k);
dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
ret = H5Dclose(dset_id);
VRFY((ret == 0), "");
- HDsnprintf(dset_name, sizeof(dset_name), "dset%d", k);
+ snprintf(dset_name, sizeof(dset_name), "dset%d", k);
dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
@@ -555,13 +555,13 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str
for (i = 0; i < num_elements; i++)
VRFY((data_array[i] == mpi_rank + 1), "Dataset Verify failed");
- HDsnprintf(dset_name, sizeof(dset_name), "D1dset%d", k);
+ snprintf(dset_name, sizeof(dset_name), "D1dset%d", k);
ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
VRFY((ret == 0), "");
- HDsnprintf(dset_name, sizeof(dset_name), "D2dset%d", k);
+ snprintf(dset_name, sizeof(dset_name), "D2dset%d", k);
ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
VRFY((ret == 0), "");
- HDsnprintf(dset_name, sizeof(dset_name), "D3dset%d", k);
+ snprintf(dset_name, sizeof(dset_name), "D3dset%d", k);
ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
VRFY((ret == 0), "");
}
@@ -577,9 +577,9 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str
/* Pop API context */
if (api_ctx_pushed) {
- ret = H5CX_pop(FALSE);
+ ret = H5CX_pop(false);
VRFY((ret == 0), "H5CX_pop()");
- api_ctx_pushed = FALSE;
+ api_ctx_pushed = false;
}
MPI_Barrier(MPI_COMM_WORLD);
@@ -605,7 +605,7 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t
H5F_t *f = NULL;
H5C_t *cache_ptr = NULL;
H5AC_cache_config_t config;
- hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */
+ bool api_ctx_pushed = false; /* Whether API context pushed */
herr_t ret;
config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
@@ -624,7 +624,7 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t
/* Push API context */
ret = H5CX_push();
VRFY((ret == 0), "H5CX_push()");
- api_ctx_pushed = TRUE;
+ api_ctx_pushed = true;
ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
VRFY((ret == 0), "");
@@ -666,7 +666,7 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t
VRFY((mem_dataspace >= 0), "");
for (k = 0; k < NUM_DSETS; k++) {
- HDsnprintf(dset_name, sizeof(dset_name), "dset%d", k);
+ snprintf(dset_name, sizeof(dset_name), "dset%d", k);
dset_id = H5Dopen2(grp_id, dset_name, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
@@ -711,7 +711,7 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t
entry_ptr = cache_ptr->index[i];
while (entry_ptr != NULL) {
- assert(entry_ptr->is_dirty == FALSE);
+ assert(entry_ptr->is_dirty == false);
if (!entry_ptr->is_pinned && !entry_ptr->is_protected) {
ret = H5AC_expunge_entry(f, entry_ptr->type, entry_ptr->addr, 0);
@@ -739,9 +739,9 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t
/* Pop API context */
if (api_ctx_pushed) {
- ret = H5CX_pop(FALSE);
+ ret = H5CX_pop(false);
VRFY((ret == 0), "H5CX_pop()");
- api_ctx_pushed = FALSE;
+ api_ctx_pushed = false;
}
free(data_array);
@@ -760,7 +760,7 @@ test_file_properties(void)
hid_t fid = H5I_INVALID_HID; /* HDF5 file ID */
hid_t fapl_id = H5I_INVALID_HID; /* File access plist */
hid_t fapl_copy_id = H5I_INVALID_HID; /* File access plist */
- hbool_t is_coll;
+ bool is_coll;
htri_t are_equal;
const char *filename;
MPI_Comm comm = MPI_COMM_WORLD;
@@ -781,7 +781,7 @@ test_file_properties(void)
mpi_ret = MPI_Info_create(&info);
VRFY((mpi_ret >= 0), "MPI_Info_create succeeded");
mpi_ret = MPI_Info_set(info, "hdf_info_prop1", "xyz");
- VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set");
+ VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set succeeded");
/* setup file access plist */
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -817,7 +817,7 @@ test_file_properties(void)
fapl_copy_id = H5Pcopy(fapl_id);
VRFY((fapl_copy_id != H5I_INVALID_HID), "H5Pcopy");
are_equal = H5Pequal(fapl_id, fapl_copy_id);
- VRFY((TRUE == are_equal), "H5Pequal");
+ VRFY((true == are_equal), "H5Pequal");
/* Add a property to the copy and ensure it's different now */
mpi_ret = MPI_Info_set(info, "hdf_info_prop2", "abc");
@@ -825,7 +825,7 @@ test_file_properties(void)
ret = H5Pset_mpi_params(fapl_copy_id, comm, info);
VRFY((ret >= 0), "H5Pset_mpi_params succeeded");
are_equal = H5Pequal(fapl_id, fapl_copy_id);
- VRFY((FALSE == are_equal), "H5Pequal");
+ VRFY((false == are_equal), "H5Pequal");
/* Add a property with the same key but a different value to the original
* and ensure they are still different.
@@ -835,7 +835,7 @@ test_file_properties(void)
ret = H5Pset_mpi_params(fapl_id, comm, info);
VRFY((ret >= 0), "H5Pset_mpi_params succeeded");
are_equal = H5Pequal(fapl_id, fapl_copy_id);
- VRFY((FALSE == are_equal), "H5Pequal");
+ VRFY((false == are_equal), "H5Pequal");
/* Set the second property in the original to the same
* value as the copy and ensure they are the same now.
@@ -845,7 +845,7 @@ test_file_properties(void)
ret = H5Pset_mpi_params(fapl_id, comm, info);
VRFY((ret >= 0), "H5Pset_mpi_params succeeded");
are_equal = H5Pequal(fapl_id, fapl_copy_id);
- VRFY((TRUE == are_equal), "H5Pequal");
+ VRFY((true == are_equal), "H5Pequal");
/* create the file */
fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
@@ -856,12 +856,12 @@ test_file_properties(void)
/* Collective metadata writes */
ret = H5Pget_coll_metadata_write(fapl_id, &is_coll);
VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded");
- VRFY((is_coll == FALSE), "Incorrect property setting for coll metadata writes");
+ VRFY((is_coll == false), "Incorrect property setting for coll metadata writes");
/* Collective metadata read API calling requirement */
ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll);
VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded");
- VRFY((is_coll == FALSE), "Incorrect property setting for coll metadata API calls requirement");
+ VRFY((is_coll == false), "Incorrect property setting for coll metadata API calls requirement");
ret = H5Fclose(fid);
VRFY((ret >= 0), "H5Fclose succeeded");
@@ -870,19 +870,19 @@ test_file_properties(void)
ret = H5Pset_fapl_mpio(fapl_id, comm, info);
VRFY((ret >= 0), "H5Pset_fapl_mpio failed");
fid = H5Fopen(filename, H5F_ACC_RDWR, fapl_id);
- VRFY((fid != H5I_INVALID_HID), "H5Fcreate succeeded");
+ VRFY((fid != H5I_INVALID_HID), "H5Fopen succeeded");
/* verify settings for file access properties */
/* Collective metadata writes */
ret = H5Pget_coll_metadata_write(fapl_id, &is_coll);
VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded");
- VRFY((is_coll == FALSE), "Incorrect property setting for coll metadata writes");
+ VRFY((is_coll == false), "Incorrect property setting for coll metadata writes");
/* Collective metadata read API calling requirement */
ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll);
VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded");
- VRFY((is_coll == FALSE), "Incorrect property setting for coll metadata API calls requirement");
+ VRFY((is_coll == false), "Incorrect property setting for coll metadata API calls requirement");
ret = H5Fclose(fid);
VRFY((ret >= 0), "H5Fclose succeeded");
@@ -891,25 +891,25 @@ test_file_properties(void)
ret = H5Pset_fapl_mpio(fapl_id, comm, info);
VRFY((ret >= 0), "H5Pset_fapl_mpio failed");
/* Collective metadata writes */
- ret = H5Pset_coll_metadata_write(fapl_id, TRUE);
+ ret = H5Pset_coll_metadata_write(fapl_id, true);
VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded");
/* Collective metadata read API calling requirement */
- ret = H5Pset_all_coll_metadata_ops(fapl_id, TRUE);
+ ret = H5Pset_all_coll_metadata_ops(fapl_id, true);
VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded");
fid = H5Fopen(filename, H5F_ACC_RDWR, fapl_id);
- VRFY((fid != H5I_INVALID_HID), "H5Fcreate succeeded");
+ VRFY((fid != H5I_INVALID_HID), "H5Fopen succeeded");
/* verify settings for file access properties */
/* Collective metadata writes */
ret = H5Pget_coll_metadata_write(fapl_id, &is_coll);
VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded");
- VRFY((is_coll == TRUE), "Incorrect property setting for coll metadata writes");
+ VRFY((is_coll == true), "Incorrect property setting for coll metadata writes");
/* Collective metadata read API calling requirement */
ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll);
VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded");
- VRFY((is_coll == TRUE), "Incorrect property setting for coll metadata API calls requirement");
+ VRFY((is_coll == true), "Incorrect property setting for coll metadata API calls requirement");
/* close fapl and retrieve it from file */
ret = H5Pclose(fapl_id);
@@ -924,12 +924,12 @@ test_file_properties(void)
/* Collective metadata writes */
ret = H5Pget_coll_metadata_write(fapl_id, &is_coll);
VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded");
- VRFY((is_coll == TRUE), "Incorrect property setting for coll metadata writes");
+ VRFY((is_coll == true), "Incorrect property setting for coll metadata writes");
/* Collective metadata read API calling requirement */
ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll);
VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded");
- VRFY((is_coll == TRUE), "Incorrect property setting for coll metadata API calls requirement");
+ VRFY((is_coll == true), "Incorrect property setting for coll metadata API calls requirement");
/* close file */
ret = H5Fclose(fid);
@@ -980,7 +980,7 @@ test_delete(void)
/* Verify that the file is an HDF5 file */
is_accessible = H5Fis_accessible(filename, fapl_id);
- VRFY((TRUE == is_accessible), "H5Fis_accessible");
+ VRFY((true == is_accessible), "H5Fis_accessible");
/* Delete the file */
ret = H5Fdelete(filename, fapl_id);
@@ -1044,7 +1044,7 @@ test_invalid_libver_bounds_file_close_assert(void)
fcpl_id = H5Pcreate(H5P_FILE_CREATE);
VRFY((fcpl_id != H5I_INVALID_HID), "H5Pcreate");
- ret = H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE, TRUE, 1);
+ ret = H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE, true, 1);
VRFY((SUCCEED == ret), "H5Pset_file_space_strategy");
/* create the file */
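
test_file_properties() above sets the collective-metadata properties with plain true and reads them back into a bool. A condensed sketch of that round trip (hypothetical helper, parallel build assumed):

#include <stdbool.h>
#include <mpi.h>
#include "hdf5.h"

static hid_t
make_coll_md_fapl(void)
{
    hid_t fapl_id = H5I_INVALID_HID;
    bool  is_coll = false;

    if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0)
        return H5I_INVALID_HID;

    /* Request collective metadata writes and collective metadata reads */
    if (H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL) < 0 ||
        H5Pset_coll_metadata_write(fapl_id, true) < 0 ||
        H5Pset_all_coll_metadata_ops(fapl_id, true) < 0)
        goto error;

    /* Read both settings back; each should now report true */
    if (H5Pget_coll_metadata_write(fapl_id, &is_coll) < 0 || !is_coll)
        goto error;
    if (H5Pget_all_coll_metadata_ops(fapl_id, &is_coll) < 0 || !is_coll)
        goto error;

    return fapl_id;

error:
    H5Pclose(fapl_id);
    return H5I_INVALID_HID;
}
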
diff --git a/testpar/t_file_image.c b/testpar/t_file_image.c
index 471494c..755831b 100644
--- a/testpar/t_file_image.c
+++ b/testpar/t_file_image.c
@@ -67,17 +67,17 @@ file_image_daisy_chain_test(void)
int space_ndims;
MPI_Status rcvstat;
int *vector_ptr = NULL;
- hid_t fapl_id = -1;
+ hid_t fapl_id = H5I_INVALID_HID;
hid_t file_id; /* file IDs */
- hid_t dset_id = -1;
- hid_t dset_type_id = -1;
- hid_t space_id = -1;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dset_type_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
herr_t err;
hsize_t dims[1];
void *image_ptr = NULL;
ssize_t bytes_read;
ssize_t image_len;
- hbool_t vector_ok = TRUE;
+ bool vector_ok = true;
htri_t tri_result;
/* set up MPI parameters */
@@ -85,7 +85,7 @@ file_image_daisy_chain_test(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* setup file name */
- HDsnprintf(file_name, 1024, "file_image_daisy_chain_test_%05d.h5", (int)mpi_rank);
+ snprintf(file_name, 1024, "file_image_daisy_chain_test_%05d.h5", (int)mpi_rank);
if (mpi_rank == 0) {
@@ -95,7 +95,7 @@ file_image_daisy_chain_test(void)
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((fapl_id >= 0), "creating fapl");
- err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), FALSE);
+ err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), false);
VRFY((err >= 0), "setting core file driver in fapl.");
file_id = H5Fcreate(file_name, 0, H5P_DEFAULT, fapl_id);
@@ -184,7 +184,7 @@ file_image_daisy_chain_test(void)
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((fapl_id >= 0), "creating fapl");
- err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), FALSE);
+ err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), false);
VRFY((err >= 0), "setting core file driver in fapl.");
err = H5Pset_file_image(fapl_id, image_ptr, (size_t)image_len);
@@ -200,7 +200,7 @@ file_image_daisy_chain_test(void)
VRFY((dset_type_id >= 0), "obtained data set type");
tri_result = H5Tequal(dset_type_id, H5T_NATIVE_INT);
- VRFY((tri_result == TRUE), "verified data set type");
+ VRFY((tri_result == true), "verified data set type");
space_id = H5Dget_space(dset_id);
VRFY((space_id >= 0), "opened data space");
@@ -218,10 +218,10 @@ file_image_daisy_chain_test(void)
err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr);
VRFY((err >= 0), "read received vector.");
- vector_ok = TRUE;
+ vector_ok = true;
for (i = 0; i < mpi_size; i++)
if (vector_ptr[i] != i)
- vector_ok = FALSE;
+ vector_ok = false;
VRFY((vector_ok), "verified received vector.");
free(vector_ptr);
@@ -266,7 +266,7 @@ file_image_daisy_chain_test(void)
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((fapl_id >= 0), "creating fapl");
- err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), FALSE);
+ err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), false);
VRFY((err >= 0), "setting core file driver in fapl.");
err = H5Pset_file_image(fapl_id, image_ptr, (size_t)image_len);
@@ -283,7 +283,7 @@ file_image_daisy_chain_test(void)
VRFY((dset_type_id >= 0), "obtained data set type");
tri_result = H5Tequal(dset_type_id, H5T_NATIVE_INT);
- VRFY((tri_result == TRUE), "verified data set type");
+ VRFY((tri_result == true), "verified data set type");
space_id = H5Dget_space(dset_id);
VRFY((space_id >= 0), "opened data space");
@@ -301,15 +301,15 @@ file_image_daisy_chain_test(void)
err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr);
VRFY((err >= 0), "read received vector.");
- vector_ok = TRUE;
+ vector_ok = true;
for (i = 0; i < mpi_size; i++) {
if (i < mpi_rank) {
if (vector_ptr[i] != i)
- vector_ok = FALSE;
+ vector_ok = false;
}
else {
if (vector_ptr[i] != -1)
- vector_ok = FALSE;
+ vector_ok = false;
}
}
VRFY((vector_ok), "verified received vector.");
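
file_image_daisy_chain_test() above passes false for the core VFD's backing_store argument before handing a received image to the library. A condensed sketch of that open path (illustrative; error reporting and buffer-ownership details omitted):

#include <stdbool.h>
#include "hdf5.h"

static hid_t
open_received_image(void *image_ptr, size_t image_len)
{
    hid_t fapl_id = H5I_INVALID_HID;
    hid_t file_id = H5I_INVALID_HID;

    if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0)
        return H5I_INVALID_HID;

    /* 64 KiB increment, no on-disk backing store (false) */
    if (H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), false) < 0)
        goto done;

    /* Give the library the in-memory image and open it as a file */
    if (H5Pset_file_image(fapl_id, image_ptr, image_len) < 0)
        goto done;

    file_id = H5Fopen("image_placeholder.h5", H5F_ACC_RDWR, fapl_id);

done:
    H5Pclose(fapl_id);
    return file_id;
}
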
diff --git a/testpar/t_filter_read.c b/testpar/t_filter_read.c
index 6d19b76..01695ab 100644
--- a/testpar/t_filter_read.c
+++ b/testpar/t_filter_read.c
@@ -145,7 +145,7 @@ filter_read_internal(const char *filename, hid_t dcpl, hsize_t *dset_size)
(unsigned long)(hs_offset[1] + j));
fprintf(stderr, " At original: %d\n", (int)points[i * size[1] + (size_t)hs_offset[1] + j]);
fprintf(stderr, " At returned: %d\n", (int)check[i * hs_size[1] + j]);
- VRFY(FALSE, "");
+ VRFY(false, "");
}
}
}
diff --git a/testpar/t_filters_parallel.c b/testpar/t_filters_parallel.c
index bfe306c..198201a 100644
--- a/testpar/t_filters_parallel.c
+++ b/testpar/t_filters_parallel.c
@@ -19,26 +19,22 @@
#include "t_filters_parallel.h"
static const char *FILENAME[] = {"t_filters_parallel", NULL};
-char filenames[1][256];
+static char filenames[1][256];
-static MPI_Comm comm = MPI_COMM_WORLD;
-static MPI_Info info = MPI_INFO_NULL;
-static int mpi_rank;
-static int mpi_size;
+static MPI_Comm comm = MPI_COMM_WORLD;
+static MPI_Info info = MPI_INFO_NULL;
+static int mpi_rank = 0;
+static int mpi_size = 0;
int nerrors = 0;
/* Arrays of filter ID values and filter names (should match each other) */
-H5Z_filter_t filterIDs[] = {
+static H5Z_filter_t filterIDs[] = {
H5Z_FILTER_DEFLATE, H5Z_FILTER_SHUFFLE, H5Z_FILTER_FLETCHER32,
H5Z_FILTER_SZIP, H5Z_FILTER_NBIT, H5Z_FILTER_SCALEOFFSET,
};
-const char *filterNames[] = {"Deflate", "Shuffle", "Fletcher32", "SZIP", "Nbit", "ScaleOffset"};
-
-/* Function pointer typedef for test functions */
-typedef void (*test_func)(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id,
- hid_t dxpl_id);
+static const char *filterNames[] = {"Deflate", "Shuffle", "Fletcher32", "SZIP", "Nbit", "ScaleOffset"};
/* Typedef for filter arguments for user-defined filters */
typedef struct filter_options_t {
@@ -47,6 +43,15 @@ typedef struct filter_options_t {
const unsigned int cd_values[];
} filter_options_t;
+/* Enum for running these tests in different modes */
+typedef enum test_mode_t {
+ USE_SINGLE_DATASET, /* Operate on a single dataset with H5Dwrite/read */
+ USE_MULTIPLE_DATASETS, /* Operate on multiple datasets with H5Dwrite_multi/read_multi */
+ USE_MULTIPLE_DATASETS_MIXED_FILTERED, /* Operate on multiple datasets with H5Dwrite_multi/read_multi
+ and with some of the datasets being unfiltered */
+ TEST_MODE_SENTINEL
+} test_mode_t;
+
/*
* Enum for verify_space_alloc_status which specifies
* how many chunks have been written to in a dataset
@@ -58,102 +63,160 @@ typedef enum num_chunks_written_t {
ALL_CHUNKS_WRITTEN
} num_chunks_written_t;
-static herr_t set_dcpl_filter(hid_t dcpl_id, H5Z_filter_t filter_id, filter_options_t *filter_options);
-static herr_t verify_space_alloc_status(hid_t dset_id, hid_t dcpl_id, num_chunks_written_t chunks_written);
+/* Function pointer typedef for test functions */
+typedef void (*test_func)(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id,
+ hid_t dxpl_id, test_mode_t test_mode);
+
+static herr_t set_dcpl_filter(hid_t dcpl_id, H5Z_filter_t filter_id, filter_options_t *filter_options);
+static void verify_space_alloc_status(size_t num_dsets, hid_t *dset_ids, hid_t dcpl_id,
+ num_chunks_written_t chunks_written);
+static void verify_chunk_opt_status(size_t num_dsets, hid_t dxpl_id);
+static const char *test_mode_to_string(test_mode_t test_mode);
+
+static void create_datasets(hid_t parent_obj_id, const char *dset_name, hid_t type_id, hid_t filespace_id,
+ hid_t dcpl_id, test_mode_t test_mode, size_t *num_dsets, hid_t *dset_ids);
+static void open_datasets(hid_t parent_obj_id, const char *dset_name, size_t num_dsets, test_mode_t test_mode,
+ hid_t *dset_ids);
+static void write_datasets(size_t num_dsets, hid_t *dset_ids, hid_t type_id, hid_t mspace_id,
+ hid_t *fspace_ids, hid_t dxpl_id, const void **bufs, test_mode_t test_mode);
+static void read_datasets(size_t num_dsets, hid_t *dset_ids, hid_t type_id, hid_t mspace_id, hid_t fspace_id,
+ hid_t dxpl_id, void **bufs, test_mode_t test_mode);
+
+static void select_hyperslab(size_t num_dsets, hid_t *dset_ids, hsize_t *start, hsize_t *stride,
+ hsize_t *count, hsize_t *block, hid_t *fspace_ids);
+static void select_all(size_t num_dsets, hid_t *dset_ids, hid_t *fspace_ids);
+static void select_none(size_t num_dsets, hid_t *dset_ids, hid_t *fspace_ids);
+static void select_elements(size_t num_dsets, hid_t *dset_ids, size_t num_points, hsize_t *coords,
+ hid_t *fspace_ids);
#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES
/* Tests for writing data in parallel */
static void test_write_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_write_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_write_filtered_dataset_no_overlap_partial(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_write_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_write_filtered_dataset_single_unlim_dim_no_overlap(const char *parent_group,
H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id);
+ hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_write_filtered_dataset_single_unlim_dim_overlap(const char *parent_group,
H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id);
+ hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char *parent_group,
H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id);
+ hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_write_filtered_dataset_multi_unlim_dim_overlap(const char *parent_group,
H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id);
+ hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_write_filtered_dataset_single_no_selection(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_write_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_write_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_write_filtered_dataset_interleaved_write(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_write_transformed_filtered_dataset_no_overlap(const char *parent_group,
H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id);
+ hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_write_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group,
H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id);
+ hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_write_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group,
H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id);
+ hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_write_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_write_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group,
H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id);
+ hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_write_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group,
H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id);
+ hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_write_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_group,
H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id);
+ hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_write_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group,
H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id);
+ hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
#endif
/* Tests for reading data in parallel */
static void test_read_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_read_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_read_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_read_filtered_dataset_single_no_selection(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_read_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_read_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_read_filtered_dataset_interleaved_read(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_read_transformed_filtered_dataset_no_overlap(const char *parent_group,
H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id);
+ hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_read_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group,
H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id);
+ hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_read_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group,
H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id);
+ hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_read_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_read_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group,
H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id);
+ hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_read_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group,
H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id);
+ hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_read_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_group,
H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id);
+ hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
static void test_read_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group,
H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id);
+ hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode);
/*
* Tests for attempting to round-trip the data going from
@@ -165,27 +228,25 @@ static void test_read_cmpd_filtered_dataset_type_conversion_shared(const char *
* written in parallel -> read serially
*/
static void test_write_serial_read_parallel(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id);
+ hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode);
#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES
static void test_write_parallel_read_serial(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id);
+ hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode);
/* Other miscellaneous tests */
static void test_shrinking_growing_chunks(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id);
+ hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode);
static void test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id);
+ hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode);
static void test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id);
-static void test_edge_chunks_partial_write(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id);
+ hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode);
static void test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id,
- hid_t dxpl_id);
+ hid_t dxpl_id, test_mode_t test_mode);
static void test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id);
+ hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode);
static void test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id);
+ hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode);
#endif
static test_func tests[] = {
@@ -232,7 +293,6 @@ static test_func tests[] = {
test_shrinking_growing_chunks,
test_edge_chunks_no_overlap,
test_edge_chunks_overlap,
- test_edge_chunks_partial_write,
test_fill_values,
test_fill_value_undefined,
test_fill_time_never,
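
Every prototype above gains a trailing test_mode_t argument while the tests[] table keeps its shape. A hypothetical driver loop (the real main() lies outside these hunks; this assumes the declarations above are in scope and that the per-filter parent group is created elsewhere):

static void
run_all_tests(hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
    char group_name[64];

    for (size_t t = 0; t < sizeof(tests) / sizeof(tests[0]); t++)
        for (size_t f = 0; f < sizeof(filterIDs) / sizeof(filterIDs[0]); f++)
            for (int m = USE_SINGLE_DATASET; m < TEST_MODE_SENTINEL; m++) {
                /* e.g. "Deflate_filter_group"; the real group naming may differ */
                snprintf(group_name, sizeof(group_name), "%s_filter_group", filterNames[f]);
                tests[t](group_name, filterIDs[f], fapl_id, dcpl_id, dxpl_id, (test_mode_t)m);
            }
}
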
@@ -259,11 +319,12 @@ set_dcpl_filter(hid_t dcpl_id, H5Z_filter_t filter_id, filter_options_t *filter_
case H5Z_FILTER_SZIP: {
unsigned pixels_per_block = H5_SZIP_MAX_PIXELS_PER_BLOCK;
hsize_t chunk_dims[H5S_MAX_RANK] = {0};
- size_t i, chunk_nelemts;
+ size_t chunk_nelemts;
VRFY(H5Pget_chunk(dcpl_id, H5S_MAX_RANK, chunk_dims) >= 0, "H5Pget_chunk succeeded");
- for (i = 0, chunk_nelemts = 1; i < H5S_MAX_RANK; i++)
+ chunk_nelemts = 1;
+ for (size_t i = 0; i < H5S_MAX_RANK; i++)
if (chunk_dims[i] > 0)
chunk_nelemts *= chunk_dims[i];
@@ -305,26 +366,37 @@ set_dcpl_filter(hid_t dcpl_id, H5Z_filter_t filter_id, filter_options_t *filter_
* based on the dataset's allocation time setting and how many chunks
* in the dataset have been written to.
*/
-static herr_t
-verify_space_alloc_status(hid_t dset_id, hid_t dcpl_id, num_chunks_written_t chunks_written)
+static void
+verify_space_alloc_status(size_t num_dsets, hid_t *dset_ids, hid_t dcpl_id,
+ num_chunks_written_t chunks_written)
{
- int nfilters;
- herr_t ret_value = SUCCEED;
+ H5D_space_status_t space_status;
+ H5D_alloc_time_t alloc_time;
- VRFY(((nfilters = H5Pget_nfilters(dcpl_id)) >= 0), "H5Pget_nfilters succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ hid_t dset_dcpl;
+ int nfilters;
- /*
- * Only verify space allocation status when there are filters
- * in the dataset's filter pipeline. When filters aren't in the
- * pipeline, the space allocation time and status can vary based
- * on whether the file was created in parallel or serial mode.
- */
- if (nfilters > 0) {
- H5D_space_status_t space_status;
- H5D_alloc_time_t alloc_time;
+ /* Check if this particular dataset has any filters applied */
+ dset_dcpl = H5Dget_create_plist(dset_ids[dset_idx]);
+ VRFY((dset_dcpl >= 0), "H5Dget_create_plist");
+
+ nfilters = H5Pget_nfilters(dset_dcpl);
+ VRFY((nfilters >= 0), "H5Pget_nfilters");
+
+ VRFY((H5Pclose(dset_dcpl) >= 0), "H5Pclose");
+
+ /*
+ * Only verify space allocation status when there are filters
+ * in the dataset's filter pipeline. When filters aren't in the
+ * pipeline, the space allocation time and status can vary based
+ * on whether the file was created in parallel or serial mode.
+ */
+ if (nfilters == 0)
+ return;
VRFY((H5Pget_alloc_time(dcpl_id, &alloc_time) >= 0), "H5Pget_alloc_time succeeded");
- VRFY((H5Dget_space_status(dset_id, &space_status) >= 0), "H5Dget_space_status succeeded");
+ VRFY((H5Dget_space_status(dset_ids[dset_idx], &space_status) >= 0), "H5Dget_space_status succeeded");
switch (alloc_time) {
case H5D_ALLOC_TIME_EARLY:
@@ -347,7 +419,7 @@ verify_space_alloc_status(hid_t dset_id, hid_t dcpl_id, num_chunks_written_t chu
VRFY((space_status == H5D_SPACE_STATUS_ALLOCATED) ||
(space_status == H5D_SPACE_STATUS_PART_ALLOCATED),
"verified space allocation status");
- else if (chunks_written == NO_CHUNKS_WRITTEN)
+ else if (chunks_written == NO_CHUNKS_WRITTEN) {
/*
* A special case where we wrote to a dataset that
* uses late space allocation, but the write was
@@ -358,6 +430,7 @@ verify_space_alloc_status(hid_t dset_id, hid_t dcpl_id, num_chunks_written_t chu
* been allocated.
*/
VRFY(space_status == H5D_SPACE_STATUS_ALLOCATED, "verified space allocation status");
+ }
else
VRFY(space_status == H5D_SPACE_STATUS_NOT_ALLOCATED, "verified space allocation status");
break;
@@ -385,8 +458,388 @@ verify_space_alloc_status(hid_t dset_id, hid_t dcpl_id, num_chunks_written_t chu
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
+}
+
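/*
 * Illustrative sketch: the allocation-time branches above are driven by the
 * dataset creation property list, so a test that wants to exercise the
 * H5D_ALLOC_TIME_EARLY path would, roughly, prepare its DCPL like this
 * before calling create_datasets()/verify_space_alloc_status(). The
 * early_alloc_dcpl name is an assumption, not code from this patch.
 */
hid_t early_alloc_dcpl = H5Pcopy(dcpl_id);
VRFY((early_alloc_dcpl >= 0), "DCPL copy succeeded");

/* Ask for space for every chunk to be allocated at dataset creation time */
VRFY((H5Pset_alloc_time(early_alloc_dcpl, H5D_ALLOC_TIME_EARLY) >= 0), "H5Pset_alloc_time succeeded");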
+/*
+ * Function to verify the chunk I/O optimization method that was used when
+ * the multi-dataset I/O API routines were invoked. As long as
+ * multi-dataset I/O was actually performed, the library should return
+ * that linked-chunk I/O was performed. Otherwise, if datasets were
+ * processed one at a time, the library should return that multi-chunk
+ * I/O was performed.
+ */
+static void
+verify_chunk_opt_status(size_t num_dsets, hid_t dxpl_id)
+{
+ H5D_mpio_actual_chunk_opt_mode_t chunk_opt_mode;
+ H5D_selection_io_mode_t sel_io_mode;
+ uint32_t no_sel_io_cause = 0;
+ herr_t ret;
+
+ if (H5P_DEFAULT != dxpl_id) {
+ ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_id, &chunk_opt_mode);
+ VRFY((ret >= 0), "H5Pget_mpio_actual_chunk_opt_mode succeeded");
+
+ ret = H5Pget_selection_io(dxpl_id, &sel_io_mode);
+ VRFY((ret >= 0), "H5Pget_selection_io succeeded");
+
+ if (sel_io_mode == H5D_SELECTION_IO_MODE_DEFAULT || sel_io_mode == H5D_SELECTION_IO_MODE_ON) {
+ ret = H5Pget_no_selection_io_cause(dxpl_id, &no_sel_io_cause);
+ VRFY((ret >= 0), "H5Pget_no_selection_io_cause succeeded");
+ }
+
+ if (num_dsets == 0) {
+ /*
+ * num_dsets == 0 implies that the write call was expected to
+ * failed and did so. Verify that the library returns
+             * fail and did so. Verify that the library returns
+ * method
+ */
+ VRFY((H5D_MPIO_NO_CHUNK_OPTIMIZATION == chunk_opt_mode),
+ "verified I/O optimization was H5D_MPIO_NO_CHUNK_OPTIMIZATION");
+ }
+ else if (num_dsets == 1) {
+ /*
+ * If selection I/O is set to ON and was actually performed, just
+ * verify that the library returns that either linked-chunk or
+ * multi-chunk I/O was performed. Otherwise, any of the optimization
+ * methods could potentially be returned by the library.
+ */
+ if ((sel_io_mode == H5D_SELECTION_IO_MODE_DEFAULT || sel_io_mode == H5D_SELECTION_IO_MODE_ON) &&
+ !no_sel_io_cause) {
+ VRFY((H5D_MPIO_NO_CHUNK_OPTIMIZATION != chunk_opt_mode),
+ "verified I/O optimization wasn't H5D_MPIO_NO_CHUNK_OPTIMIZATION");
+ VRFY((H5D_MPIO_LINK_CHUNK == chunk_opt_mode || H5D_MPIO_MULTI_CHUNK == chunk_opt_mode),
+ "verified I/O optimization was linked-chunk I/O or multi-chunk I/O");
+ }
+ }
+ else {
+ /*
+ * If selection I/O is set to ON and was actually performed, verify
+ * that the library returns that linked-chunk I/O was performed.
+ * Otherwise, any of the optimization methods could potentially be
+ * returned by the library.
+ */
+ if ((sel_io_mode == H5D_SELECTION_IO_MODE_DEFAULT || sel_io_mode == H5D_SELECTION_IO_MODE_ON) &&
+ !no_sel_io_cause) {
+ VRFY((H5D_MPIO_LINK_CHUNK == chunk_opt_mode),
+ "verified I/O optimization was linked-chunk I/O");
+ }
+ }
+ }
+}
+
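/*
 * Illustrative sketch: the linked-chunk assertion above only applies when
 * selection I/O is enabled on the transfer property list, so a test DXPL
 * would typically be configured along these lines (example_dxpl is an
 * assumed name; the real tests build their DXPLs elsewhere).
 */
hid_t example_dxpl = H5Pcreate(H5P_DATASET_XFER);
VRFY((example_dxpl >= 0), "DXPL creation succeeded");

/* Collective MPI I/O with selection I/O turned on */
VRFY((H5Pset_dxpl_mpio(example_dxpl, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded");
VRFY((H5Pset_selection_io(example_dxpl, H5D_SELECTION_IO_MODE_ON) >= 0), "H5Pset_selection_io succeeded");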
+static const char *
+test_mode_to_string(test_mode_t test_mode)
+{
+ switch (test_mode) {
+ case USE_SINGLE_DATASET:
+ return "USE_SINGLE_DATASET";
+ case USE_MULTIPLE_DATASETS:
+ return "USE_MULTIPLE_DATASETS";
+ case USE_MULTIPLE_DATASETS_MIXED_FILTERED:
+ return "USE_MULTIPLE_DATASETS_MIXED_FILTERED";
+ case TEST_MODE_SENTINEL:
+ default:
+ return "INVALID";
+ }
+}
+
+/*
+ * Utility routine to create the datasets for each test,
+ * after adjusting for the current test mode
+ */
+static void
+create_datasets(hid_t parent_obj_id, const char *dset_name, hid_t type_id, hid_t filespace_id, hid_t dcpl_id,
+ test_mode_t test_mode, size_t *num_dsets, hid_t *dset_ids)
+{
+ const char *dset_name_ptr = NULL;
+ hid_t unfiltered_dcpl = H5I_INVALID_HID;
+ char dset_name_multi_buf[512];
+ int n_dsets = 0;
+ int n_unfiltered = 0;
+
+ VRFY((num_dsets != NULL), "verify num_dsets");
+ VRFY((dset_ids != NULL), "verify dset_ids");
+
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++)
+ dset_ids[dset_idx] = H5I_INVALID_HID;
+
+ switch (test_mode) {
+ case USE_SINGLE_DATASET:
+ dset_name_ptr = dset_name;
+ n_dsets = 1;
+ break;
+
+ case USE_MULTIPLE_DATASETS:
+ case USE_MULTIPLE_DATASETS_MIXED_FILTERED:
+ dset_name_ptr = dset_name_multi_buf;
+ n_dsets = (rand() % (MAX_NUM_DSETS_MULTI - 1)) + 2;
+
+ /* Select between 1 and (n_dsets - 1) datasets to NOT be filtered */
+ if (test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED) {
+ n_unfiltered = (rand() % (n_dsets - 1)) + 1;
+
+ unfiltered_dcpl = H5Pcopy(dcpl_id);
+ VRFY((unfiltered_dcpl >= 0), "H5Pcopy succeeded");
+
+ VRFY((H5Premove_filter(unfiltered_dcpl, H5Z_FILTER_ALL) >= 0), "H5Premove_filter succeeded");
+ }
+ break;
+
+ case TEST_MODE_SENTINEL:
+ default:
+ if (MAINPROCESS)
+ printf("Invalid test mode\n");
+ fflush(stdout);
+ MPI_Abort(MPI_COMM_WORLD, -1);
+ }
+
+ for (size_t dset_idx = 0; dset_idx < (size_t)n_dsets; dset_idx++) {
+ hid_t curr_dcpl = dcpl_id;
+
+ /* Add suffix to dataset name for multi-dataset tests */
+ if (test_mode == USE_MULTIPLE_DATASETS || test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED)
+ snprintf(dset_name_multi_buf, 512, "%s_%d", dset_name, (int)dset_idx);
+
+ /* Determine if this should be an unfiltered dataset */
+ if ((test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED) && (n_unfiltered > 0)) {
+ size_t dsets_left = (size_t)n_dsets - dset_idx;
+ bool unfiltered;
+
+ /*
+ * The number of unfiltered datasets should never be
+ * greater than the number of datasets left to create
+ */
+ VRFY(((size_t)n_unfiltered <= dsets_left), "number of unfiltered datasets sanity check");
+
+ /*
+ * If the number of unfiltered datasets left is the
+ * same as the number of datasets left, create the
+ * remaining datasets as unfiltered datasets. Otherwise,
+ * randomly determine if a dataset will be unfiltered.
+ */
+ unfiltered = ((size_t)n_unfiltered == dsets_left) || ((rand() % 2) == 0);
+
+ if (unfiltered) {
+ curr_dcpl = unfiltered_dcpl;
+ n_unfiltered--;
+ }
+ }
+
+ dset_ids[dset_idx] = H5Dcreate2(parent_obj_id, dset_name_ptr, type_id, filespace_id, H5P_DEFAULT,
+ curr_dcpl, H5P_DEFAULT);
+
+ VRFY((dset_ids[dset_idx] >= 0), "Dataset creation succeeded");
+ }
+
+ if (unfiltered_dcpl >= 0)
+ VRFY((H5Pclose(unfiltered_dcpl) >= 0), "H5Pclose succeeded");
+
+ *num_dsets = (size_t)n_dsets;
+}
+
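/*
 * Illustrative sketch: typical use of create_datasets() as the tests below
 * drive it. Because the helper draws from rand() to pick the number of
 * datasets and which of them stay unfiltered, it presumably relies on every
 * MPI rank seeding the generator with the same value; otherwise ranks could
 * disagree about what was created. "example_dset" is an assumed name.
 */
size_t num_dsets;
hid_t  dset_ids[MAX_NUM_DSETS_MULTI];

create_datasets(group_id, "example_dset", HDF5_DATATYPE_NAME, filespace, plist_id, test_mode,
                &num_dsets, dset_ids);
VRFY(((num_dsets >= 1) && (num_dsets <= MAX_NUM_DSETS_MULTI)), "number of datasets sanity check");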
+/*
+ * Utility routine to open the datasets that were created
+ * for each test, after adjusting for the current test mode
+ */
+static void
+open_datasets(hid_t parent_obj_id, const char *dset_name, size_t num_dsets, test_mode_t test_mode,
+ hid_t *dset_ids)
+{
+ const char *dset_name_ptr = NULL;
+ char dset_name_multi_buf[512];
+
+ VRFY((dset_ids != NULL), "verify dset_ids");
+ VRFY((num_dsets <= INT_MAX), "verify num_dsets value");
+
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++)
+ dset_ids[dset_idx] = H5I_INVALID_HID;
+
+ switch (test_mode) {
+ case USE_SINGLE_DATASET:
+ dset_name_ptr = dset_name;
+ break;
+
+ case USE_MULTIPLE_DATASETS:
+ case USE_MULTIPLE_DATASETS_MIXED_FILTERED:
+ dset_name_ptr = dset_name_multi_buf;
+ break;
+
+ case TEST_MODE_SENTINEL:
+ default:
+ if (MAINPROCESS)
+ printf("Invalid test mode\n");
+ fflush(stdout);
+ MPI_Abort(MPI_COMM_WORLD, -1);
+ }
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ /* Add suffix to dataset name for multi-dataset tests */
+ if (test_mode == USE_MULTIPLE_DATASETS || test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED)
+ snprintf(dset_name_multi_buf, 512, "%s_%d", dset_name, (int)dset_idx);
+
+ dset_ids[dset_idx] = H5Dopen2(parent_obj_id, dset_name_ptr, H5P_DEFAULT);
+
+ VRFY((dset_ids[dset_idx] >= 0), "Dataset open succeeded");
+ }
+}
+
+/*
+ * Utility routine to write to the datasets that were created
+ * for each test, after adjusting for the current test mode
+ */
+static void
+write_datasets(size_t num_dsets, hid_t *dset_ids, hid_t type_id, hid_t mspace_id, hid_t *fspace_ids,
+ hid_t dxpl_id, const void **bufs, test_mode_t test_mode)
+{
+ hid_t mem_type_ids[MAX_NUM_DSETS_MULTI];
+ hid_t mem_space_ids[MAX_NUM_DSETS_MULTI];
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ mem_type_ids[dset_idx] = type_id;
+ mem_space_ids[dset_idx] = mspace_id;
+ }
+
+ switch (test_mode) {
+ case USE_SINGLE_DATASET:
+ VRFY((H5Dwrite(dset_ids[0], type_id, mspace_id, fspace_ids[0], dxpl_id, bufs[0]) >= 0),
+ "Dataset write succeeded");
+ break;
+
+ case USE_MULTIPLE_DATASETS:
+ case USE_MULTIPLE_DATASETS_MIXED_FILTERED:
+ VRFY((H5Dwrite_multi(num_dsets, dset_ids, mem_type_ids, mem_space_ids, fspace_ids, dxpl_id,
+ bufs) >= 0),
+ "Dataset write succeeded");
+ break;
+
+ case TEST_MODE_SENTINEL:
+ default:
+ if (MAINPROCESS)
+ printf("Invalid test mode\n");
+ fflush(stdout);
+ MPI_Abort(MPI_COMM_WORLD, -1);
+ }
+
+ verify_chunk_opt_status(num_dsets, dxpl_id);
+}
+
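/*
 * Illustrative sketch: passing H5S_BLOCK as the memory space is what lets
 * the rewritten tests drop their per-test memory dataspaces; it tells the
 * library the buffer is a contiguous block holding exactly as many elements
 * as the rank's file selection, so each write buffer is simply sized from
 * the selection dimensions (sel_dims, data_bufs, etc. as in the tests).
 */
data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE);

C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size);
VRFY((NULL != tmp_buf), "calloc succeeded");

data_bufs[dset_idx]    = tmp_buf; /* const view handed to write_datasets() */
data_bufs_nc[dset_idx] = tmp_buf; /* non-const pointer kept so it can be freed later */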
+/*
+ * Utility routine to read from the datasets that were created
+ * for each test, after adjusting for the current test mode
+ */
+static void
+read_datasets(size_t num_dsets, hid_t *dset_ids, hid_t type_id, hid_t mspace_id, hid_t fspace_id,
+ hid_t dxpl_id, void **bufs, test_mode_t test_mode)
+{
+ hid_t mem_type_ids[MAX_NUM_DSETS_MULTI];
+ hid_t mem_space_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_space_ids[MAX_NUM_DSETS_MULTI];
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ mem_type_ids[dset_idx] = type_id;
+ mem_space_ids[dset_idx] = mspace_id;
+ file_space_ids[dset_idx] = fspace_id;
+ }
+
+ switch (test_mode) {
+ case USE_SINGLE_DATASET:
+ VRFY((H5Dread(dset_ids[0], type_id, mspace_id, fspace_id, dxpl_id, bufs[0]) >= 0),
+ "Dataset read succeeded");
+ break;
+
+ case USE_MULTIPLE_DATASETS:
+ case USE_MULTIPLE_DATASETS_MIXED_FILTERED:
+ VRFY((H5Dread_multi(num_dsets, dset_ids, mem_type_ids, mem_space_ids, file_space_ids, dxpl_id,
+ bufs) >= 0),
+ "Dataset read succeeded");
+ break;
+
+ case TEST_MODE_SENTINEL:
+ default:
+ if (MAINPROCESS)
+ printf("Invalid test mode\n");
+ fflush(stdout);
+ MPI_Abort(MPI_COMM_WORLD, -1);
+ }
+
+ verify_chunk_opt_status(num_dsets, dxpl_id);
+}
+
+static void
+select_hyperslab(size_t num_dsets, hid_t *dset_ids, hsize_t *start, hsize_t *stride, hsize_t *count,
+ hsize_t *block, hid_t *fspace_ids)
+{
+ VRFY((fspace_ids != NULL), "verify fspace_ids");
+
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++)
+ fspace_ids[dset_idx] = H5I_INVALID_HID;
+
+ if (VERBOSE_MED) {
+ printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ fflush(stdout);
+ }
- return ret_value;
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ fspace_ids[dset_idx] = H5Dget_space(dset_ids[dset_idx]);
+ VRFY((fspace_ids[dset_idx] >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(fspace_ids[dset_idx], H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+ }
+}
+
+static void
+select_all(size_t num_dsets, hid_t *dset_ids, hid_t *fspace_ids)
+{
+ VRFY((fspace_ids != NULL), "verify fspace_ids");
+
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++)
+ fspace_ids[dset_idx] = H5I_INVALID_HID;
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ fspace_ids[dset_idx] = H5Dget_space(dset_ids[dset_idx]);
+ VRFY((fspace_ids[dset_idx] >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_all(fspace_ids[dset_idx]) >= 0), "H5Sselect_all succeeded");
+ }
+}
+
+static void
+select_none(size_t num_dsets, hid_t *dset_ids, hid_t *fspace_ids)
+{
+ VRFY((fspace_ids != NULL), "verify fspace_ids");
+
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++)
+ fspace_ids[dset_idx] = H5I_INVALID_HID;
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ fspace_ids[dset_idx] = H5Dget_space(dset_ids[dset_idx]);
+ VRFY((fspace_ids[dset_idx] >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_none(fspace_ids[dset_idx]) >= 0), "H5Sselect_none succeeded");
+ }
+}
+
+static void
+select_elements(size_t num_dsets, hid_t *dset_ids, size_t num_points, hsize_t *coords, hid_t *fspace_ids)
+{
+ VRFY((fspace_ids != NULL), "verify fspace_ids");
+
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++)
+ fspace_ids[dset_idx] = H5I_INVALID_HID;
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ fspace_ids[dset_idx] = H5Dget_space(dset_ids[dset_idx]);
+ VRFY((fspace_ids[dset_idx] >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_elements(fspace_ids[dset_idx], H5S_SELECT_SET, num_points, coords) >= 0),
+ "Point selection succeeded");
+ }
}
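/*
 * Illustrative sketch: taken together, the helpers above give each test the
 * same basic shape (buffer setup and cleanup elided, names as in the tests):
 */
create_datasets(group_id, "example_dset", HDF5_DATATYPE_NAME, filespace, plist_id, test_mode,
                &num_dsets, dset_ids);
verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);

select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
               test_mode);
verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);

/* Close, re-open and read everything back for verification */
for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
    VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");

open_datasets(group_id, "example_dset", num_dsets, test_mode, dset_ids);
read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);

/* correct_bufs holds the expected full-dataset contents computed by the test */
for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
    VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)),
         "Data verification succeeded");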
#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES
@@ -397,11 +850,12 @@ verify_space_alloc_status(hid_t dset_id, hid_t dcpl_id, num_chunks_written_t chu
*/
static void
test_write_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id)
+ hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
- C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
hsize_t dataset_dims[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS];
hsize_t chunk_dims[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS];
hsize_t sel_dims[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS];
@@ -409,13 +863,16 @@ test_write_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t fil
hsize_t stride[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS];
hsize_t count[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS];
hsize_t block[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS];
- size_t i, data_size, correct_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ size_t data_size, correct_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
- HDputs("Testing write to one-chunk filtered dataset");
+ puts("Testing write to one-chunk filtered dataset");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -434,9 +891,6 @@ test_write_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t fil
filespace = H5Screate_simple(WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS, sel_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
/* Create chunked dataset */
plist_id = H5Pcopy(dcpl_id);
VRFY((plist_id >= 0), "DCPL copy succeeded");
@@ -446,12 +900,12 @@ test_write_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t fil
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(group_id, WRITE_ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, WRITE_ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, plist_id,
+ test_mode, &num_dsets, dset_ids);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -467,73 +921,73 @@ test_write_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t fil
start[0] = ((hsize_t)mpi_rank * sel_dims[0]);
start[1] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
/* Fill data buffer */
data_size = (hsize_t)WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS *
- (hsize_t)WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS * sizeof(*data);
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+ (hsize_t)WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS * sizeof(C_DATATYPE);
- data = (C_DATATYPE *)calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx);
- for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE)GEN_DATA(i);
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
+ }
- for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = ((C_DATATYPE)i % (WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size *
- WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS)) +
- ((C_DATATYPE)i / (WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size *
- WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS));
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+ test_mode);
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
- "Dataset write succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- if (data)
- free(data);
+ /* Close and re-open datasets */
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ open_datasets(group_id, WRITE_ONE_CHUNK_FILTERED_DATASET_NAME, num_dsets, test_mode, dset_ids);
/* Verify the correct data was written */
- read_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ correct_bufs[dset_idx] = (C_DATATYPE *)calloc(1, correct_buf_size);
+ VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded");
+ read_bufs[dset_idx] = calloc(1, correct_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+
+ for (size_t j = 0; j < correct_buf_size / sizeof(C_DATATYPE); j++)
+ correct_bufs[dset_idx][j] =
+ ((C_DATATYPE)j % (WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size *
+ WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS)) +
+ ((C_DATATYPE)j / (WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size *
+ WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS)) +
+ (C_DATATYPE)dset_idx;
+ }
- dset_id = H5Dopen2(group_id, WRITE_ONE_CHUNK_FILTERED_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)),
+ "Data verification succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ free(read_bufs[dset_idx]);
+ free(correct_bufs[dset_idx]);
+ }
- if (correct_buf)
- free(correct_buf);
- if (read_buf)
- free(read_buf);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -549,11 +1003,12 @@ test_write_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t fil
*/
static void
test_write_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id)
+ hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
- C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t sel_dims[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
@@ -561,13 +1016,16 @@ test_write_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t fi
hsize_t stride[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
- size_t i, data_size, correct_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ size_t data_size, correct_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
- HDputs("Testing write to unshared filtered chunks");
+ puts("Testing write to unshared filtered chunks");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -586,9 +1044,6 @@ test_write_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t fi
filespace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
/* Create chunked dataset */
plist_id = H5Pcopy(dcpl_id);
VRFY((plist_id >= 0), "DCPL copy succeeded");
@@ -599,12 +1054,12 @@ test_write_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t fi
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ plist_id, test_mode, &num_dsets, dset_ids);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -621,70 +1076,69 @@ test_write_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t fi
start[0] = ((hsize_t)mpi_rank * (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS * count[0]);
start[1] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
/* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE);
- data = (C_DATATYPE *)calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx);
- for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE)GEN_DATA(i);
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
+ }
- for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) +
- (i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])));
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+ test_mode);
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
- "Dataset write succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- if (data)
- free(data);
+ /* Close and re-open datasets */
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ open_datasets(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode, dset_ids);
/* Verify the correct data was written */
- read_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ correct_bufs[dset_idx] = (C_DATATYPE *)calloc(1, correct_buf_size);
+ VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded");
+ read_bufs[dset_idx] = calloc(1, correct_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+
+ for (size_t j = 0; j < correct_buf_size / sizeof(C_DATATYPE); j++)
+ correct_bufs[dset_idx][j] =
+ (C_DATATYPE)((j % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) +
+ (j / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + dset_idx);
+ }
- dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)),
+ "Data verification succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ free(read_bufs[dset_idx]);
+ free(correct_bufs[dset_idx]);
+ }
- if (correct_buf)
- free(correct_buf);
- if (read_buf)
- free(read_buf);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -698,11 +1152,13 @@ test_write_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t fi
*/
static void
test_write_filtered_dataset_no_overlap_partial(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
- C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS];
hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS];
hsize_t sel_dims[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS];
@@ -710,13 +1166,16 @@ test_write_filtered_dataset_no_overlap_partial(const char *parent_group, H5Z_fil
hsize_t stride[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS];
hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS];
hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS];
- size_t i, data_size, correct_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ size_t data_size, correct_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
hid_t group_id = H5I_INVALID_HID;
hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
- HDputs("Testing partial write to unshared filtered chunks");
+ puts("Testing partial write to unshared filtered chunks");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -746,12 +1205,12 @@ test_write_filtered_dataset_no_overlap_partial(const char *parent_group, H5Z_fil
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_NAME, HDF5_DATATYPE_NAME,
- filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, plist_id, test_mode, &num_dsets, dset_ids);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -768,78 +1227,77 @@ test_write_filtered_dataset_no_overlap_partial(const char *parent_group, H5Z_fil
start[0] = ((hsize_t)mpi_rank * (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NROWS * count[0]);
start[1] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
/* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
-
- data = (C_DATATYPE *)calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE);
- correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE)GEN_DATA(i);
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx);
- for (i = 0; i < (size_t)mpi_size; i++) {
- size_t rank_n_elems = (size_t)(mpi_size * (WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NROWS *
- WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NCOLS));
- size_t data_idx = i;
-
- for (size_t j = 0; j < rank_n_elems; j++) {
- if ((j % WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NCOLS) == 0) {
- correct_buf[(i * rank_n_elems) + j] = (C_DATATYPE)data_idx;
- data_idx++;
- }
- }
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
}
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
- "Dataset write succeeded");
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+ test_mode);
- /* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
- if (data)
- free(data);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ /* Close and re-open datasets */
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+
+ open_datasets(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_NAME, num_dsets, test_mode,
+ dset_ids);
/* Verify the correct data was written */
- read_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ correct_bufs[dset_idx] = (C_DATATYPE *)calloc(1, correct_buf_size);
+ VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded");
+ read_bufs[dset_idx] = calloc(1, correct_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+
+ for (size_t j = 0; j < (size_t)mpi_size; j++) {
+ size_t data_idx = j;
+ size_t rank_n_elems = (size_t)(mpi_size * (WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NROWS *
+ WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NCOLS));
+
+ for (size_t k = 0; k < rank_n_elems; k++) {
+ if ((k % WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NCOLS) == 0) {
+ correct_bufs[dset_idx][(j * rank_n_elems) + k] = (C_DATATYPE)(data_idx + dset_idx);
+ data_idx++;
+ }
+ }
+ }
+ }
- dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)),
+ "Data verification succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ free(read_bufs[dset_idx]);
+ free(correct_bufs[dset_idx]);
+ }
- if (correct_buf)
- free(correct_buf);
- if (read_buf)
- free(read_buf);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -856,11 +1314,12 @@ test_write_filtered_dataset_no_overlap_partial(const char *parent_group, H5Z_fil
*/
static void
test_write_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id)
+ hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
- C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
hsize_t dataset_dims[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t chunk_dims[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t sel_dims[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
@@ -868,13 +1327,16 @@ test_write_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filte
hsize_t stride[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t count[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t block[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
- size_t i, data_size, correct_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ size_t data_size, correct_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
- HDputs("Testing write to shared filtered chunks");
+ puts("Testing write to shared filtered chunks");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -893,9 +1355,6 @@ test_write_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filte
filespace = H5Screate_simple(WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
/* Create chunked dataset */
plist_id = H5Pcopy(dcpl_id);
VRFY((plist_id >= 0), "DCPL copy succeeded");
@@ -906,12 +1365,12 @@ test_write_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filte
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(group_id, WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ plist_id, test_mode, &num_dsets, dset_ids);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -927,72 +1386,71 @@ test_write_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filte
start[0] = (hsize_t)mpi_rank * block[0];
start[1] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
/* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE);
- data = (C_DATATYPE *)calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx);
- for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE)GEN_DATA(i);
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
+ }
- for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] =
- (C_DATATYPE)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) +
- (i % dataset_dims[1]) +
- (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+ test_mode);
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
- "Dataset write succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
- /* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- if (data)
- free(data);
+ /* Close and re-open datasets */
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ open_datasets(group_id, WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode, dset_ids);
- /* Verify correct data was written */
- read_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ /* Verify the correct data was written */
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ correct_bufs[dset_idx] = (C_DATATYPE *)calloc(1, correct_buf_size);
+ VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded");
+ read_bufs[dset_idx] = calloc(1, correct_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+
+ for (size_t j = 0; j < correct_buf_size / sizeof(C_DATATYPE); j++)
+ correct_bufs[dset_idx][j] =
+ (C_DATATYPE)((dataset_dims[1] * (j / ((hsize_t)mpi_size * dataset_dims[1]))) +
+ (j % dataset_dims[1]) +
+ (((j % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) %
+ dataset_dims[1]) +
+ dset_idx);
+ }
- dset_id = H5Dopen2(group_id, WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)),
+ "Data verification succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ free(read_bufs[dset_idx]);
+ free(correct_bufs[dset_idx]);
+ }
- if (correct_buf)
- free(correct_buf);
- if (read_buf)
- free(read_buf);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -1009,10 +1467,12 @@ test_write_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filte
*/
static void
test_write_filtered_dataset_single_unlim_dim_no_overlap(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
hsize_t dataset_dims[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS];
hsize_t max_dims[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS];
hsize_t chunk_dims[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS];
@@ -1021,13 +1481,16 @@ test_write_filtered_dataset_single_unlim_dim_no_overlap(const char *parent_group
hsize_t stride[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS];
hsize_t count[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS];
hsize_t block[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS];
- size_t i, data_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ size_t data_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
hid_t group_id = H5I_INVALID_HID;
hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
- HDputs("Testing write to unshared filtered chunks w/ single unlimited dimension");
+ puts("Testing write to unshared filtered chunks w/ single unlimited dimension");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -1058,32 +1521,35 @@ test_write_filtered_dataset_single_unlim_dim_no_overlap(const char *parent_group
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ plist_id, test_mode, &num_dsets, dset_ids);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE);
- data = (C_DATATYPE *)calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- read_buf = (C_DATATYPE *)calloc(1, data_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx);
- for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE)GEN_DATA(i);
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
- for (i = 0; i < (size_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NLOOPS; i++) {
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+ tmp_buf = (C_DATATYPE *)calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
+
+ read_bufs[dset_idx] = tmp_buf;
+ }
+ for (size_t i = 0; i < (size_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NLOOPS; i++) {
/* Each process defines the dataset selection in memory and writes
* it to the hyperslab in the file
*/
@@ -1097,56 +1563,55 @@ test_write_filtered_dataset_single_unlim_dim_no_overlap(const char *parent_group
start[0] = ((hsize_t)mpi_rank * block[0] * count[0]);
start[1] = i * count[1] * block[1];
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0],
- block[1]);
- fflush(stdout);
- }
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
- "Dataset write succeeded");
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+ test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ /* Close and re-open datasets */
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
- dset_id = H5Dopen2(group_id, WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_NAME, num_dsets, test_mode, dset_ids);
- memset(read_buf, 255, data_size);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ memset(read_bufs[dset_idx], 255, data_size);
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs,
+ test_mode);
/* Verify the correct data was written */
- VRFY((0 == memcmp(read_buf, data, data_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)),
+ "Data verification succeeded");
if (i < (size_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NLOOPS - 1) {
- /* Extend the dataset by count[1] chunks in the extensible dimension */
+ /* Extend the dataset(s) by count[1] chunks in the extensible dimension */
dataset_dims[1] += count[1] * block[1];
- VRFY(H5Dset_extent(dset_id, dataset_dims) >= 0, "H5Dset_extent succeeded");
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY(H5Dset_extent(dset_ids[dset_idx], dataset_dims) >= 0, "H5Dset_extent succeeded");
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN);
}
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ }
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ free(read_bufs[dset_idx]);
+ free(data_bufs_nc[dset_idx]);
}
- if (data)
- free(data);
- if (read_buf)
- free(read_buf);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -1163,10 +1628,12 @@ test_write_filtered_dataset_single_unlim_dim_no_overlap(const char *parent_group
*/
static void
test_write_filtered_dataset_single_unlim_dim_overlap(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
hsize_t dataset_dims[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS];
hsize_t max_dims[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS];
hsize_t chunk_dims[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS];
@@ -1175,13 +1642,16 @@ test_write_filtered_dataset_single_unlim_dim_overlap(const char *parent_group, H
hsize_t stride[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS];
hsize_t count[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS];
hsize_t block[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS];
- size_t i, data_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ size_t data_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
hid_t group_id = H5I_INVALID_HID;
hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
- HDputs("Testing write to shared filtered chunks w/ single unlimited dimension");
+ puts("Testing write to shared filtered chunks w/ single unlimited dimension");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -1212,32 +1682,35 @@ test_write_filtered_dataset_single_unlim_dim_overlap(const char *parent_group, H
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(group_id, WRITE_SHARED_ONE_UNLIM_DIM_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, WRITE_SHARED_ONE_UNLIM_DIM_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ plist_id, test_mode, &num_dsets, dset_ids);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE);
- data = (C_DATATYPE *)calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- read_buf = (C_DATATYPE *)calloc(1, data_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx);
- for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE)GEN_DATA(i);
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
- for (i = 0; i < (size_t)WRITE_SHARED_ONE_UNLIM_DIM_NLOOPS; i++) {
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+ tmp_buf = (C_DATATYPE *)calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
+ read_bufs[dset_idx] = tmp_buf;
+ }
+
+ for (size_t i = 0; i < (size_t)WRITE_SHARED_ONE_UNLIM_DIM_NLOOPS; i++) {
/* Each process defines the dataset selection in memory and writes
* it to the hyperslab in the file
*/
@@ -1250,56 +1723,55 @@ test_write_filtered_dataset_single_unlim_dim_overlap(const char *parent_group, H
start[0] = (hsize_t)mpi_rank * block[0];
start[1] = i * count[1] * block[1];
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0],
- block[1]);
- fflush(stdout);
- }
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
- "Dataset write succeeded");
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+ test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ /* Close and re-open datasets */
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
- dset_id = H5Dopen2(group_id, WRITE_SHARED_ONE_UNLIM_DIM_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, WRITE_SHARED_ONE_UNLIM_DIM_DATASET_NAME, num_dsets, test_mode, dset_ids);
- memset(read_buf, 255, data_size);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ memset(read_bufs[dset_idx], 255, data_size);
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs,
+ test_mode);
- /* Verify correct data was written */
- VRFY((0 == memcmp(read_buf, data, data_size)), "Data verification succeeded");
+ /* Verify the correct data was written */
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)),
+ "Data verification succeeded");
if (i < (size_t)WRITE_SHARED_ONE_UNLIM_DIM_NLOOPS - 1) {
- /* Extend the dataset by count[1] chunks in the extensible dimension */
+ /* Extend the dataset(s) by count[1] chunks in the extensible dimension */
dataset_dims[1] += count[1] * block[1];
- VRFY(H5Dset_extent(dset_id, dataset_dims) >= 0, "H5Dset_extent succeeded");
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY(H5Dset_extent(dset_ids[dset_idx], dataset_dims) >= 0, "H5Dset_extent succeeded");
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN);
}
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
}
- if (data)
- free(data);
- if (read_buf)
- free(read_buf);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ free(read_bufs[dset_idx]);
+ free(data_bufs_nc[dset_idx]);
+ }
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -1318,10 +1790,12 @@ test_write_filtered_dataset_single_unlim_dim_overlap(const char *parent_group, H
*/
static void
test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
hsize_t dataset_dims[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS];
hsize_t max_dims[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS];
hsize_t chunk_dims[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS];
@@ -1330,13 +1804,16 @@ test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char *parent_group,
hsize_t stride[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS];
hsize_t count[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS];
hsize_t block[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS];
- size_t i, data_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ size_t data_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
hid_t group_id = H5I_INVALID_HID;
hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
- HDputs("Testing write to unshared filtered chunks w/ two unlimited dimensions");
+ puts("Testing write to unshared filtered chunks w/ two unlimited dimensions");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -1367,40 +1844,38 @@ test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char *parent_group,
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ plist_id, test_mode, &num_dsets, dset_ids);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- for (i = 0; i < (size_t)WRITE_UNSHARED_TWO_UNLIM_DIM_NLOOPS; i++) {
- C_DATATYPE *tmp_realloc = NULL;
- size_t j;
-
+ for (size_t i = 0; i < (size_t)WRITE_UNSHARED_TWO_UNLIM_DIM_NLOOPS; i++) {
/* Set selected dimensions */
sel_dims[0] = (i + 1) * WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NROWS;
sel_dims[1] = (i + 1) * WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NCOLS;
/* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE);
- tmp_realloc = (C_DATATYPE *)realloc(data, data_size);
- VRFY((NULL != tmp_realloc), "realloc succeeded");
- data = tmp_realloc;
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ C_DATATYPE *tmp_buf = realloc(data_bufs_nc[dset_idx], data_size);
+ VRFY((NULL != tmp_buf), "realloc succeeded");
- tmp_realloc = (C_DATATYPE *)realloc(read_buf, data_size);
- VRFY((NULL != tmp_realloc), "realloc succeeded");
- read_buf = tmp_realloc;
+ for (size_t k = 0; k < data_size / sizeof(C_DATATYPE); k++)
+ tmp_buf[k] = (C_DATATYPE)(GEN_DATA(k) + dset_idx);
- for (j = 0; j < data_size / sizeof(*data); j++)
- data[j] = (C_DATATYPE)GEN_DATA(j);
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+ tmp_buf = realloc(read_bufs[dset_idx], data_size);
+ VRFY((NULL != tmp_buf), "realloc succeeded");
+
+ read_bufs[dset_idx] = tmp_buf;
+ }
/* Each process defines the dataset selection in memory and writes
* it to the hyperslab in the file
@@ -1414,61 +1889,60 @@ test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char *parent_group,
start[0] = ((hsize_t)mpi_rank * block[0] * count[0]);
start[1] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0],
- block[1]);
- fflush(stdout);
- }
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
- "Dataset write succeeded");
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+ test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ /* Close and re-open datasets */
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
- dset_id = H5Dopen2(group_id, WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_NAME, num_dsets, test_mode, dset_ids);
- memset(read_buf, 255, data_size);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ memset(read_bufs[dset_idx], 255, data_size);
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs,
+ test_mode);
/* Verify the correct data was written */
- VRFY((0 == memcmp(read_buf, data, data_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)),
+ "Data verification succeeded");
if (i < (size_t)WRITE_UNSHARED_TWO_UNLIM_DIM_NLOOPS - 1) {
/*
- * Extend the dataset by the size of one chunk per rank
- * in the first extensible dimension. Extend the dataset
+ * Extend the dataset(s) by the size of one chunk per rank
+ * in the first extensible dimension. Extend the dataset(s)
* by the size of chunk in the second extensible dimension.
*/
dataset_dims[0] += (hsize_t)mpi_size * block[0];
dataset_dims[1] += block[1];
- VRFY(H5Dset_extent(dset_id, dataset_dims) >= 0, "H5Dset_extent succeeded");
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY(H5Dset_extent(dset_ids[dset_idx], dataset_dims) >= 0, "H5Dset_extent succeeded");
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN);
}
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ }
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ free(read_bufs[dset_idx]);
+ free(data_bufs_nc[dset_idx]);
}
- if (data)
- free(data);
- if (read_buf)
- free(read_buf);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -1485,10 +1959,12 @@ test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char *parent_group,
*/
static void
test_write_filtered_dataset_multi_unlim_dim_overlap(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
hsize_t dataset_dims[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS];
hsize_t max_dims[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS];
hsize_t chunk_dims[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS];
@@ -1497,13 +1973,16 @@ test_write_filtered_dataset_multi_unlim_dim_overlap(const char *parent_group, H5
hsize_t stride[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS];
hsize_t count[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS];
hsize_t block[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS];
- size_t i, data_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ size_t data_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
hid_t group_id = H5I_INVALID_HID;
hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
- HDputs("Testing write to shared filtered chunks w/ two unlimited dimensions");
+ puts("Testing write to shared filtered chunks w/ two unlimited dimensions");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -1534,40 +2013,38 @@ test_write_filtered_dataset_multi_unlim_dim_overlap(const char *parent_group, H5
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(group_id, WRITE_SHARED_TWO_UNLIM_DIM_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, WRITE_SHARED_TWO_UNLIM_DIM_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ plist_id, test_mode, &num_dsets, dset_ids);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- for (i = 0; i < (size_t)WRITE_SHARED_TWO_UNLIM_DIM_NLOOPS; i++) {
- C_DATATYPE *tmp_realloc = NULL;
- size_t j;
-
+ for (size_t i = 0; i < (size_t)WRITE_SHARED_TWO_UNLIM_DIM_NLOOPS; i++) {
/* Set selected dimensions */
sel_dims[0] = (i + 1);
sel_dims[1] = (i + 1) * (size_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NCOLS;
/* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE);
- tmp_realloc = (C_DATATYPE *)realloc(data, data_size);
- VRFY((NULL != tmp_realloc), "realloc succeeded");
- data = tmp_realloc;
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ C_DATATYPE *tmp_buf = realloc(data_bufs_nc[dset_idx], data_size);
+ VRFY((NULL != tmp_buf), "realloc succeeded");
- tmp_realloc = (C_DATATYPE *)realloc(read_buf, data_size);
- VRFY((NULL != tmp_realloc), "realloc succeeded");
- read_buf = tmp_realloc;
+ for (size_t k = 0; k < data_size / sizeof(C_DATATYPE); k++)
+ tmp_buf[k] = (C_DATATYPE)(GEN_DATA(k) + dset_idx);
- for (j = 0; j < data_size / sizeof(*data); j++)
- data[j] = (C_DATATYPE)GEN_DATA(j);
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+ tmp_buf = realloc(read_bufs[dset_idx], data_size);
+ VRFY((NULL != tmp_buf), "realloc succeeded");
+
+ read_bufs[dset_idx] = tmp_buf;
+ }
/* Each process defines the dataset selection in memory and writes
* it to the hyperslab in the file
@@ -1581,57 +2058,56 @@ test_write_filtered_dataset_multi_unlim_dim_overlap(const char *parent_group, H5
start[0] = (hsize_t)mpi_rank;
start[1] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0],
- block[1]);
- fflush(stdout);
- }
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
- "Dataset write succeeded");
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+ test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ /* Close and re-open datasets */
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
- dset_id = H5Dopen2(group_id, WRITE_SHARED_TWO_UNLIM_DIM_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, WRITE_SHARED_TWO_UNLIM_DIM_DATASET_NAME, num_dsets, test_mode, dset_ids);
- memset(read_buf, 255, data_size);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ memset(read_bufs[dset_idx], 255, data_size);
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs,
+ test_mode);
- /* Verify correct data was written */
- VRFY((0 == memcmp(read_buf, data, data_size)), "Data verification succeeded");
+ /* Verify the correct data was written */
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)),
+ "Data verification succeeded");
if (i < (size_t)WRITE_SHARED_TWO_UNLIM_DIM_NLOOPS - 1) {
- /* Extend the dataset by the size of a chunk in each extensible dimension */
+ /* Extend the dataset(s) by the size of a chunk in each extensible dimension */
dataset_dims[0] += (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NROWS;
dataset_dims[1] += (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NCOLS;
- VRFY(H5Dset_extent(dset_id, dataset_dims) >= 0, "H5Dset_extent succeeded");
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY(H5Dset_extent(dset_ids[dset_idx], dataset_dims) >= 0, "H5Dset_extent succeeded");
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN);
}
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
}
- if (data)
- free(data);
- if (read_buf)
- free(read_buf);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ free(read_bufs[dset_idx]);
+ free(data_bufs_nc[dset_idx]);
+ }
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -1650,11 +2126,13 @@ test_write_filtered_dataset_multi_unlim_dim_overlap(const char *parent_group, H5
*/
static void
test_write_filtered_dataset_single_no_selection(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
- C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
hsize_t dataset_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t chunk_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t sel_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
@@ -1662,14 +2140,16 @@ test_write_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fi
hsize_t stride[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t count[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t block[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
- size_t i, data_size, correct_buf_size;
- size_t segment_length;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ size_t data_size, correct_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
- HDputs("Testing write to filtered chunks with a single process having no selection");
+ puts("Testing write to filtered chunks with a single process having no selection");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -1691,9 +2171,6 @@ test_write_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fi
filespace = H5Screate_simple(WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
/* Create chunked dataset */
plist_id = H5Pcopy(dcpl_id);
VRFY((plist_id >= 0), "DCPL copy succeeded");
@@ -1704,12 +2181,12 @@ test_write_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fi
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(group_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
- filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, plist_id, test_mode, &num_dsets, dset_ids);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -1726,80 +2203,85 @@ test_write_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fi
start[0] = (hsize_t)mpi_rank * (hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * count[0];
start[1] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
if (mpi_rank == WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC)
- VRFY((H5Sselect_none(filespace) >= 0), "Select none succeeded");
+ select_none(num_dsets, dset_ids, fspace_ids);
else
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
/* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE);
if (mpi_rank != WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC) {
- data = (C_DATATYPE *)calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE)GEN_DATA(i);
- }
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx);
- correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
-
- for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) +
- (i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])));
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
+ }
+ }
- /* Compute the correct offset into the buffer for the process having no selection and clear it */
- segment_length = dataset_dims[0] * dataset_dims[1] / (hsize_t)mpi_size;
- memset(correct_buf + ((size_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC * segment_length),
- 0, segment_length * sizeof(*data));
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+ test_mode);
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
- "Dataset write succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
/* Verify space allocation status - data should only have been written if MPI size > 1 */
- verify_space_alloc_status(dset_id, plist_id, (mpi_size > 1 ? SOME_CHUNKS_WRITTEN : NO_CHUNKS_WRITTEN));
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id,
+ (mpi_size > 1 ? SOME_CHUNKS_WRITTEN : NO_CHUNKS_WRITTEN));
- if (data)
- free(data);
+ /* Close and re-open datasets */
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ open_datasets(group_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode,
+ dset_ids);
/* Verify the correct data was written */
- read_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ correct_bufs[dset_idx] = (C_DATATYPE *)calloc(1, correct_buf_size);
+ VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded");
+ read_bufs[dset_idx] = calloc(1, correct_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+
+ for (size_t j = 0; j < correct_buf_size / sizeof(C_DATATYPE); j++) {
+ size_t segment_length;
+
+ correct_bufs[dset_idx][j] =
+ (C_DATATYPE)((j % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) +
+ (j / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + dset_idx);
+
+ /* Compute the correct offset into the buffer for the process having no selection and clear it */
+ segment_length = dataset_dims[0] * dataset_dims[1] / (hsize_t)mpi_size;
+ memset(correct_bufs[dset_idx] +
+ ((size_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC * segment_length),
+ 0, segment_length * sizeof(C_DATATYPE));
+ }
+ }
- dset_id = H5Dopen2(group_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)),
+ "Data verification succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ free(read_bufs[dset_idx]);
+ free(correct_bufs[dset_idx]);
+ }
- if (correct_buf)
- free(correct_buf);
- if (read_buf)
- free(read_buf);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -1819,21 +2301,24 @@ test_write_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fi
*/
static void
test_write_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id)
+ hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
- C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
hsize_t dataset_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t chunk_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
- hsize_t sel_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
- size_t i, data_size, correct_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ size_t data_size, correct_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
- HDputs("Testing write to filtered chunks with all processes having no selection");
+ puts("Testing write to filtered chunks with all processes having no selection");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -1846,14 +2331,10 @@ test_write_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filte
dataset_dims[1] = (hsize_t)WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
chunk_dims[0] = (hsize_t)WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
chunk_dims[1] = (hsize_t)WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
- sel_dims[0] = sel_dims[1] = 0;
filespace = H5Screate_simple(WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
/* Create chunked dataset */
plist_id = H5Pcopy(dcpl_id);
VRFY((plist_id >= 0), "DCPL copy succeeded");
@@ -1864,65 +2345,73 @@ test_write_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filte
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(group_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
- filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, plist_id, test_mode, &num_dsets, dset_ids);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
- VRFY((H5Sselect_none(filespace) >= 0), "Select none succeeded");
+ select_none(num_dsets, dset_ids, fspace_ids);
/* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+ data_size = sizeof(C_DATATYPE);
- data = (C_DATATYPE *)calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx);
- for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE)GEN_DATA(i);
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
+ }
+
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+ test_mode);
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
- "Dataset write succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
- /* Verify space allocation status - no ranks should have written any data */
- verify_space_alloc_status(dset_id, plist_id, NO_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, NO_CHUNKS_WRITTEN);
- if (data)
- free(data);
+ /* Close and re-open datasets */
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ open_datasets(group_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode,
+ dset_ids);
/* Verify the correct data was written */
- read_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE);
- dset_id = H5Dopen2(group_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ correct_bufs[dset_idx] = (C_DATATYPE *)calloc(1, correct_buf_size);
+ VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded");
+ read_bufs[dset_idx] = calloc(1, correct_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+ }
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)),
+ "Data verification succeeded");
- if (correct_buf)
- free(correct_buf);
- if (read_buf)
- free(read_buf);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ free(read_bufs[dset_idx]);
+ free(correct_bufs[dset_idx]);
+ }
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -1936,23 +2425,27 @@ test_write_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filte
*/
static void
test_write_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id)
+ hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *correct_buf = NULL;
- C_DATATYPE *read_buf = NULL;
- hsize_t *coords = NULL;
+ C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ hsize_t *coords = NULL;
hsize_t dataset_dims[WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t chunk_dims[WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t sel_dims[WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
- size_t i, j, data_size, correct_buf_size;
+ size_t data_size, correct_buf_size;
size_t num_points;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
- HDputs("Testing write to filtered chunks with point selection");
+ puts("Testing write to filtered chunks with point selection");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -1971,9 +2464,6 @@ test_write_filtered_dataset_point_selection(const char *parent_group, H5Z_filter
filespace = H5Screate_simple(WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
/* Create chunked dataset */
plist_id = H5Pcopy(dcpl_id);
VRFY((plist_id >= 0), "DCPL copy succeeded");
@@ -1984,87 +2474,96 @@ test_write_filtered_dataset_point_selection(const char *parent_group, H5Z_filter
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(group_id, WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
- filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, plist_id, test_mode, &num_dsets, dset_ids);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Set up point selection */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
num_points = (hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_NROWS *
(hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t)mpi_size;
coords = (hsize_t *)calloc(1, 2 * num_points * sizeof(*coords));
VRFY((NULL != coords), "Coords calloc succeeded");
- for (i = 0; i < num_points; i++)
- for (j = 0; j < WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS; j++)
+ for (size_t i = 0; i < num_points; i++)
+ for (size_t j = 0; j < WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS; j++)
coords[(i * WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS) + j] =
(j > 0) ? (i % (hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS)
: ((hsize_t)mpi_rank +
((hsize_t)mpi_size * (i / (hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS)));
- VRFY((H5Sselect_elements(filespace, H5S_SELECT_SET, (hsize_t)num_points, (const hsize_t *)coords) >= 0),
- "Point selection succeeded");
+ select_elements(num_dsets, dset_ids, num_points, coords, fspace_ids);
/* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE);
- data = (C_DATATYPE *)calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx);
- for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE)GEN_DATA(i);
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
+ }
- for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] =
- (C_DATATYPE)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) +
- (i % dataset_dims[1]) +
- (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+ test_mode);
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
- "Dataset write succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
- /* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- if (data)
- free(data);
+ /* Close and re-open datasets */
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ open_datasets(group_id, WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode,
+ dset_ids);
/* Verify the correct data was written */
- read_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ correct_bufs[dset_idx] = (C_DATATYPE *)calloc(1, correct_buf_size);
+ VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded");
+ read_bufs[dset_idx] = calloc(1, correct_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+
+ for (size_t j = 0; j < correct_buf_size / sizeof(C_DATATYPE); j++)
+ correct_bufs[dset_idx][j] =
+ (C_DATATYPE)((dataset_dims[1] * (j / ((hsize_t)mpi_size * dataset_dims[1]))) +
+ (j % dataset_dims[1]) +
+ (((j % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) %
+ dataset_dims[1]) +
+ dset_idx);
+ }
- dset_id = H5Dopen2(group_id, WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)),
+ "Data verification succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ free(read_bufs[dset_idx]);
+ free(correct_bufs[dset_idx]);
+ }
- if (coords)
- free(coords);
- if (correct_buf)
- free(correct_buf);
- if (read_buf)
- free(read_buf);
+ free(coords);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -2082,11 +2581,12 @@ test_write_filtered_dataset_point_selection(const char *parent_group, H5Z_filter
*/
static void
test_write_filtered_dataset_interleaved_write(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id)
+ hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
- C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
hsize_t dataset_dims[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
hsize_t chunk_dims[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
hsize_t sel_dims[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
@@ -2094,13 +2594,16 @@ test_write_filtered_dataset_interleaved_write(const char *parent_group, H5Z_filt
hsize_t stride[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
hsize_t count[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
hsize_t block[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
- size_t i, data_size, correct_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ size_t data_size, correct_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
- HDputs("Testing interleaved write to filtered chunks");
+ puts("Testing interleaved write to filtered chunks");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -2119,9 +2622,6 @@ test_write_filtered_dataset_interleaved_write(const char *parent_group, H5Z_filt
filespace = H5Screate_simple(INTERLEAVED_WRITE_FILTERED_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(INTERLEAVED_WRITE_FILTERED_DATASET_DIMS, sel_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
/* Create chunked dataset */
plist_id = H5Pcopy(dcpl_id);
VRFY((plist_id >= 0), "DCPL copy succeeded");
@@ -2132,12 +2632,12 @@ test_write_filtered_dataset_interleaved_write(const char *parent_group, H5Z_filt
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(group_id, INTERLEAVED_WRITE_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, INTERLEAVED_WRITE_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ plist_id, test_mode, &num_dsets, dset_ids);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -2155,80 +2655,81 @@ test_write_filtered_dataset_interleaved_write(const char *parent_group, H5Z_filt
start[0] = (hsize_t)mpi_rank;
start[1] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
/* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE);
- data = (C_DATATYPE *)calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx);
- for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE)GEN_DATA(i);
-
- for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- /* Add Column Index */
- correct_buf[i] =
- (C_DATATYPE)((i % (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)
-
- /* Add the Row Index */
- + ((i % (hsize_t)(mpi_size * INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)) /
- (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
+ }
- /* Add the amount that gets added when a rank moves down to its next section
- vertically in the dataset */
- + ((hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS *
- (i / (hsize_t)(mpi_size * INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS))));
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+ test_mode);
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
- "Dataset write succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- if (data)
- free(data);
+ /* Close and re-open datasets */
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ open_datasets(group_id, INTERLEAVED_WRITE_FILTERED_DATASET_NAME, num_dsets, test_mode, dset_ids);
/* Verify the correct data was written */
- read_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ correct_bufs[dset_idx] = (C_DATATYPE *)calloc(1, correct_buf_size);
+ VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded");
+ read_bufs[dset_idx] = calloc(1, correct_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+
+ for (size_t j = 0; j < correct_buf_size / sizeof(C_DATATYPE); j++)
+ correct_bufs[dset_idx][j] =
+ /* Add the Column Index */
+ (C_DATATYPE)((j % (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)
+
+ /* Add the Row Index */
+ + ((j % (hsize_t)(mpi_size * INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)) /
+ (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)
+
+ /* Add the amount that gets added when a rank moves down to its next section
+ vertically in the dataset */
+ + ((hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS *
+ (j / (hsize_t)(mpi_size * INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)))
+
+ /* Add an increment factor for the multi-dataset case */
+ + dset_idx);
+ }
- dset_id = H5Dopen2(group_id, INTERLEAVED_WRITE_FILTERED_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)),
+ "Data verification succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ free(read_bufs[dset_idx]);
+ free(correct_bufs[dset_idx]);
+ }
- if (correct_buf)
- free(correct_buf);
- if (read_buf)
- free(read_buf);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -2251,11 +2752,13 @@ test_write_filtered_dataset_interleaved_write(const char *parent_group, H5Z_filt
*/
static void
test_write_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
- C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
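+ /* One buffer per dataset: data_bufs holds const pointers for the write helper, while
+ data_bufs_nc keeps the same pointers non-const so they can be freed afterwards. */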
hsize_t dataset_dims[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t chunk_dims[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t sel_dims[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
@@ -2263,13 +2766,16 @@ test_write_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z
hsize_t stride[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t count[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t block[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
- size_t i, data_size, correct_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ size_t data_size, correct_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
- HDputs("Testing write to unshared transformed and filtered chunks");
+ puts("Testing write to unshared transformed and filtered chunks");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -2288,9 +2794,6 @@ test_write_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z
filespace = H5Screate_simple(WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
/* Create chunked dataset */
plist_id = H5Pcopy(dcpl_id);
VRFY((plist_id >= 0), "DCPL copy succeeded");
@@ -2301,12 +2804,12 @@ test_write_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME,
- HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, plist_id, test_mode, &num_dsets, dset_ids);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -2324,37 +2827,21 @@ test_write_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z
start[0] = ((hsize_t)mpi_rank * (hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS * count[0]);
start[1] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
/* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
-
- data = (C_DATATYPE *)calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE);
- correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE)GEN_DATA(i);
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx);
- for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) +
- (i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])));
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
+ }
/* Create property list for data transform */
plist_id = H5Pcopy(dxpl_id);
@@ -2363,41 +2850,57 @@ test_write_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z
/* Set data transform expression */
VRFY((H5Pset_data_transform(plist_id, "x") >= 0), "Set data transform expression succeeded");
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
- "Dataset write succeeded");
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, plist_id, data_bufs,
+ test_mode);
- if (data)
- free(data);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
- /* Verify the correct data was written */
- read_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ /* Verify space allocation status */
+ plist_id = H5Dget_create_plist(dset_ids[0]);
+ VRFY((plist_id >= 0), "H5Dget_create_plist succeeded");
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
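+ /* The DCPL is re-fetched from the first dataset above because the local plist_id had
+ been repurposed for the data-transform DXPL earlier in this test. */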
- dset_id = H5Dopen2(group_id, WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ /* Close and re-open datasets */
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ open_datasets(group_id, WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode,
+ dset_ids);
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ /* Verify the correct data was written */
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ correct_bufs[dset_idx] = (C_DATATYPE *)calloc(1, correct_buf_size);
+ VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded");
+ read_bufs[dset_idx] = calloc(1, correct_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+
+ for (size_t j = 0; j < correct_buf_size / sizeof(C_DATATYPE); j++)
+ correct_bufs[dset_idx][j] =
+ (C_DATATYPE)((j % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) +
+ (j / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + dset_idx);
+ }
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
- /* Verify space allocation status */
- plist_id = H5Dget_create_plist(dset_id);
- VRFY((plist_id >= 0), "H5Dget_create_plist succeeded");
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)),
+ "Data verification succeeded");
- if (correct_buf)
- free(correct_buf);
- if (read_buf)
- free(read_buf);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ free(read_bufs[dset_idx]);
+ free(correct_bufs[dset_idx]);
+ }
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -2413,11 +2916,13 @@ test_write_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z
*/
static void
test_write_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
- C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
hsize_t sel_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
@@ -2425,13 +2930,16 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_grou
hsize_t stride[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
- size_t i, data_size, correct_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ size_t data_size, correct_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
- HDputs("Testing write to unshared filtered chunks on separate pages in 3D dataset");
+ puts("Testing write to unshared filtered chunks on separate pages in 3D dataset");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -2453,9 +2961,6 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_grou
filespace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, sel_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
/* Create chunked dataset */
plist_id = H5Pcopy(dcpl_id);
VRFY((plist_id >= 0), "DCPL copy succeeded");
@@ -2466,12 +2971,12 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_grou
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME,
- HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, plist_id, test_mode, &num_dsets, dset_ids);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -2493,71 +2998,69 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_grou
start[1] = 0;
start[2] = (hsize_t)mpi_rank;
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
- " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE
- ", %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
- " ]\n",
- mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1],
- start[2], block[0], block[1], block[2]);
- fflush(stdout);
- }
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
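+ /* select_hyperslab() appears to replace the removed per-dataset H5Dget_space() and
+ H5Sselect_hyperslab() calls, returning each dataset's file dataspace in fspace_ids. */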
/* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data);
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
+ data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(C_DATATYPE);
- data = (C_DATATYPE *)calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx);
- for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE)GEN_DATA(i);
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
+ }
- for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = (C_DATATYPE)((i % (hsize_t)mpi_size) + (i / (hsize_t)mpi_size));
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+ test_mode);
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
- "Dataset write succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- if (data)
- free(data);
+ /* Close and re-open datasets */
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ open_datasets(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, num_dsets, test_mode,
+ dset_ids);
/* Verify the correct data was written */
- read_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(C_DATATYPE);
- dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ correct_bufs[dset_idx] = (C_DATATYPE *)calloc(1, correct_buf_size);
+ VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded");
+ read_bufs[dset_idx] = calloc(1, correct_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+
+ for (size_t j = 0; j < correct_buf_size / sizeof(C_DATATYPE); j++)
+ correct_bufs[dset_idx][j] =
+ (C_DATATYPE)((j % (hsize_t)mpi_size) + (j / (hsize_t)mpi_size) + dset_idx);
+ }
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)),
+ "Data verification succeeded");
- if (correct_buf)
- free(correct_buf);
- if (read_buf)
- free(read_buf);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ free(read_bufs[dset_idx]);
+ free(correct_bufs[dset_idx]);
+ }
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -2573,11 +3076,13 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_grou
*/
static void
test_write_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
- C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
hsize_t sel_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
@@ -2585,13 +3090,16 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H
hsize_t stride[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
- size_t i, data_size, correct_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ size_t data_size, correct_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
- HDputs("Testing write to unshared filtered chunks on the same pages in 3D dataset");
+ puts("Testing write to unshared filtered chunks on the same pages in 3D dataset");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -2614,9 +3122,6 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H
H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, sel_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
/* Create chunked dataset */
plist_id = H5Pcopy(dcpl_id);
VRFY((plist_id >= 0), "DCPL copy succeeded");
@@ -2627,12 +3132,12 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME,
- HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, plist_id, test_mode, &num_dsets, dset_ids);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -2653,72 +3158,69 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H
start[1] = 0;
start[2] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
- " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE
- ", %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
- " ]\n",
- mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1],
- start[2], block[0], block[1], block[2]);
- fflush(stdout);
- }
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
/* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data);
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
+ data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(C_DATATYPE);
- data = (C_DATATYPE *)calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx);
- for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE)GEN_DATA(i);
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
+ }
- for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] * dataset_dims[1])) +
- (i / (dataset_dims[0] * dataset_dims[1])));
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+ test_mode);
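+ /* H5S_BLOCK replaces the explicit memory dataspace used previously: the memory buffer
+ is treated as a contiguous block holding exactly the elements selected in the file
+ dataspace. */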
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
- "Dataset write succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- if (data)
- free(data);
+ /* Close and re-open datasets */
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ open_datasets(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, num_dsets, test_mode,
+ dset_ids);
/* Verify the correct data was written */
- read_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(C_DATATYPE);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ correct_bufs[dset_idx] = (C_DATATYPE *)calloc(1, correct_buf_size);
+ VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded");
+ read_bufs[dset_idx] = calloc(1, correct_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
- dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ for (size_t j = 0; j < correct_buf_size / sizeof(C_DATATYPE); j++)
+ correct_bufs[dset_idx][j] = (C_DATATYPE)((j % (dataset_dims[0] * dataset_dims[1])) +
+ (j / (dataset_dims[0] * dataset_dims[1])) + dset_idx);
+ }
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)),
+ "Data verification succeeded");
- if (correct_buf)
- free(correct_buf);
- if (read_buf)
- free(read_buf);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ free(read_bufs[dset_idx]);
+ free(correct_bufs[dset_idx]);
+ }
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -2734,11 +3236,12 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H
*/
static void
test_write_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id)
+ hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
- C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
hsize_t dataset_dims[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
hsize_t chunk_dims[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
hsize_t sel_dims[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
@@ -2746,13 +3249,16 @@ test_write_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fi
hsize_t stride[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
hsize_t count[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
hsize_t block[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
- size_t i, data_size, correct_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ size_t data_size, correct_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
- HDputs("Testing write to shared filtered chunks in 3D dataset");
+ puts("Testing write to shared filtered chunks in 3D dataset");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -2774,9 +3280,6 @@ test_write_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fi
filespace = H5Screate_simple(WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, sel_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
/* Create chunked dataset */
plist_id = H5Pcopy(dcpl_id);
VRFY((plist_id >= 0), "DCPL copy succeeded");
@@ -2787,12 +3290,12 @@ test_write_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fi
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(group_id, WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME,
- filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ plist_id, test_mode, &num_dsets, dset_ids);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -2812,86 +3315,86 @@ test_write_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fi
start[1] = 0;
start[2] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
- " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE
- ", %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
- " ]\n",
- mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1],
- start[2], block[0], block[1], block[2]);
- fflush(stdout);
- }
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
/* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data);
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
+ data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(C_DATATYPE);
- data = (C_DATATYPE *)calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx);
- for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE)GEN_DATA(i);
-
- for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- /* Add the Column Index */
- correct_buf[i] = (C_DATATYPE)((i % (hsize_t)(WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH *
- WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS))
-
- /* Add the Row Index */
- + ((i % (hsize_t)(mpi_size * WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH *
- WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS)) /
- (hsize_t)(WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH *
- WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS))
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
+ }
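+ /* Offsetting each dataset's data by dset_idx gives every dataset in the multi-dataset
+ modes a distinct pattern, which the per-dataset verification below relies on. */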
- /* Add the amount that gets added when a rank moves down to its next
- section vertically in the dataset */
- + ((hsize_t)(WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH *
- WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS) *
- (i / (hsize_t)(mpi_size * WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH *
- WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS))));
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+ test_mode);
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
- "Dataset write succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- if (data)
- free(data);
+ /* Close and re-open datasets */
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ open_datasets(group_id, WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, num_dsets, test_mode, dset_ids);
/* Verify the correct data was written */
- read_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(C_DATATYPE);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ correct_bufs[dset_idx] = (C_DATATYPE *)calloc(1, correct_buf_size);
+ VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded");
+ read_bufs[dset_idx] = calloc(1, correct_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+
+ for (size_t j = 0; j < correct_buf_size / sizeof(C_DATATYPE); j++)
+ correct_bufs[dset_idx][j] =
+ /* Add the Column Index */
+ (C_DATATYPE)((j % (hsize_t)(WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH *
+ WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS))
+
+ /* Add the Row Index */
+ + ((j % (hsize_t)(mpi_size * WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH *
+ WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS)) /
+ (hsize_t)(WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH *
+ WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS))
+
+ /* Add the amount added each time a rank moves down to its next
+ vertical section of the dataset */
+ + ((hsize_t)(WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH *
+ WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS) *
+ (j / (hsize_t)(mpi_size * WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH *
+ WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS)))
+
+ /* Add an increment factor for the multi-dataset case */
+ + dset_idx);
+ }
- dset_id = H5Dopen2(group_id, WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)),
+ "Data verification succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ free(read_bufs[dset_idx]);
+ free(correct_bufs[dset_idx]);
+ }
- if (correct_buf)
- free(correct_buf);
- if (read_buf)
- free(read_buf);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -2906,27 +3409,31 @@ test_write_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fi
*/
static void
test_write_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode)
{
- COMPOUND_C_DATATYPE *data = NULL;
- COMPOUND_C_DATATYPE *read_buf = NULL;
- COMPOUND_C_DATATYPE *correct_buf = NULL;
+ COMPOUND_C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
- hsize_t sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
hsize_t start[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
- size_t i, correct_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID,
- memtype = H5I_INVALID_HID;
- hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ size_t data_size, correct_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t memtype = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
- HDputs("Testing write to unshared filtered chunks in Compound Datatype dataset without Datatype "
- "conversion");
+ puts("Testing write to unshared filtered chunks in Compound Datatype dataset without Datatype "
+ "conversion");
/* SZIP and ScaleOffset filters don't support compound types */
if (filter_id == H5Z_FILTER_SZIP || filter_id == H5Z_FILTER_SCALEOFFSET) {
@@ -2946,17 +3453,11 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group
dataset_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS;
chunk_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS;
- sel_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
- sel_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS,
dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace =
- H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, sel_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
/* Create chunked dataset */
plist_id = H5Pcopy(dcpl_id);
VRFY((plist_id >= 0), "DCPL copy succeeded");
@@ -2979,12 +3480,12 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group
VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
"Datatype insertion succeeded");
- dset_id = H5Dcreate2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME,
- memtype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, memtype,
+ filespace, plist_id, test_mode, &num_dsets, dset_ids);
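+ /* create_datasets() presumably creates one or more datasets under this name depending
+ on test_mode, returning the number created in num_dsets and their IDs in dset_ids. */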
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -3000,76 +3501,77 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group
start[0] = 0;
start[1] = ((hsize_t)mpi_rank * WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS);
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
-
- data = (COMPOUND_C_DATATYPE *)calloc(
- 1, (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC * sizeof(*data));
- VRFY((NULL != data), "calloc succeeded");
-
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE);
-
- correct_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
/* Fill data buffer */
- for (i = 0; i < (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC; i++) {
- data[i].field1 = (short)GEN_DATA(i);
- data[i].field2 = (int)GEN_DATA(i);
- data[i].field3 = (long)GEN_DATA(i);
- }
-
- for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) {
- correct_buf[i].field1 = (short)((i % dataset_dims[1]) + (i / dataset_dims[1]));
-
- correct_buf[i].field2 = (int)((i % dataset_dims[1]) + (i / dataset_dims[1]));
+ data_size = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC *
+ sizeof(COMPOUND_C_DATATYPE);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ COMPOUND_C_DATATYPE *tmp_buf = (COMPOUND_C_DATATYPE *)calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
+
+ for (size_t j = 0;
+ j < (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC; j++) {
+ tmp_buf[j].field1 = (short)(GEN_DATA(j) + dset_idx);
+ tmp_buf[j].field2 = (int)(GEN_DATA(j) + dset_idx);
+ tmp_buf[j].field3 = (long)(GEN_DATA(j) + dset_idx);
+ }
- correct_buf[i].field3 = (long)((i % dataset_dims[1]) + (i / dataset_dims[1]));
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
}
- VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, dxpl_id, data) >= 0), "Dataset write succeeded");
+ write_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, test_mode);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- if (data)
- free(data);
+ /* Close and re-open datasets */
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ open_datasets(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, num_dsets,
+ test_mode, dset_ids);
/* Verify the correct data was written */
- read_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ correct_bufs[dset_idx] = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size);
+ VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded");
+ read_bufs[dset_idx] = calloc(1, correct_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
- dset_id =
- H5Dopen2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ for (size_t j = 0; j < correct_buf_size / sizeof(COMPOUND_C_DATATYPE); j++) {
+ size_t val = (j % dataset_dims[1]) + (j / dataset_dims[1]) + dset_idx;
- VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), "Dataset read succeeded");
+ correct_bufs[dset_idx][j].field1 = (short)val;
+ correct_bufs[dset_idx][j].field2 = (int)val;
+ correct_bufs[dset_idx][j].field3 = (long)val;
+ }
+ }
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ read_datasets(num_dsets, dset_ids, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
- if (correct_buf)
- free(correct_buf);
- if (read_buf)
- free(read_buf);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)),
+ "Data verification succeeded");
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ free(read_bufs[dset_idx]);
+ free(correct_bufs[dset_idx]);
+ }
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Tclose(memtype) >= 0), "Datatype close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -3085,27 +3587,31 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group
*/
static void
test_write_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode)
{
- COMPOUND_C_DATATYPE *data = NULL;
- COMPOUND_C_DATATYPE *read_buf = NULL;
- COMPOUND_C_DATATYPE *correct_buf = NULL;
+ COMPOUND_C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
- hsize_t sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
hsize_t start[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
- size_t i, correct_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID,
- memtype = H5I_INVALID_HID;
- hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ size_t data_size, correct_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t memtype = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
- HDputs("Testing write to shared filtered chunks in Compound Datatype dataset without Datatype "
- "conversion");
+ puts("Testing write to shared filtered chunks in Compound Datatype dataset without Datatype "
+ "conversion");
/* SZIP and ScaleOffset filters don't support compound types */
if (filter_id == H5Z_FILTER_SZIP || filter_id == H5Z_FILTER_SCALEOFFSET) {
@@ -3125,17 +3631,11 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group,
dataset_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS;
chunk_dims[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS;
chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
- sel_dims[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t)mpi_size;
- sel_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC;
filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS,
dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace =
- H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, sel_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
/* Create chunked dataset */
plist_id = H5Pcopy(dcpl_id);
VRFY((plist_id >= 0), "DCPL copy succeeded");
@@ -3158,12 +3658,12 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group,
VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
"Datatype insertion succeeded");
- dset_id = H5Dcreate2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, memtype,
- filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, memtype,
+ filespace, plist_id, test_mode, &num_dsets, dset_ids);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -3179,82 +3679,80 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group,
start[0] = (hsize_t)mpi_rank;
start[1] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
-
- data = (COMPOUND_C_DATATYPE *)calloc(
- 1, (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC * sizeof(*data));
- VRFY((NULL != data), "calloc succeeded");
-
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE);
-
- correct_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
/* Fill data buffer */
- for (i = 0; i < (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC; i++) {
- data[i].field1 = (short)GEN_DATA(i);
- data[i].field2 = (int)GEN_DATA(i);
- data[i].field3 = (long)GEN_DATA(i);
- }
-
- for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) {
- correct_buf[i].field1 =
- (short)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) +
- (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
-
- correct_buf[i].field2 =
- (int)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) +
- (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
+ data_size = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC *
+ sizeof(COMPOUND_C_DATATYPE);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ COMPOUND_C_DATATYPE *tmp_buf = (COMPOUND_C_DATATYPE *)calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
+
+ for (size_t j = 0; j < (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC;
+ j++) {
+ tmp_buf[j].field1 = (short)(GEN_DATA(j) + dset_idx);
+ tmp_buf[j].field2 = (int)(GEN_DATA(j) + dset_idx);
+ tmp_buf[j].field3 = (long)(GEN_DATA(j) + dset_idx);
+ }
- correct_buf[i].field3 =
- (long)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) +
- (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
}
- VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, dxpl_id, data) >= 0), "Dataset write succeeded");
+ write_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs, test_mode);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- if (data)
- free(data);
+ /* Close and re-open datasets */
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ open_datasets(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, num_dsets,
+ test_mode, dset_ids);
/* Verify the correct data was written */
- read_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ correct_bufs[dset_idx] = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size);
+ VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded");
+ read_bufs[dset_idx] = calloc(1, correct_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+
+ for (size_t j = 0; j < correct_buf_size / sizeof(COMPOUND_C_DATATYPE); j++) {
+ size_t val1 = (dataset_dims[1] * (j / ((hsize_t)mpi_size * dataset_dims[1])));
+ size_t val2 = (j % dataset_dims[1]);
+ size_t val3 = (((j % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]);
+ size_t val = val1 + val2 + val3 + dset_idx;
+
+ correct_bufs[dset_idx][j].field1 = (short)val;
+ correct_bufs[dset_idx][j].field2 = (int)val;
+ correct_bufs[dset_idx][j].field3 = (long)val;
+ }
+ }
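+ /* val1..val3 simply split the original single-buffer expected-value expression (see
+ the removed lines above) into named terms; dset_idx mirrors the per-dataset offset
+ used when the data was generated. */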
- dset_id =
- H5Dopen2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ read_datasets(num_dsets, dset_ids, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
- VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), "Dataset read succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)),
+ "Data verification succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ free(read_bufs[dset_idx]);
+ free(correct_bufs[dset_idx]);
+ }
- if (correct_buf)
- free(correct_buf);
- if (read_buf)
- free(read_buf);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Tclose(memtype) >= 0), "Datatype close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -3276,27 +3774,31 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group,
*/
static void
test_write_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode)
{
- COMPOUND_C_DATATYPE *data = NULL;
- COMPOUND_C_DATATYPE *read_buf = NULL;
- COMPOUND_C_DATATYPE *correct_buf = NULL;
+ COMPOUND_C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
- hsize_t sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
hsize_t start[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
- size_t i, correct_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID,
- filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID;
- hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ size_t data_size, correct_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
- HDputs("Testing write to unshared filtered chunks in Compound Datatype dataset with Datatype "
- "conversion");
+ puts("Testing write to unshared filtered chunks in Compound Datatype dataset with Datatype "
+ "conversion");
/* Skip for MPI communicator size of 1 */
if (mpi_size == 1) {
@@ -3322,17 +3824,11 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_gro
dataset_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS;
chunk_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
- sel_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
- sel_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS,
dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS,
- sel_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
/* Create chunked dataset */
plist_id = H5Pcopy(dcpl_id);
VRFY((plist_id >= 0), "DCPL copy succeeded");
@@ -3363,12 +3859,12 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_gro
VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
- dset_id = H5Dcreate2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME,
- filetype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, filetype,
+ filespace, plist_id, test_mode, &num_dsets, dset_ids);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -3384,73 +3880,147 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_gro
start[0] = 0;
start[1] = ((hsize_t)mpi_rank * WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS);
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
+
+ /* Fill data buffer */
+ data_size = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC *
+ sizeof(COMPOUND_C_DATATYPE);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ COMPOUND_C_DATATYPE *tmp_buf = (COMPOUND_C_DATATYPE *)calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
+
+ for (size_t j = 0;
+ j < (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC; j++) {
+ tmp_buf[j].field1 = (short)(GEN_DATA(j) + dset_idx);
+ tmp_buf[j].field2 = (int)(GEN_DATA(j) + dset_idx);
+ tmp_buf[j].field3 = (long)(GEN_DATA(j) + dset_idx);
+ }
+
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
}
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+    /*
+     * Ensure that this test currently fails in most cases: type conversions
+     * break collective mode when selection I/O is disabled, and the library
+     * currently disables selection I/O when filters are applied to a dataset.
+     */
+
+ /* NOTE: Once type conversions no longer break collective mode, remove
+ * the H5E_BEGIN/END_TRY block and switch to the following code instead
+ * of the H5Dwrite loop:
+ */
+ /* write_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids,
+ dxpl_id, data_bufs, test_mode); */
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ herr_t expected = FAIL;
+ herr_t ret;
+
+ /*
+ * Since this currently writes datasets one by one regardless of
+ * test mode, the write call could succeed if the dataset doesn't
+ * have any filters applied to it (can currently only happen when
+ * testing a mix of filtered and unfiltered datasets with the
+ * multi-dataset APIs).
+ */
+ if (test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED) {
+ hid_t dset_dcpl;
+ int nfilters;
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ dset_dcpl = H5Dget_create_plist(dset_ids[dset_idx]);
+ VRFY((dset_dcpl >= 0), "H5Dget_create_plist");
- data = (COMPOUND_C_DATATYPE *)calloc(
- 1, (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC * sizeof(*data));
- VRFY((NULL != data), "calloc succeeded");
+ nfilters = H5Pget_nfilters(dset_dcpl);
+ VRFY((nfilters >= 0), "H5Pget_nfilters");
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE);
+ if (nfilters == 0)
+ expected = SUCCEED;
- correct_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ VRFY((H5Pclose(dset_dcpl) >= 0), "H5Pclose");
+ }
- /* Fill data buffer */
- for (i = 0; i < (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC; i++) {
- data[i].field1 = (short)GEN_DATA(i);
- data[i].field2 = (int)GEN_DATA(i);
- data[i].field3 = (long)GEN_DATA(i);
- }
+ if (expected == SUCCEED)
+ ret = H5Dwrite(dset_ids[dset_idx], memtype, H5S_BLOCK, fspace_ids[dset_idx], dxpl_id,
+ data_bufs[dset_idx]);
+ else {
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dwrite(dset_ids[dset_idx], memtype, H5S_BLOCK, fspace_ids[dset_idx], dxpl_id,
+ data_bufs[dset_idx]);
+ }
+ H5E_END_TRY
+ }
- /* Ensure that this test currently fails since type conversions break collective mode */
- H5E_BEGIN_TRY
- {
- VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, dxpl_id, data) < 0), "Dataset write succeeded");
+ VRFY((ret == expected), "Dataset write");
+
+ if (expected == SUCCEED)
+ verify_chunk_opt_status(1, dxpl_id);
+ else
+ verify_chunk_opt_status(0, dxpl_id);
}
- H5E_END_TRY
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, NO_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, NO_CHUNKS_WRITTEN);
- if (data)
- free(data);
+ /* Close and re-open datasets */
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+
+ open_datasets(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, num_dsets,
+ test_mode, dset_ids);
/* Verify that no data was written */
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE);
- read_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ correct_bufs[dset_idx] = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size);
+ VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded");
+ read_bufs[dset_idx] = calloc(1, correct_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+ }
- dset_id =
- H5Dopen2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ read_datasets(num_dsets, dset_ids, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
- VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), "Dataset read succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ hid_t dset_dcpl;
+ int nfilters;
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ dset_dcpl = H5Dget_create_plist(dset_ids[dset_idx]);
+ VRFY((dset_dcpl >= 0), "H5Dget_create_plist");
- if (correct_buf)
- free(correct_buf);
- if (read_buf)
- free(read_buf);
+ nfilters = H5Pget_nfilters(dset_dcpl);
+ VRFY((nfilters >= 0), "H5Pget_nfilters");
+
+ VRFY((H5Pclose(dset_dcpl) >= 0), "H5Pclose");
+
+        /*
+         * TODO: For now, skip data verification for datasets where the
+         * type-converting write succeeded because selection I/O was enabled.
+         */
+ if (nfilters == 0)
+ continue;
+
+ VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)),
+ "Data verification succeeded");
+ }
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ free(read_bufs[dset_idx]);
+ free(correct_bufs[dset_idx]);
+ }
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Tclose(filetype) >= 0), "File datatype close succeeded");
VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
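
The hunk above replaces the single expected-to-fail H5Dwrite with a per-dataset loop that first inspects each dataset's creation property list: a filtered dataset is expected to fail the collective, type-converting write, while an unfiltered one (mixed-mode testing) is expected to succeed. A condensed, standalone sketch of that decision, assuming failure is signalled by a negative H5Dwrite return value and using only public calls (the function name is illustrative):

/* Sketch: expect a collective, type-converting write to fail only when
 * the dataset has filters in its pipeline. */
#include <stdbool.h>
#include "hdf5.h"

static herr_t
try_filtered_cmpd_write(hid_t dset_id, hid_t memtype, hid_t fspace_id, hid_t dxpl_id, const void *buf,
                        bool *expected_failure)
{
    hid_t  dcpl     = H5Dget_create_plist(dset_id);
    int    nfilters = (dcpl >= 0) ? H5Pget_nfilters(dcpl) : -1;
    herr_t ret;

    if (dcpl >= 0)
        H5Pclose(dcpl);
    if (nfilters < 0)
        return -1; /* property list inspection failed */

    *expected_failure = (nfilters > 0);

    if (*expected_failure) {
        /* Suppress the expected error stack while attempting the write */
        H5E_BEGIN_TRY
        {
            ret = H5Dwrite(dset_id, memtype, H5S_BLOCK, fspace_id, dxpl_id, buf);
        }
        H5E_END_TRY
    }
    else
        ret = H5Dwrite(dset_id, memtype, H5S_BLOCK, fspace_id, dxpl_id, buf);

    return ret; /* caller compares against *expected_failure */
}
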
@@ -3473,27 +4043,30 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_gro
*/
static void
test_write_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode)
{
- COMPOUND_C_DATATYPE *data = NULL;
- COMPOUND_C_DATATYPE *read_buf = NULL;
- COMPOUND_C_DATATYPE *correct_buf = NULL;
+ COMPOUND_C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
- hsize_t sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
hsize_t start[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
- size_t i, correct_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ size_t data_size, correct_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
hid_t filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID;
- hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
- HDputs(
- "Testing write to shared filtered chunks in Compound Datatype dataset with Datatype conversion");
+ puts("Testing write to shared filtered chunks in Compound Datatype dataset with Datatype conversion");
/* Skip for MPI communicator size of 1 */
if (mpi_size == 1) {
@@ -3519,17 +4092,11 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group
dataset_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS;
chunk_dims[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS;
chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
- sel_dims[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t)mpi_size;
- sel_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC;
filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS,
dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace =
- H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, sel_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
/* Create chunked dataset */
plist_id = H5Pcopy(dcpl_id);
VRFY((plist_id >= 0), "DCPL copy succeeded");
@@ -3560,12 +4127,12 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group
VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
- dset_id = H5Dcreate2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME,
- filetype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, filetype,
+ filespace, plist_id, test_mode, &num_dsets, dset_ids);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -3581,73 +4148,147 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group
start[0] = (hsize_t)mpi_rank;
start[1] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
+
+ /* Fill data buffer */
+ data_size = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC *
+ sizeof(COMPOUND_C_DATATYPE);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ COMPOUND_C_DATATYPE *tmp_buf = (COMPOUND_C_DATATYPE *)calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
+
+ for (size_t j = 0;
+ j < (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC; j++) {
+ tmp_buf[j].field1 = (short)(GEN_DATA(j) + dset_idx);
+ tmp_buf[j].field2 = (int)(GEN_DATA(j) + dset_idx);
+ tmp_buf[j].field3 = (long)(GEN_DATA(j) + dset_idx);
+ }
+
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
}
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+    /*
+     * Ensure that this test currently fails in most cases: type conversions
+     * break collective mode when selection I/O is disabled, and the library
+     * currently disables selection I/O when filters are applied to a dataset.
+     */
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ /* NOTE: Once type conversions no longer break collective mode, remove
+ * the H5E_BEGIN/END_TRY block and switch to the following code instead
+ * of the H5Dwrite loop:
+ */
+ /* write_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids,
+ dxpl_id, data_bufs, test_mode); */
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ herr_t expected = FAIL;
+ herr_t ret;
- data = (COMPOUND_C_DATATYPE *)calloc(
- 1, (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC * sizeof(*data));
- VRFY((NULL != data), "calloc succeeded");
+ /*
+ * Since this currently writes datasets one by one regardless of
+ * test mode, the write call could succeed if the dataset doesn't
+ * have any filters applied to it (can currently only happen when
+ * testing a mix of filtered and unfiltered datasets with the
+ * multi-dataset APIs).
+ */
+ if (test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED) {
+ hid_t dset_dcpl;
+ int nfilters;
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE);
+ dset_dcpl = H5Dget_create_plist(dset_ids[dset_idx]);
+ VRFY((dset_dcpl >= 0), "H5Dget_create_plist");
- correct_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ nfilters = H5Pget_nfilters(dset_dcpl);
+ VRFY((nfilters >= 0), "H5Pget_nfilters");
- /* Fill data buffer */
- for (i = 0; i < (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC; i++) {
- data[i].field1 = (short)GEN_DATA(i);
- data[i].field2 = (int)GEN_DATA(i);
- data[i].field3 = (long)GEN_DATA(i);
- }
+ if (nfilters == 0)
+ expected = SUCCEED;
- /* Ensure that this test currently fails since type conversions break collective mode */
- H5E_BEGIN_TRY
- {
- VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, dxpl_id, data) < 0), "Dataset write succeeded");
+ VRFY((H5Pclose(dset_dcpl) >= 0), "H5Pclose");
+ }
+
+ if (expected == SUCCEED)
+ ret = H5Dwrite(dset_ids[dset_idx], memtype, H5S_BLOCK, fspace_ids[dset_idx], dxpl_id,
+ data_bufs[dset_idx]);
+ else {
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dwrite(dset_ids[dset_idx], memtype, H5S_BLOCK, fspace_ids[dset_idx], dxpl_id,
+ data_bufs[dset_idx]);
+ }
+ H5E_END_TRY
+ }
+
+ VRFY((ret == expected), "Dataset write");
+
+ if (expected == SUCCEED)
+ verify_chunk_opt_status(1, dxpl_id);
+ else
+ verify_chunk_opt_status(0, dxpl_id);
}
- H5E_END_TRY
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, NO_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, NO_CHUNKS_WRITTEN);
+
+ /* Close and re-open datasets */
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
- if (data)
- free(data);
+ open_datasets(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, num_dsets,
+ test_mode, dset_ids);
/* Verify that no data was written */
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ correct_bufs[dset_idx] = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size);
+ VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded");
+ read_bufs[dset_idx] = calloc(1, correct_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+ }
- read_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ read_datasets(num_dsets, dset_ids, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
- dset_id =
- H5Dopen2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ hid_t dset_dcpl;
+ int nfilters;
- VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), "Dataset read succeeded");
+ dset_dcpl = H5Dget_create_plist(dset_ids[dset_idx]);
+ VRFY((dset_dcpl >= 0), "H5Dget_create_plist");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ nfilters = H5Pget_nfilters(dset_dcpl);
+ VRFY((nfilters >= 0), "H5Pget_nfilters");
- if (correct_buf)
- free(correct_buf);
- if (read_buf)
- free(read_buf);
+ VRFY((H5Pclose(dset_dcpl) >= 0), "H5Pclose");
+
+        /*
+         * TODO: For now, skip data verification for datasets where the
+         * type-converting write succeeded because selection I/O was enabled.
+         */
+ if (nfilters == 0)
+ continue;
+
+ VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)),
+ "Data verification succeeded");
+ }
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ free(read_bufs[dset_idx]);
+ free(correct_bufs[dset_idx]);
+ }
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Tclose(filetype) >= 0), "File datatype close succeeded");
VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
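
These rewritten tests keep two parallel buffer arrays, data_bufs (const void *) for the I/O calls and data_bufs_nc (void *) so the buffers can still be freed. A minimal sketch of that pattern around H5Dwrite_multi(), with illustrative names and a fixed dataset count, assuming all datasets share one memory type and contiguous (H5S_BLOCK) memory buffers:

/* Sketch: parallel const/non-const buffer arrays so the buffers can be
 * handed to H5Dwrite_multi() (const void *[]) and still be free()'d. */
#include <stdlib.h>
#include "hdf5.h"

#define N_DSETS 2 /* illustrative count */

static herr_t
write_two_dsets(hid_t dset_ids[N_DSETS], hid_t memtype, hid_t fspace_ids[N_DSETS], hid_t dxpl_id,
                size_t buf_size)
{
    const void *data_bufs[N_DSETS];    /* passed to H5Dwrite_multi */
    void       *data_bufs_nc[N_DSETS]; /* non-const copies for free() */
    hid_t       mem_spaces[N_DSETS] = {H5S_BLOCK, H5S_BLOCK};
    hid_t       mem_types[N_DSETS]  = {memtype, memtype};
    herr_t      ret;

    for (size_t i = 0; i < N_DSETS; i++) {
        void *buf = calloc(1, buf_size);
        if (!buf)
            return -1;
        data_bufs[i]    = buf;
        data_bufs_nc[i] = buf;
    }

    ret = H5Dwrite_multi(N_DSETS, dset_ids, mem_types, mem_spaces, fspace_ids, dxpl_id, data_bufs);

    for (size_t i = 0; i < N_DSETS; i++)
        free(data_bufs_nc[i]);

    return ret;
}
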
@@ -3669,11 +4310,12 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group
*/
static void
test_read_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id)
+ hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode)
{
- C_DATATYPE *read_buf = NULL;
- C_DATATYPE *correct_buf = NULL;
- C_DATATYPE *global_buf = NULL;
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *global_buf = NULL;
hsize_t dataset_dims[READ_ONE_CHUNK_FILTERED_DATASET_DIMS];
hsize_t chunk_dims[READ_ONE_CHUNK_FILTERED_DATASET_DIMS];
hsize_t sel_dims[READ_ONE_CHUNK_FILTERED_DATASET_DIMS];
@@ -3682,30 +4324,79 @@ test_read_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filt
hsize_t count[READ_ONE_CHUNK_FILTERED_DATASET_DIMS];
hsize_t block[READ_ONE_CHUNK_FILTERED_DATASET_DIMS];
hsize_t flat_dims[1];
- size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
- hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ size_t data_size, read_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
int *recvcounts = NULL;
int *displs = NULL;
+ int mpi_code;
if (MAINPROCESS)
- HDputs("Testing read from one-chunk filtered dataset");
+ puts("Testing read from one-chunk filtered dataset");
dataset_dims[0] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_NROWS;
dataset_dims[1] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_NCOLS;
/* Setup the buffer for writing and for comparison */
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+ data_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE);
- correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) {
+ C_DATATYPE *tmp_buf = calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = ((C_DATATYPE)i % (READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size *
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] = ((C_DATATYPE)j % (READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size *
+ READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS)) +
+ ((C_DATATYPE)j / (READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size *
READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS)) +
- ((C_DATATYPE)i / (READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size *
- READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS));
+ (C_DATATYPE)dset_idx;
+
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
+ }
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_ONE_CHUNK_FILTERED_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS;
+ chunk_dims[1] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS;
+
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_ONE_CHUNK_FILTERED_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, READ_ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, plist_id,
+ test_mode, &num_dsets, dset_ids);
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ MPI_Barrier(comm);
if (MAINPROCESS) {
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -3722,52 +4413,37 @@ test_read_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filt
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- /* Create the dataspace for the dataset */
- filespace = H5Screate_simple(READ_ONE_CHUNK_FILTERED_DATASET_DIMS, dataset_dims, NULL);
- VRFY((filespace >= 0), "File dataspace creation succeeded");
-
- /* Create chunked dataset */
- chunk_dims[0] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS;
- chunk_dims[1] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS;
-
- plist_id = H5Pcopy(dcpl_id);
- VRFY((plist_id >= 0), "DCPL copy succeeded");
-
- VRFY((H5Pset_chunk(plist_id, READ_ONE_CHUNK_FILTERED_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ open_datasets(group_id, READ_ONE_CHUNK_FILTERED_DATASET_NAME, num_dsets, test_mode, dset_ids);
- /* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+ select_all(num_dsets, dset_ids, fspace_ids);
- dset_id = H5Dcreate2(group_id, READ_ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs,
+ test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
-
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
- "Dataset write succeeded");
+ plist_id = H5Dget_create_plist(dset_ids[0]);
+ VRFY((plist_id >= 0), "H5Dget_create_plist succeeded");
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- /* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
+ MPI_Barrier(comm);
+
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(group_id, READ_ONE_CHUNK_FILTERED_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, READ_ONE_CHUNK_FILTERED_DATASET_NAME, num_dsets, test_mode, dset_ids);
sel_dims[0] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_NROWS / (hsize_t)mpi_size;
sel_dims[1] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_NCOLS;
@@ -3775,13 +4451,6 @@ test_read_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filt
/* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
flat_dims[0] = sel_dims[0] * sel_dims[1];
- memspace = H5Screate_simple(1, flat_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
/*
* Each process defines the dataset selection in the file and
* reads it to the selection in memory
@@ -3795,61 +4464,54 @@ test_read_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filt
start[0] = ((hsize_t)mpi_rank * sel_dims[0]);
start[1] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
- read_buf_size = flat_dims[0] * sizeof(*read_buf);
+ read_buf_size = flat_dims[0] * sizeof(C_DATATYPE);
- read_buf = (C_DATATYPE *)calloc(1, read_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
-
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ read_bufs[dset_idx] = calloc(1, read_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+ }
- global_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != global_buf), "calloc succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs,
+ test_mode);
/* Collect each piece of data from all ranks into a global buffer on all ranks */
- recvcounts = (int *)calloc(1, (size_t)mpi_size * sizeof(*recvcounts));
+ global_buf = calloc(1, data_size);
+ VRFY((NULL != global_buf), "calloc succeeded");
+ recvcounts = calloc(1, (size_t)mpi_size * sizeof(*recvcounts));
VRFY((NULL != recvcounts), "calloc succeeded");
+ displs = calloc(1, (size_t)mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "calloc succeeded");
- for (i = 0; i < (size_t)mpi_size; i++)
+ for (size_t i = 0; i < (size_t)mpi_size; i++) {
recvcounts[i] = (int)flat_dims[0];
+ displs[i] = (int)(i * flat_dims[0]);
+ }
- displs = (int *)calloc(1, (size_t)mpi_size * sizeof(*displs));
- VRFY((NULL != displs), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ mpi_code = MPI_Allgatherv(read_bufs[dset_idx], (int)flat_dims[0], C_DATATYPE_MPI, global_buf,
+ recvcounts, displs, C_DATATYPE_MPI, comm);
+ VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded");
- for (i = 0; i < (size_t)mpi_size; i++)
- displs[i] = (int)(i * flat_dims[0]);
+ VRFY((0 == memcmp(global_buf, data_bufs[dset_idx], data_size)), "Data verification succeeded");
+ }
- VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts,
- displs, C_DATATYPE_MPI, comm)),
- "MPI_Allgatherv succeeded");
+ free(displs);
+ free(recvcounts);
+ free(global_buf);
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(read_bufs[dset_idx]);
- if (displs)
- free(displs);
- if (recvcounts)
- free(recvcounts);
- if (global_buf)
- free(global_buf);
- if (read_buf)
- free(read_buf);
- if (correct_buf)
- free(correct_buf);
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
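
After each rank reads its hyperslab, the pieces are collected into a global buffer with MPI_Allgatherv and compared to the original write buffers. A reduced sketch of that collective verification step, assuming equal-sized contiguous slices per rank and a plain long element type in place of the test's C_DATATYPE/C_DATATYPE_MPI pair:

/* Sketch: gather each rank's contiguous slice into a global buffer on
 * every rank, then verify it against the expected data. */
#include <stdlib.h>
#include <string.h>
#include <mpi.h>

static int
gather_and_verify(const long *local_buf, size_t local_elems, const long *expected, MPI_Comm comm)
{
    int   mpi_size = 0, ok = 0;
    long *global_buf = NULL;
    int  *recvcounts = NULL, *displs = NULL;

    MPI_Comm_size(comm, &mpi_size);

    global_buf = calloc((size_t)mpi_size * local_elems, sizeof(long));
    recvcounts = calloc((size_t)mpi_size, sizeof(int));
    displs     = calloc((size_t)mpi_size, sizeof(int));
    if (!global_buf || !recvcounts || !displs)
        goto done;

    for (int i = 0; i < mpi_size; i++) {
        recvcounts[i] = (int)local_elems;               /* every rank contributes the same count */
        displs[i]     = (int)((size_t)i * local_elems); /* slices laid out back to back */
    }

    if (MPI_Allgatherv(local_buf, (int)local_elems, MPI_LONG, global_buf, recvcounts, displs, MPI_LONG,
                       comm) != MPI_SUCCESS)
        goto done;

    ok = (0 == memcmp(global_buf, expected, (size_t)mpi_size * local_elems * sizeof(long)));

done:
    free(displs);
    free(recvcounts);
    free(global_buf);
    return ok;
}
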
@@ -3868,11 +4530,12 @@ test_read_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filt
*/
static void
test_read_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id)
+ hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode)
{
- C_DATATYPE *read_buf = NULL;
- C_DATATYPE *correct_buf = NULL;
- C_DATATYPE *global_buf = NULL;
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *global_buf = NULL;
hsize_t dataset_dims[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t chunk_dims[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t sel_dims[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
@@ -3881,29 +4544,79 @@ test_read_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t fil
hsize_t count[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t block[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t flat_dims[1];
- size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
- hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ size_t data_size, read_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
int *recvcounts = NULL;
int *displs = NULL;
+ int mpi_code;
if (MAINPROCESS)
- HDputs("Testing read from unshared filtered chunks");
+ puts("Testing read from unshared filtered chunks");
dataset_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NROWS;
dataset_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NCOLS;
/* Setup the buffer for writing and for comparison */
- correct_buf_size = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NROWS *
- (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NCOLS * sizeof(*correct_buf);
+ data_size = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NROWS * (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NCOLS *
+ sizeof(C_DATATYPE);
+
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) {
+ C_DATATYPE *tmp_buf = calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] =
+ (C_DATATYPE)((j % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) +
+ (j / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + dset_idx);
+
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
+ }
- for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) +
- (i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])));
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
+
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ plist_id, test_mode, &num_dsets, dset_ids);
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ MPI_Barrier(comm);
if (MAINPROCESS) {
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -3920,52 +4633,37 @@ test_read_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t fil
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- /* Create the dataspace for the dataset */
- filespace = H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
- VRFY((filespace >= 0), "File dataspace creation succeeded");
-
- /* Create chunked dataset */
- chunk_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
- chunk_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
+ open_datasets(group_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode, dset_ids);
- plist_id = H5Pcopy(dcpl_id);
- VRFY((plist_id >= 0), "DCPL copy succeeded");
+ select_all(num_dsets, dset_ids, fspace_ids);
- VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
-
- /* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
-
- dset_id = H5Dcreate2(group_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
- filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs,
+ test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
-
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
- "Dataset write succeeded");
+ plist_id = H5Dget_create_plist(dset_ids[0]);
+ VRFY((plist_id >= 0), "H5Dget_create_plist succeeded");
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- /* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
+ MPI_Barrier(comm);
+
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(group_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode, dset_ids);
sel_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
sel_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NCOLS;
@@ -3973,13 +4671,6 @@ test_read_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t fil
/* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
flat_dims[0] = sel_dims[0] * sel_dims[1];
- memspace = H5Screate_simple(1, flat_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
/*
* Each process defines the dataset selection in the file and reads
* it to the selection in memory
@@ -3993,61 +4684,54 @@ test_read_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t fil
start[0] = ((hsize_t)mpi_rank * (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS * count[0]);
start[1] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
-
- read_buf_size = flat_dims[0] * sizeof(*read_buf);
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
- read_buf = (C_DATATYPE *)calloc(1, read_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ read_buf_size = flat_dims[0] * sizeof(C_DATATYPE);
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ read_bufs[dset_idx] = calloc(1, read_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+ }
- global_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != global_buf), "calloc succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs,
+ test_mode);
/* Collect each piece of data from all ranks into a global buffer on all ranks */
- recvcounts = (int *)calloc(1, (size_t)mpi_size * sizeof(*recvcounts));
+ global_buf = calloc(1, data_size);
+ VRFY((NULL != global_buf), "calloc succeeded");
+ recvcounts = calloc(1, (size_t)mpi_size * sizeof(*recvcounts));
VRFY((NULL != recvcounts), "calloc succeeded");
+ displs = calloc(1, (size_t)mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "calloc succeeded");
- for (i = 0; i < (size_t)mpi_size; i++)
+ for (size_t i = 0; i < (size_t)mpi_size; i++) {
recvcounts[i] = (int)flat_dims[0];
+ displs[i] = (int)(i * flat_dims[0]);
+ }
- displs = (int *)calloc(1, (size_t)mpi_size * sizeof(*displs));
- VRFY((NULL != displs), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ mpi_code = MPI_Allgatherv(read_bufs[dset_idx], (int)flat_dims[0], C_DATATYPE_MPI, global_buf,
+ recvcounts, displs, C_DATATYPE_MPI, comm);
+ VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded");
- for (i = 0; i < (size_t)mpi_size; i++)
- displs[i] = (int)(i * flat_dims[0]);
+ VRFY((0 == memcmp(global_buf, data_bufs[dset_idx], data_size)), "Data verification succeeded");
+ }
- VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts,
- displs, C_DATATYPE_MPI, comm)),
- "MPI_Allgatherv succeeded");
+ free(displs);
+ free(recvcounts);
+ free(global_buf);
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(read_bufs[dset_idx]);
- if (displs)
- free(displs);
- if (recvcounts)
- free(recvcounts);
- if (global_buf)
- free(global_buf);
- if (read_buf)
- free(read_buf);
- if (correct_buf)
- free(correct_buf);
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
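
A recurring change in these read tests is that the explicit one-dimensional memory dataspace is gone: the hyperslab is selected on the file dataspace and H5S_BLOCK is passed as the memory space, meaning the memory buffer is a single contiguous block matching the file selection. A minimal sketch of that read pattern, with H5T_NATIVE_LONG standing in for the test's HDF5_DATATYPE_NAME and an illustrative function name:

/* Sketch: hyperslab selection in the file plus H5S_BLOCK in memory,
 * replacing an explicitly created 1-D memory dataspace. */
#include "hdf5.h"

static herr_t
read_block(hid_t dset_id, const hsize_t start[2], const hsize_t stride[2], const hsize_t count[2],
           const hsize_t block[2], hid_t dxpl_id, void *read_buf)
{
    hid_t  fspace = H5Dget_space(dset_id);
    herr_t ret    = -1;

    if (fspace < 0)
        return -1;

    if (H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, stride, count, block) >= 0)
        /* H5S_BLOCK: memory is one contiguous block the size of the file selection */
        ret = H5Dread(dset_id, H5T_NATIVE_LONG, H5S_BLOCK, fspace, dxpl_id, read_buf);

    H5Sclose(fspace);
    return ret;
}
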
@@ -4067,11 +4751,12 @@ test_read_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t fil
*/
static void
test_read_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id)
+ hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode)
{
- C_DATATYPE *read_buf = NULL;
- C_DATATYPE *correct_buf = NULL;
- C_DATATYPE *global_buf = NULL;
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *global_buf = NULL;
hsize_t dataset_dims[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t chunk_dims[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t sel_dims[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
@@ -4080,30 +4765,80 @@ test_read_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter
hsize_t count[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t block[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t flat_dims[1];
- size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
- hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ size_t data_size, read_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
int *recvcounts = NULL;
int *displs = NULL;
+ int mpi_code;
if (MAINPROCESS)
- HDputs("Testing read from shared filtered chunks");
+ puts("Testing read from shared filtered chunks");
dataset_dims[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_NROWS;
dataset_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_NCOLS;
/* Setup the buffer for writing and for comparison */
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+ data_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE);
- correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) {
+ C_DATATYPE *tmp_buf = calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] =
- (C_DATATYPE)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) +
- (i % dataset_dims[1]) +
- (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] = (C_DATATYPE)((dataset_dims[1] * (j / ((hsize_t)mpi_size * dataset_dims[1]))) +
+ (j % dataset_dims[1]) +
+ (((j % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) %
+ dataset_dims[1]) +
+ dset_idx);
+
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
+ }
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NCOLS;
+
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, READ_SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ plist_id, test_mode, &num_dsets, dset_ids);
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ MPI_Barrier(comm);
if (MAINPROCESS) {
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -4120,52 +4855,37 @@ test_read_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- /* Create the dataspace for the dataset */
- filespace = H5Screate_simple(READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
- VRFY((filespace >= 0), "File dataspace creation succeeded");
-
- /* Create chunked dataset */
- chunk_dims[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NROWS;
- chunk_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NCOLS;
+ open_datasets(group_id, READ_SHARED_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode, dset_ids);
- plist_id = H5Pcopy(dcpl_id);
- VRFY((plist_id >= 0), "DCPL copy succeeded");
+ select_all(num_dsets, dset_ids, fspace_ids);
- VRFY((H5Pset_chunk(plist_id, READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
-
- /* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
-
- dset_id = H5Dcreate2(group_id, READ_SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
- filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs,
+ test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
-
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
- "Dataset write succeeded");
+ plist_id = H5Dget_create_plist(dset_ids[0]);
+ VRFY((plist_id >= 0), "H5Dget_create_plist succeeded");
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- /* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
+ MPI_Barrier(comm);
+
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(group_id, READ_SHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, READ_SHARED_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode, dset_ids);
sel_dims[0] = (hsize_t)DIM0_SCALE_FACTOR;
sel_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NCOLS * (hsize_t)DIM1_SCALE_FACTOR;
@@ -4173,13 +4893,6 @@ test_read_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter
/* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
flat_dims[0] = sel_dims[0] * sel_dims[1];
- memspace = H5Screate_simple(1, flat_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
/*
* Each process defines the dataset selection in the file and
* reads it to the selection in memory
@@ -4193,77 +4906,71 @@ test_read_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter
start[0] = (hsize_t)mpi_rank * block[0];
start[1] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
- read_buf_size = flat_dims[0] * sizeof(*read_buf);
+ read_buf_size = flat_dims[0] * sizeof(C_DATATYPE);
- read_buf = (C_DATATYPE *)calloc(1, read_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ read_bufs[dset_idx] = calloc(1, read_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+ }
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs,
+ test_mode);
- global_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
+ /* Collect each piece of data from all ranks into a global buffer on all ranks */
+ global_buf = calloc(1, data_size);
VRFY((NULL != global_buf), "calloc succeeded");
+ recvcounts = calloc(1, (size_t)mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "calloc succeeded");
+ displs = calloc(1, (size_t)mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "calloc succeeded");
/*
* Since these chunks are shared, run multiple rounds of MPI_Allgatherv
* to collect all of the pieces into their appropriate locations. The
- * number of times MPI_Allgatherv is run should be equal to the number
- * of chunks in the first dimension of the dataset.
+ * number of times MPI_Allgatherv is run for each dataset should be equal
+ * to the number of chunks in the first dimension of the dataset.
*/
- {
- size_t loop_count = count[0];
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
size_t total_recvcounts = 0;
- recvcounts = (int *)calloc(1, (size_t)mpi_size * sizeof(*recvcounts));
- VRFY((NULL != recvcounts), "calloc succeeded");
-
- displs = (int *)calloc(1, (size_t)mpi_size * sizeof(*displs));
- VRFY((NULL != displs), "calloc succeeded");
+ for (size_t j = 0; j < (size_t)mpi_size; j++) {
+ recvcounts[j] = (int)dataset_dims[1];
+ total_recvcounts += (size_t)recvcounts[j];
- for (i = 0; i < (size_t)mpi_size; i++) {
- recvcounts[i] = (int)dataset_dims[1];
- total_recvcounts += (size_t)recvcounts[i];
+ displs[j] = (int)(j * dataset_dims[1]);
}
- for (i = 0; i < (size_t)mpi_size; i++)
- displs[i] = (int)(i * dataset_dims[1]);
+ for (size_t loop_count = count[0]; loop_count; loop_count--) {
+ C_DATATYPE *tmp_buf = (C_DATATYPE *)read_bufs[dset_idx];
+ C_DATATYPE *tmp_glob_buf = (C_DATATYPE *)global_buf;
- for (; loop_count; loop_count--) {
- VRFY((MPI_SUCCESS == MPI_Allgatherv(&read_buf[(count[0] - loop_count) * dataset_dims[1]],
- recvcounts[mpi_rank], C_DATATYPE_MPI,
- &global_buf[(count[0] - loop_count) * total_recvcounts],
- recvcounts, displs, C_DATATYPE_MPI, comm)),
- "MPI_Allgatherv succeeded");
+ mpi_code =
+ MPI_Allgatherv(&tmp_buf[(count[0] - loop_count) * dataset_dims[1]], recvcounts[mpi_rank],
+ C_DATATYPE_MPI, &tmp_glob_buf[(count[0] - loop_count) * total_recvcounts],
+ recvcounts, displs, C_DATATYPE_MPI, comm);
+ VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded");
}
+
+ VRFY((0 == memcmp(global_buf, data_bufs[dset_idx], data_size)), "Data verification succeeded");
}
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ free(displs);
+ free(recvcounts);
+ free(global_buf);
- if (displs)
- free(displs);
- if (recvcounts)
- free(recvcounts);
- if (global_buf)
- free(global_buf);
- if (read_buf)
- free(read_buf);
- if (correct_buf)
- free(correct_buf);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(read_bufs[dset_idx]);
+
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
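
The shared-chunk read path above reassembles each rank's interleaved rows with one MPI_Allgatherv per chunk row, using identical recvcounts and row-sized displacements on every rank. A standalone sketch of that gather pattern (illustrative names, NULL/error checks omitted, not part of the patch):

    #include <mpi.h>
    #include <stdlib.h>

    /* Sketch only: every rank holds `rows_per_rank` rows of an
     * (mpi_size * rows_per_rank) x ncols matrix, interleaved by rank
     * (rank r owns rows r, r + mpi_size, r + 2*mpi_size, ...). One
     * MPI_Allgatherv per local row drops each contribution at the right
     * offset of the global buffer on every rank. */
    static void
    gather_interleaved_rows(const int *local, int *global, int rows_per_rank, int ncols, MPI_Comm comm)
    {
        int mpi_size, mpi_rank;
        MPI_Comm_size(comm, &mpi_size);
        MPI_Comm_rank(comm, &mpi_rank);

        int *recvcounts = calloc((size_t)mpi_size, sizeof(int));
        int *displs     = calloc((size_t)mpi_size, sizeof(int));

        for (int r = 0; r < mpi_size; r++) {
            recvcounts[r] = ncols;     /* each rank contributes one row per round */
            displs[r]     = r * ncols; /* placed right after the previous rank's row */
        }

        /* Round `row` gathers every rank's local row `row`; rank r's piece
         * ends up at global row (row * mpi_size + r), matching the
         * interleaved layout written by the test. */
        for (int row = 0; row < rows_per_rank; row++)
            MPI_Allgatherv(&local[row * ncols], ncols, MPI_INT,
                           &global[row * mpi_size * ncols], recvcounts, displs, MPI_INT, comm);

        free(displs);
        free(recvcounts);
    }
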
@@ -4283,11 +4990,13 @@ test_read_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter
*/
static void
test_read_filtered_dataset_single_no_selection(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode)
{
- C_DATATYPE *read_buf = NULL;
- C_DATATYPE *correct_buf = NULL;
- C_DATATYPE *global_buf = NULL;
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *global_buf = NULL;
hsize_t dataset_dims[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t chunk_dims[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t sel_dims[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
@@ -4296,34 +5005,85 @@ test_read_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fil
hsize_t count[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t block[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t flat_dims[1];
- size_t i, read_buf_size, correct_buf_size;
- size_t segment_length;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
- hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ size_t data_size, read_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
int *recvcounts = NULL;
int *displs = NULL;
+ int mpi_code;
if (MAINPROCESS)
- HDputs("Testing read from filtered chunks with a single process having no selection");
+ puts("Testing read from filtered chunks with a single process having no selection");
dataset_dims[0] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS;
dataset_dims[1] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
/* Setup the buffer for writing and for comparison */
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+ data_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE);
+
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) {
+ size_t segment_length;
+
+ C_DATATYPE *tmp_buf = calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] =
+ (C_DATATYPE)((j % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) +
+ (j / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + dset_idx);
- for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) +
- (i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])));
+ /* Compute the correct offset into the buffer for the process having no selection and clear it */
+ segment_length = dataset_dims[0] * dataset_dims[1] / (hsize_t)mpi_size;
+ memset(tmp_buf + ((size_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC * segment_length),
+ 0, segment_length * sizeof(C_DATATYPE));
- /* Compute the correct offset into the buffer for the process having no selection and clear it */
- segment_length = dataset_dims[0] * dataset_dims[1] / (hsize_t)mpi_size;
- memset(correct_buf + ((size_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC * segment_length),
- 0, segment_length * sizeof(*correct_buf));
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
+ }
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, plist_id, test_mode, &num_dsets, dset_ids);
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ MPI_Barrier(comm);
if (MAINPROCESS) {
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -4340,53 +5100,39 @@ test_read_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fil
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- /* Create the dataspace for the dataset */
- filespace =
- H5Screate_simple(READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
- VRFY((filespace >= 0), "File dataspace creation succeeded");
-
- /* Create chunked dataset */
- chunk_dims[0] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
- chunk_dims[1] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
-
- plist_id = H5Pcopy(dcpl_id);
- VRFY((plist_id >= 0), "DCPL copy succeeded");
-
- VRFY((H5Pset_chunk(plist_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ open_datasets(group_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode,
+ dset_ids);
- /* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+ select_all(num_dsets, dset_ids, fspace_ids);
- dset_id = H5Dcreate2(group_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME,
- HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs,
+ test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
-
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
- "Dataset write succeeded");
+ plist_id = H5Dget_create_plist(dset_ids[0]);
+ VRFY((plist_id >= 0), "H5Dget_create_plist succeeded");
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- /* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
+ MPI_Barrier(comm);
+
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(group_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode,
+ dset_ids);
sel_dims[0] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
sel_dims[1] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
@@ -4397,13 +5143,6 @@ test_read_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fil
/* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
flat_dims[0] = sel_dims[0] * sel_dims[1];
- memspace = H5Screate_simple(1, flat_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
/*
* Each process defines the dataset selection in the file and
* reads it to the selection in memory
@@ -4418,78 +5157,70 @@ test_read_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fil
start[0] = (hsize_t)mpi_rank * (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * count[0];
start[1] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
-
if (mpi_rank == READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC)
- VRFY((H5Sselect_none(filespace) >= 0), "Select none succeeded");
+ select_none(num_dsets, dset_ids, fspace_ids);
else
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
-
- read_buf_size = flat_dims[0] * sizeof(*read_buf);
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
- read_buf = (C_DATATYPE *)calloc(1, read_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ read_buf_size = flat_dims[0] * sizeof(C_DATATYPE);
- if (mpi_rank == READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC) {
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, NULL) >= 0),
- "Dataset read succeeded");
- }
- else {
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ if (mpi_rank != READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC) {
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ read_bufs[dset_idx] = calloc(1, read_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+ }
}
- global_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != global_buf), "calloc succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs,
+ test_mode);
/* Collect each piece of data from all ranks into a global buffer on all ranks */
- recvcounts = (int *)calloc(1, (size_t)mpi_size * sizeof(*recvcounts));
+ global_buf = calloc(1, data_size);
+ VRFY((NULL != global_buf), "calloc succeeded");
+ recvcounts = calloc(1, (size_t)mpi_size * sizeof(*recvcounts));
VRFY((NULL != recvcounts), "calloc succeeded");
+ displs = calloc(1, (size_t)mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "calloc succeeded");
- for (i = 0; i < (size_t)mpi_size; i++)
+ for (size_t i = 0; i < (size_t)mpi_size; i++) {
recvcounts[i] = (int)(READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS *
READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS);
+ displs[i] = (int)(i * (size_t)(READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS *
+ READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS));
+ }
+
recvcounts[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC] = 0;
- displs = (int *)calloc(1, (size_t)mpi_size * sizeof(*displs));
- VRFY((NULL != displs), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ if (mpi_rank == READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC) {
+ mpi_code = MPI_Allgatherv(read_bufs[dset_idx], 0, C_DATATYPE_MPI, global_buf, recvcounts, displs,
+ C_DATATYPE_MPI, comm);
+ VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded");
+ }
+ else {
+ mpi_code = MPI_Allgatherv(read_bufs[dset_idx], (int)flat_dims[0], C_DATATYPE_MPI, global_buf,
+ recvcounts, displs, C_DATATYPE_MPI, comm);
+ VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded");
+ }
- for (i = 0; i < (size_t)mpi_size; i++)
- displs[i] = (int)(i * (size_t)(READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS *
- READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS));
+ VRFY((0 == memcmp(global_buf, data_bufs[dset_idx], data_size)), "Data verification succeeded");
+ }
+
+ free(displs);
+ free(recvcounts);
+ free(global_buf);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(read_bufs[dset_idx]);
+
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
- if (mpi_rank == READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC)
- VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, 0, C_DATATYPE_MPI, global_buf, recvcounts, displs,
- C_DATATYPE_MPI, comm)),
- "MPI_Allgatherv succeeded");
- else
- VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)flat_dims[0], C_DATATYPE_MPI, global_buf,
- recvcounts, displs, C_DATATYPE_MPI, comm)),
- "MPI_Allgatherv succeeded");
-
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
-
- if (displs)
- free(displs);
- if (recvcounts)
- free(recvcounts);
- if (global_buf)
- free(global_buf);
- if (read_buf)
- free(read_buf);
- if (correct_buf)
- free(correct_buf);
-
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
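
In the single-no-selection variant, the excluded rank stays inside every collective call: it selects nothing in the file, passes a send count of zero to MPI_Allgatherv, and has its recvcounts slot zeroed so the other ranks' data still lands at fixed offsets. A reduced sketch of that zero-contribution pattern (illustrative names, mpi_size assumed small, error checks omitted, not part of the patch):

    #include <mpi.h>

    /* Sketch only: rank `skip_rank` has no selection and contributes zero
     * elements, but still participates so the collective completes on all
     * ranks; its slot in the global buffer is simply left untouched. */
    static void
    gather_with_empty_rank(const int *local, int *global, int elems_per_rank, int skip_rank, MPI_Comm comm)
    {
        int mpi_size, mpi_rank;
        int recvcounts[64], displs[64]; /* assumes mpi_size <= 64 for the sketch */

        MPI_Comm_size(comm, &mpi_size);
        MPI_Comm_rank(comm, &mpi_rank);

        for (int r = 0; r < mpi_size; r++) {
            recvcounts[r] = (r == skip_rank) ? 0 : elems_per_rank;
            displs[r]     = r * elems_per_rank;
        }

        MPI_Allgatherv(local, (mpi_rank == skip_rank) ? 0 : elems_per_rank, MPI_INT,
                       global, recvcounts, displs, MPI_INT, comm);
    }
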
@@ -4510,29 +5241,81 @@ test_read_filtered_dataset_single_no_selection(const char *parent_group, H5Z_fil
*/
static void
test_read_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id)
+ hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode)
{
- C_DATATYPE *read_buf = NULL;
- C_DATATYPE *correct_buf = NULL;
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
hsize_t dataset_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t chunk_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t sel_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
- size_t read_buf_size, correct_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ size_t data_size, read_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
- HDputs("Testing read from filtered chunks with all processes having no selection");
+ puts("Testing read from filtered chunks with all processes having no selection");
dataset_dims[0] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS;
dataset_dims[1] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
/* Setup the buffer for writing and for comparison */
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+ data_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE);
+
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) {
+ C_DATATYPE *tmp_buf = calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
+
+ /* Fill buffer with garbage data before write call */
+ memset(tmp_buf, 255, data_size);
+
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
+ }
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
- correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ VRFY((H5Pset_chunk(plist_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, plist_id, test_mode, &num_dsets, dset_ids);
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ MPI_Barrier(comm);
if (MAINPROCESS) {
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -4549,82 +5332,75 @@ test_read_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- /* Create the dataspace for the dataset */
- filespace = H5Screate_simple(READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
- VRFY((filespace >= 0), "File dataspace creation succeeded");
-
- /* Create chunked dataset */
- chunk_dims[0] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
- chunk_dims[1] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ open_datasets(group_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode,
+ dset_ids);
- plist_id = H5Pcopy(dcpl_id);
- VRFY((plist_id >= 0), "DCPL copy succeeded");
+ select_all(num_dsets, dset_ids, fspace_ids);
- VRFY((H5Pset_chunk(plist_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
-
- /* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
-
- dset_id = H5Dcreate2(group_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
- filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs,
+ test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
-
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
- "Dataset write succeeded");
+ plist_id = H5Dget_create_plist(dset_ids[0]);
+ VRFY((plist_id >= 0), "H5Dget_create_plist succeeded");
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- /* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
+ MPI_Barrier(comm);
+
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(group_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode,
+ dset_ids);
sel_dims[0] = sel_dims[1] = 0;
- memspace = H5Screate_simple(READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+ select_none(num_dsets, dset_ids, fspace_ids);
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+ read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE);
- VRFY((H5Sselect_none(filespace) >= 0), "Select none succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ read_bufs[dset_idx] = calloc(1, read_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+ }
- read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*read_buf);
+    /* Clear the data buffers that will be used for comparison, since
+     * no data should end up being read
+     */

+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ memset(data_bufs_nc[dset_idx], 0, data_size);
- read_buf = (C_DATATYPE *)calloc(1, read_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs,
+ test_mode);
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)),
+ "Data verification succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(read_bufs[dset_idx]);
- if (read_buf)
- free(read_buf);
- if (correct_buf)
- free(correct_buf);
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
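
For the all-no-selection case the expectation is simply that a collective read in which every rank selects nothing leaves the read buffers untouched. A per-rank sketch using the plain HDF5 calls that this change folds into the select_none()/read_datasets() helpers (zero-sized memory dataspace plus H5Sselect_none, as in the removed lines); the function name and int element type are illustrative and error checks are omitted:

    #include <hdf5.h>
    #include <stdlib.h>
    #include <string.h>

    /* Sketch only: read `dset_id` collectively with an empty selection on
     * this rank and confirm the pre-zeroed buffer `buf` of `nelems` ints
     * was not modified. Returns 0 on success, -1 on mismatch. */
    static int
    read_nothing_and_verify(hid_t dset_id, hid_t dxpl_id, int *buf, size_t nelems)
    {
        hsize_t zero_dims[2] = {0, 0};
        hid_t   memspace     = H5Screate_simple(2, zero_dims, NULL);
        hid_t   filespace    = H5Dget_space(dset_id);
        int    *expected     = calloc(nelems, sizeof(int));
        int     ret;

        H5Sselect_none(filespace);

        /* The collective read completes even though no elements are selected */
        H5Dread(dset_id, H5T_NATIVE_INT, memspace, filespace, dxpl_id, buf);

        ret = memcmp(buf, expected, nelems * sizeof(int)) ? -1 : 0;

        free(expected);
        H5Sclose(filespace);
        H5Sclose(memspace);
        return ret;
    }
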
@@ -4644,41 +5420,92 @@ test_read_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter
*/
static void
test_read_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id)
+ hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode)
{
- C_DATATYPE *correct_buf = NULL;
- C_DATATYPE *read_buf = NULL;
- C_DATATYPE *global_buf = NULL;
- hsize_t *coords = NULL;
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *global_buf = NULL;
+ hsize_t *coords = NULL;
hsize_t dataset_dims[READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t chunk_dims[READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t sel_dims[READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t flat_dims[1];
- size_t i, j, read_buf_size, correct_buf_size;
+ size_t data_size, read_buf_size;
size_t num_points;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
- hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
int *recvcounts = NULL;
int *displs = NULL;
+ int mpi_code;
if (MAINPROCESS)
- HDputs("Testing read from filtered chunks with point selection");
+ puts("Testing read from filtered chunks with point selection");
dataset_dims[0] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS;
dataset_dims[1] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS;
/* Setup the buffer for writing and for comparison */
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+ data_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE);
+
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) {
+ C_DATATYPE *tmp_buf = calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
+
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] = (C_DATATYPE)((dataset_dims[1] * (j / ((hsize_t)mpi_size * dataset_dims[1]))) +
+ (j % dataset_dims[1]) +
+ (((j % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) %
+ dataset_dims[1]) +
+ dset_idx);
+
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
+ }
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, plist_id, test_mode, &num_dsets, dset_ids);
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
- correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
- for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] =
- (C_DATATYPE)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) +
- (i % dataset_dims[1]) +
- (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
+ MPI_Barrier(comm);
if (MAINPROCESS) {
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -4695,52 +5522,39 @@ test_read_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- /* Create the dataspace for the dataset */
- filespace = H5Screate_simple(READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
- VRFY((filespace >= 0), "File dataspace creation succeeded");
+ open_datasets(group_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode,
+ dset_ids);
- /* Create chunked dataset */
- chunk_dims[0] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS;
- chunk_dims[1] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ select_all(num_dsets, dset_ids, fspace_ids);
- plist_id = H5Pcopy(dcpl_id);
- VRFY((plist_id >= 0), "DCPL copy succeeded");
-
- VRFY((H5Pset_chunk(plist_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
-
- /* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
-
- dset_id = H5Dcreate2(group_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
- filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs,
+ test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
-
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
- "Dataset write succeeded");
+ plist_id = H5Dget_create_plist(dset_ids[0]);
+ VRFY((plist_id >= 0), "H5Dget_create_plist succeeded");
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- /* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
+ MPI_Barrier(comm);
+
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(group_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode,
+ dset_ids);
sel_dims[0] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS / (hsize_t)mpi_size;
sel_dims[1] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS;
@@ -4748,92 +5562,87 @@ test_read_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_
/* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
flat_dims[0] = sel_dims[0] * sel_dims[1];
- memspace = H5Screate_simple(1, flat_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
/* Set up point selection */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
num_points = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS *
(hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t)mpi_size;
coords = (hsize_t *)calloc(1, 2 * num_points * sizeof(*coords));
VRFY((NULL != coords), "Coords calloc succeeded");
- for (i = 0; i < num_points; i++)
- for (j = 0; j < READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS; j++)
+ for (size_t i = 0; i < num_points; i++)
+ for (size_t j = 0; j < READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS; j++)
coords[(i * READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS) + j] =
(j > 0) ? (i % (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS)
: ((hsize_t)mpi_rank +
((hsize_t)mpi_size * (i / (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS)));
- VRFY((H5Sselect_elements(filespace, H5S_SELECT_SET, (hsize_t)num_points, (const hsize_t *)coords) >= 0),
- "Point selection succeeded");
+ select_elements(num_dsets, dset_ids, num_points, coords, fspace_ids);
- read_buf_size = flat_dims[0] * sizeof(*read_buf);
+ read_buf_size = flat_dims[0] * sizeof(C_DATATYPE);
- read_buf = (C_DATATYPE *)calloc(1, read_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ read_bufs[dset_idx] = calloc(1, read_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+ }
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs,
+ test_mode);
- global_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
+ /* Collect each piece of data from all ranks into a global buffer on all ranks */
+ global_buf = calloc(1, data_size);
VRFY((NULL != global_buf), "calloc succeeded");
+ recvcounts = calloc(1, (size_t)mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "calloc succeeded");
+ displs = calloc(1, (size_t)mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "calloc succeeded");
/*
* Since these chunks are shared, run multiple rounds of MPI_Allgatherv
* to collect all of the pieces into their appropriate locations. The
- * number of times MPI_Allgatherv is run should be equal to the number
- * of chunks in the first dimension of the dataset.
+ * number of times MPI_Allgatherv is run for each dataset should be equal
+ * to the number of chunks in the first dimension of the dataset.
*/
- {
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
size_t original_loop_count = dataset_dims[0] / (hsize_t)mpi_size;
- size_t cur_loop_count = original_loop_count;
size_t total_recvcounts = 0;
- recvcounts = (int *)calloc(1, (size_t)mpi_size * sizeof(*recvcounts));
- VRFY((NULL != recvcounts), "calloc succeeded");
+ for (size_t j = 0; j < (size_t)mpi_size; j++) {
+ recvcounts[j] = (int)dataset_dims[1];
+ total_recvcounts += (size_t)recvcounts[j];
- displs = (int *)calloc(1, (size_t)mpi_size * sizeof(*displs));
- VRFY((NULL != displs), "calloc succeeded");
-
- for (i = 0; i < (size_t)mpi_size; i++) {
- recvcounts[i] = (int)dataset_dims[1];
- total_recvcounts += (size_t)recvcounts[i];
+ displs[j] = (int)(j * dataset_dims[1]);
}
- for (i = 0; i < (size_t)mpi_size; i++)
- displs[i] = (int)(i * dataset_dims[1]);
+ for (size_t cur_loop_count = original_loop_count; cur_loop_count; cur_loop_count--) {
+ C_DATATYPE *tmp_buf = read_bufs[dset_idx];
+ C_DATATYPE *tmp_glob_buf = (C_DATATYPE *)global_buf;
- for (; cur_loop_count; cur_loop_count--) {
- VRFY((MPI_SUCCESS ==
- MPI_Allgatherv(&read_buf[(original_loop_count - cur_loop_count) * dataset_dims[1]],
- recvcounts[mpi_rank], C_DATATYPE_MPI,
- &global_buf[(original_loop_count - cur_loop_count) * total_recvcounts],
- recvcounts, displs, C_DATATYPE_MPI, comm)),
- "MPI_Allgatherv succeeded");
+ mpi_code = MPI_Allgatherv(
+ &tmp_buf[(original_loop_count - cur_loop_count) * dataset_dims[1]], recvcounts[mpi_rank],
+ C_DATATYPE_MPI, &tmp_glob_buf[(original_loop_count - cur_loop_count) * total_recvcounts],
+ recvcounts, displs, C_DATATYPE_MPI, comm);
+ VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded");
}
+
+ VRFY((0 == memcmp(global_buf, data_bufs[dset_idx], data_size)), "Data verification succeeded");
}
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ free(displs);
+ free(recvcounts);
+ free(global_buf);
- if (displs)
- free(displs);
- if (recvcounts)
- free(recvcounts);
- if (global_buf)
- free(global_buf);
- if (read_buf)
- free(read_buf);
- if (correct_buf)
- free(correct_buf);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(read_bufs[dset_idx]);
+
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
free(coords);
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
+
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
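
The point-selection read builds a flat coords array of (row, column) pairs so that each rank picks up every mpi_size-th row of the dataset before handing it to H5Sselect_elements via the select_elements() helper. A compact sketch of constructing that coordinate list with the plain API (illustrative function name, NULL/error checks omitted, not part of the patch):

    #include <hdf5.h>
    #include <stdlib.h>

    /* Sketch only: point-select every mpi_size-th row of a 2-D dataset for
     * this rank. `rows_per_rank` rows of `ncols` columns are selected. */
    static herr_t
    select_rank_rows_as_points(hid_t filespace, int mpi_rank, int mpi_size,
                               hsize_t rows_per_rank, hsize_t ncols)
    {
        size_t   num_points = (size_t)(rows_per_rank * ncols);
        hsize_t *coords     = malloc(2 * num_points * sizeof(*coords));
        herr_t   ret;

        for (size_t i = 0; i < num_points; i++) {
            coords[2 * i]     = (hsize_t)mpi_rank + (hsize_t)mpi_size * (i / ncols); /* row    */
            coords[2 * i + 1] = i % ncols;                                           /* column */
        }

        ret = H5Sselect_elements(filespace, H5S_SELECT_SET, num_points, coords);

        free(coords);
        return ret;
    }
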
@@ -4856,11 +5665,12 @@ test_read_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_
*/
static void
test_read_filtered_dataset_interleaved_read(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id)
+ hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode)
{
- C_DATATYPE *read_buf = NULL;
- C_DATATYPE *correct_buf = NULL;
- C_DATATYPE *global_buf = NULL;
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *global_buf = NULL;
hsize_t dataset_dims[INTERLEAVED_READ_FILTERED_DATASET_DIMS];
hsize_t chunk_dims[INTERLEAVED_READ_FILTERED_DATASET_DIMS];
hsize_t sel_dims[INTERLEAVED_READ_FILTERED_DATASET_DIMS];
@@ -4869,38 +5679,89 @@ test_read_filtered_dataset_interleaved_read(const char *parent_group, H5Z_filter
hsize_t count[INTERLEAVED_READ_FILTERED_DATASET_DIMS];
hsize_t block[INTERLEAVED_READ_FILTERED_DATASET_DIMS];
hsize_t flat_dims[1];
- size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
- hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ size_t data_size, read_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
int *recvcounts = NULL;
int *displs = NULL;
+ int mpi_code;
if (MAINPROCESS)
- HDputs("Testing interleaved read from filtered chunks");
+ puts("Testing interleaved read from filtered chunks");
dataset_dims[0] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NROWS;
dataset_dims[1] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NCOLS;
/* Setup the buffer for writing and for comparison */
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+ data_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE);
+
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) {
+ C_DATATYPE *tmp_buf = calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
+
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] =
+ /* Add the Column Index */
+ (C_DATATYPE)((j % (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NCOLS)
+
+ /* Add the Row Index */
+ + ((j % (hsize_t)(mpi_size * INTERLEAVED_READ_FILTERED_DATASET_NCOLS)) /
+ (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NCOLS)
+
+                     /* Add the offset accumulated each time a rank moves down to its next
+                        vertical section of the dataset */
+ + ((hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NCOLS *
+ (j / (hsize_t)(mpi_size * INTERLEAVED_READ_FILTERED_DATASET_NCOLS)))
+
+ /* Add an increment factor for the multi-dataset case */
+ + dset_idx);
+
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
+ }
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
- correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(INTERLEAVED_READ_FILTERED_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
- for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- /* Add Column Index */
- correct_buf[i] =
- (C_DATATYPE)((i % (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NCOLS)
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS;
+ chunk_dims[1] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS;
- /* Add the Row Index */
- + ((i % (hsize_t)(mpi_size * INTERLEAVED_READ_FILTERED_DATASET_NCOLS)) /
- (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NCOLS)
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
- /* Add the amount that gets added when a rank moves down to its next section
- vertically in the dataset */
- + ((hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NCOLS *
- (i / (hsize_t)(mpi_size * INTERLEAVED_READ_FILTERED_DATASET_NCOLS))));
+ VRFY((H5Pset_chunk(plist_id, INTERLEAVED_READ_FILTERED_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, INTERLEAVED_READ_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, plist_id,
+ test_mode, &num_dsets, dset_ids);
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ MPI_Barrier(comm);
if (MAINPROCESS) {
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -4917,52 +5778,37 @@ test_read_filtered_dataset_interleaved_read(const char *parent_group, H5Z_filter
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- /* Create the dataspace for the dataset */
- filespace = H5Screate_simple(INTERLEAVED_READ_FILTERED_DATASET_DIMS, dataset_dims, NULL);
- VRFY((filespace >= 0), "File dataspace creation succeeded");
-
- /* Create chunked dataset */
- chunk_dims[0] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS;
- chunk_dims[1] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS;
-
- plist_id = H5Pcopy(dcpl_id);
- VRFY((plist_id >= 0), "DCPL copy succeeded");
-
- VRFY((H5Pset_chunk(plist_id, INTERLEAVED_READ_FILTERED_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ open_datasets(group_id, INTERLEAVED_READ_FILTERED_DATASET_NAME, num_dsets, test_mode, dset_ids);
- /* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+ select_all(num_dsets, dset_ids, fspace_ids);
- dset_id = H5Dcreate2(group_id, INTERLEAVED_READ_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs,
+ test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
-
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
- "Dataset write succeeded");
+ plist_id = H5Dget_create_plist(dset_ids[0]);
+ VRFY((plist_id >= 0), "H5Dget_create_plist succeeded");
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- /* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
+ MPI_Barrier(comm);
+
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(group_id, INTERLEAVED_READ_FILTERED_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, INTERLEAVED_READ_FILTERED_DATASET_NAME, num_dsets, test_mode, dset_ids);
sel_dims[0] = (hsize_t)(INTERLEAVED_READ_FILTERED_DATASET_NROWS / mpi_size);
sel_dims[1] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NCOLS;
@@ -4970,13 +5816,6 @@ test_read_filtered_dataset_interleaved_read(const char *parent_group, H5Z_filter
/* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
flat_dims[0] = sel_dims[0] * sel_dims[1];
- memspace = H5Screate_simple(1, flat_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
/*
* Each process defines the dataset selection in the file and
* reads it to the selection in memory
@@ -4992,27 +5831,25 @@ test_read_filtered_dataset_interleaved_read(const char *parent_group, H5Z_filter
start[0] = (hsize_t)mpi_rank;
start[1] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
- read_buf_size = flat_dims[0] * sizeof(*read_buf);
+ read_buf_size = flat_dims[0] * sizeof(C_DATATYPE);
- read_buf = (C_DATATYPE *)calloc(1, read_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ read_bufs[dset_idx] = calloc(1, read_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+ }
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs,
+ test_mode);
- global_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
+ /* Collect each piece of data from all ranks into a global buffer on all ranks */
+ global_buf = calloc(1, data_size);
VRFY((NULL != global_buf), "calloc succeeded");
+ recvcounts = calloc(1, (size_t)mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "calloc succeeded");
+ displs = calloc(1, (size_t)mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "calloc succeeded");
/*
* Since these chunks are shared, run multiple rounds of MPI_Allgatherv
@@ -5020,49 +5857,45 @@ test_read_filtered_dataset_interleaved_read(const char *parent_group, H5Z_filter
* number of times MPI_Allgatherv is run should be equal to the number
* of chunks in the first dimension of the dataset.
*/
- {
- size_t loop_count = count[0];
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
size_t total_recvcounts = 0;
- recvcounts = (int *)calloc(1, (size_t)mpi_size * sizeof(*recvcounts));
- VRFY((NULL != recvcounts), "calloc succeeded");
-
- displs = (int *)calloc(1, (size_t)mpi_size * sizeof(*displs));
- VRFY((NULL != displs), "calloc succeeded");
+ for (size_t j = 0; j < (size_t)mpi_size; j++) {
+ recvcounts[j] = (int)dataset_dims[1];
+ total_recvcounts += (size_t)recvcounts[j];
- for (i = 0; i < (size_t)mpi_size; i++) {
- recvcounts[i] = (int)dataset_dims[1];
- total_recvcounts += (size_t)recvcounts[i];
+ displs[j] = (int)(j * dataset_dims[1]);
}
- for (i = 0; i < (size_t)mpi_size; i++)
- displs[i] = (int)(i * dataset_dims[1]);
+ for (size_t loop_count = count[0]; loop_count; loop_count--) {
+ C_DATATYPE *tmp_buf = read_bufs[dset_idx];
+ C_DATATYPE *tmp_glob_buf = (C_DATATYPE *)global_buf;
- for (; loop_count; loop_count--) {
- VRFY((MPI_SUCCESS == MPI_Allgatherv(&read_buf[(count[0] - loop_count) * dataset_dims[1]],
- recvcounts[mpi_rank], C_DATATYPE_MPI,
- &global_buf[(count[0] - loop_count) * total_recvcounts],
- recvcounts, displs, C_DATATYPE_MPI, comm)),
- "MPI_Allgatherv succeeded");
+ mpi_code =
+ MPI_Allgatherv(&tmp_buf[(count[0] - loop_count) * dataset_dims[1]], recvcounts[mpi_rank],
+ C_DATATYPE_MPI, &tmp_glob_buf[(count[0] - loop_count) * total_recvcounts],
+ recvcounts, displs, C_DATATYPE_MPI, comm);
+ VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded");
}
+
+ VRFY((0 == memcmp(global_buf, data_bufs[dset_idx], data_size)), "Data verification succeeded");
}
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ free(displs);
+ free(recvcounts);
+ free(global_buf);
- if (displs)
- free(displs);
- if (recvcounts)
- free(recvcounts);
- if (global_buf)
- free(global_buf);
- if (read_buf)
- free(read_buf);
- if (correct_buf)
- free(correct_buf);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(read_bufs[dset_idx]);
+
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -5082,13 +5915,15 @@ test_read_filtered_dataset_interleaved_read(const char *parent_group, H5Z_filter
*/
static void
test_read_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode)
{
MPI_Datatype vector_type;
MPI_Datatype resized_vector_type;
- C_DATATYPE *read_buf = NULL;
- C_DATATYPE *correct_buf = NULL;
- C_DATATYPE *global_buf = NULL;
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *global_buf = NULL;
hsize_t dataset_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
hsize_t chunk_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
hsize_t sel_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
@@ -5097,26 +5932,76 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group
hsize_t count[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
hsize_t block[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
hsize_t flat_dims[1];
- size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ size_t data_size, read_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
+ int mpi_code;
if (MAINPROCESS)
- HDputs("Testing read from unshared filtered chunks on separate pages in 3D dataset");
+ puts("Testing read from unshared filtered chunks on separate pages in 3D dataset");
dataset_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
dataset_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS;
dataset_dims[2] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH;
/* Setup the buffer for writing and for comparison */
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
+ data_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(C_DATATYPE);
+
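+    /* Fill one expected-data buffer per dataset; each buffer's values are offset by the
+     * dataset index so that every dataset can be verified independently in the
+     * multi-dataset test modes. */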
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) {
+ C_DATATYPE *tmp_buf = calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
+
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] = (C_DATATYPE)((j % (hsize_t)mpi_size) + (j / (hsize_t)mpi_size) + dset_idx);
+
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
+ }
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
+ chunk_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
+ chunk_dims[2] = 1;
+
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, plist_id, test_mode, &num_dsets, dset_ids);
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
- correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
- for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = (C_DATATYPE)((i % (hsize_t)mpi_size) + (i / (hsize_t)mpi_size));
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ MPI_Barrier(comm);
if (MAINPROCESS) {
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -5133,55 +6018,39 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- /* Create the dataspace for the dataset */
- filespace =
- H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, dataset_dims, NULL);
- VRFY((filespace >= 0), "File dataspace creation succeeded");
-
- /* Create chunked dataset */
- chunk_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
- chunk_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
- chunk_dims[2] = 1;
-
- plist_id = H5Pcopy(dcpl_id);
- VRFY((plist_id >= 0), "DCPL copy succeeded");
-
- VRFY(
- (H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
+ open_datasets(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, num_dsets, test_mode,
+ dset_ids);
- /* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+ select_all(num_dsets, dset_ids, fspace_ids);
- dset_id = H5Dcreate2(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME,
- HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs,
+ test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
-
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
- "Dataset write succeeded");
+ plist_id = H5Dget_create_plist(dset_ids[0]);
+ VRFY((plist_id >= 0), "H5Dget_create_plist succeeded");
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- /* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
+ MPI_Barrier(comm);
+
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, num_dsets, test_mode,
+ dset_ids);
sel_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
sel_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS;
@@ -5190,13 +6059,6 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group
/* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
flat_dims[0] = sel_dims[0] * sel_dims[1] * sel_dims[2];
- memspace = H5Screate_simple(1, flat_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
/*
* Each process defines the dataset selection in the file and
* reads it to the selection in memory
@@ -5216,63 +6078,66 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group
start[1] = 0;
start[2] = (hsize_t)mpi_rank;
- if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
- read_buf_size = flat_dims[0] * sizeof(*read_buf);
+ read_buf_size = flat_dims[0] * sizeof(C_DATATYPE);
- read_buf = (C_DATATYPE *)calloc(1, read_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ read_bufs[dset_idx] = calloc(1, read_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+ }
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs,
+ test_mode);
- global_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
+ /* Collect each piece of data from all ranks into a global buffer on all ranks */
+ global_buf = calloc(1, data_size);
VRFY((NULL != global_buf), "calloc succeeded");
/*
* Due to the nature of 3-dimensional reading, create an MPI vector type that allows each
* rank to write to the nth position of the global data buffer, where n is the rank number.
*/
- VRFY((MPI_SUCCESS == MPI_Type_vector((int)flat_dims[0], 1, mpi_size, C_DATATYPE_MPI, &vector_type)),
- "MPI_Type_vector succeeded");
- VRFY((MPI_SUCCESS == MPI_Type_commit(&vector_type)), "MPI_Type_commit succeeded");
+ mpi_code = MPI_Type_vector((int)flat_dims[0], 1, mpi_size, C_DATATYPE_MPI, &vector_type);
+ VRFY((MPI_SUCCESS == mpi_code), "MPI_Type_vector succeeded");
+ mpi_code = MPI_Type_commit(&vector_type);
+ VRFY((MPI_SUCCESS == mpi_code), "MPI_Type_commit succeeded");
/*
* Resize the type to allow interleaving,
* so make it only one MPI_LONG wide
*/
- VRFY((MPI_SUCCESS == MPI_Type_create_resized(vector_type, 0, sizeof(long), &resized_vector_type)),
- "MPI_Type_create_resized");
- VRFY((MPI_SUCCESS == MPI_Type_commit(&resized_vector_type)), "MPI_Type_commit succeeded");
+ mpi_code = MPI_Type_create_resized(vector_type, 0, sizeof(long), &resized_vector_type);
+    VRFY((MPI_SUCCESS == mpi_code), "MPI_Type_create_resized succeeded");
+ mpi_code = MPI_Type_commit(&resized_vector_type);
+ VRFY((MPI_SUCCESS == mpi_code), "MPI_Type_commit succeeded");
- VRFY((MPI_SUCCESS == MPI_Allgather(read_buf, (int)flat_dims[0], C_DATATYPE_MPI, global_buf, 1,
- resized_vector_type, comm)),
- "MPI_Allgather succeeded");
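+    /*
+     * Placement sketch (illustrative, e.g. with 2 ranks and flat_dims[0] == 4): since the
+     * resized type's extent is a single element, rank 0's values land at global indices
+     * 0, 2, 4, 6 and rank 1's at 1, 3, 5, 7.
+     */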
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ mpi_code = MPI_Allgather(read_bufs[dset_idx], (int)flat_dims[0], C_DATATYPE_MPI, global_buf, 1,
+ resized_vector_type, comm);
+ VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgather succeeded");
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ VRFY((0 == memcmp(global_buf, data_bufs[dset_idx], data_size)), "Data verification succeeded");
+ }
- VRFY((MPI_SUCCESS == MPI_Type_free(&vector_type)), "MPI_Type_free succeeded");
- VRFY((MPI_SUCCESS == MPI_Type_free(&resized_vector_type)), "MPI_Type_free succeeded");
+ mpi_code = MPI_Type_free(&vector_type);
+ VRFY((MPI_SUCCESS == mpi_code), "MPI_Type_free succeeded");
+ mpi_code = MPI_Type_free(&resized_vector_type);
+ VRFY((MPI_SUCCESS == mpi_code), "MPI_Type_free succeeded");
- if (global_buf)
- free(global_buf);
- if (read_buf)
- free(read_buf);
- if (correct_buf)
- free(correct_buf);
+ free(global_buf);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(read_bufs[dset_idx]);
+
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -5299,11 +6164,13 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group
*/
static void
test_read_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode)
{
- C_DATATYPE *read_buf = NULL;
- C_DATATYPE *correct_buf = NULL;
- C_DATATYPE *global_buf = NULL;
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *global_buf = NULL;
hsize_t dataset_dims[READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t chunk_dims[READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t sel_dims[READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
@@ -5312,29 +6179,79 @@ test_read_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_
hsize_t count[READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t block[READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t flat_dims[1];
- size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
- hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ size_t data_size, read_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
int *recvcounts = NULL;
int *displs = NULL;
+ int mpi_code;
if (MAINPROCESS)
- HDputs("Testing read from unshared transformed and filtered chunks");
+ puts("Testing read from unshared transformed and filtered chunks");
dataset_dims[0] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NROWS;
dataset_dims[1] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS;
/* Setup the buffer for writing and for comparison */
- correct_buf_size = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NROWS *
- (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS * sizeof(*correct_buf);
+ data_size = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NROWS *
+ (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS * sizeof(C_DATATYPE);
+
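+    /* Expected values: the element's index within a rank's block of rows plus the index of
+     * that block, offset by the dataset index in the multi-dataset test modes. */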
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) {
+ C_DATATYPE *tmp_buf = calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
+
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] =
+ (C_DATATYPE)((j % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) +
+ (j / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + dset_idx);
+
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
+ }
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
- correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NCOLS;
- for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) +
- (i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])));
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, plist_id, test_mode, &num_dsets, dset_ids);
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ MPI_Barrier(comm);
if (MAINPROCESS) {
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -5351,34 +6268,8 @@ test_read_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- /* Create the dataspace for the dataset */
- filespace =
- H5Screate_simple(READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
- VRFY((filespace >= 0), "File dataspace creation succeeded");
-
- /* Create chunked dataset */
- chunk_dims[0] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS;
- chunk_dims[1] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NCOLS;
-
- plist_id = H5Pcopy(dcpl_id);
- VRFY((plist_id >= 0), "DCPL copy succeeded");
-
- VRFY(
- (H5Pset_chunk(plist_id, READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
-
- /* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
-
- dset_id = H5Dcreate2(group_id, READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME,
- HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
-
- /* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
-
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ open_datasets(group_id, READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode,
+ dset_ids);
/* Create property list for collective dataset read */
plist_id = H5Pcreate(H5P_DATASET_XFER);
@@ -5387,30 +6278,38 @@ test_read_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_
/* Set data transform expression */
VRFY((H5Pset_data_transform(plist_id, "x") >= 0), "Set data transform expression succeeded");
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, correct_buf) >= 0),
- "Dataset write succeeded");
+ select_all(num_dsets, dset_ids, fspace_ids);
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+        write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, plist_id, data_bufs,
+ test_mode);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
/* Verify space allocation status */
- plist_id = H5Dget_create_plist(dset_id);
+ plist_id = H5Dget_create_plist(dset_ids[0]);
VRFY((plist_id >= 0), "H5Dget_create_plist succeeded");
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
+ MPI_Barrier(comm);
+
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(group_id, READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, num_dsets, test_mode,
+ dset_ids);
sel_dims[0] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS;
sel_dims[1] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS;
@@ -5418,13 +6317,6 @@ test_read_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_
/* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
flat_dims[0] = sel_dims[0] * sel_dims[1];
- memspace = H5Screate_simple(1, flat_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
/*
* Each process defines the dataset selection in the file and reads
* it to the selection in memory
@@ -5439,16 +6331,7 @@ test_read_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_
start[0] = ((hsize_t)mpi_rank * (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS * count[0]);
start[1] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
/* Create property list for data transform */
plist_id = H5Pcopy(dxpl_id);
@@ -5457,50 +6340,52 @@ test_read_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_
/* Set data transform expression */
VRFY((H5Pset_data_transform(plist_id, "x") >= 0), "Set data transform expression succeeded");
- read_buf_size = flat_dims[0] * sizeof(*read_buf);
-
- read_buf = (C_DATATYPE *)calloc(1, read_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ read_buf_size = flat_dims[0] * sizeof(C_DATATYPE);
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ read_bufs[dset_idx] = calloc(1, read_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+ }
- global_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != global_buf), "calloc succeeded");
+    read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], plist_id, read_bufs,
+ test_mode);
/* Collect each piece of data from all ranks into a global buffer on all ranks */
- recvcounts = (int *)calloc(1, (size_t)mpi_size * sizeof(*recvcounts));
+ global_buf = calloc(1, data_size);
+ VRFY((NULL != global_buf), "calloc succeeded");
+ recvcounts = calloc(1, (size_t)mpi_size * sizeof(*recvcounts));
VRFY((NULL != recvcounts), "calloc succeeded");
+ displs = calloc(1, (size_t)mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "calloc succeeded");
- for (i = 0; i < (size_t)mpi_size; i++)
+ for (size_t i = 0; i < (size_t)mpi_size; i++) {
recvcounts[i] = (int)flat_dims[0];
+ displs[i] = (int)(i * flat_dims[0]);
+ }
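+    /* Each rank contributes flat_dims[0] elements, placed contiguously at offset
+     * mpi_rank * flat_dims[0] in the global buffer. */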
- displs = (int *)calloc(1, (size_t)mpi_size * sizeof(*displs));
- VRFY((NULL != displs), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ mpi_code = MPI_Allgatherv(read_bufs[dset_idx], (int)flat_dims[0], C_DATATYPE_MPI, global_buf,
+ recvcounts, displs, C_DATATYPE_MPI, comm);
+ VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded");
- for (i = 0; i < (size_t)mpi_size; i++)
- displs[i] = (int)(i * flat_dims[0]);
+ VRFY((0 == memcmp(global_buf, data_bufs[dset_idx], data_size)), "Data verification succeeded");
+ }
- VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts,
- displs, C_DATATYPE_MPI, comm)),
- "MPI_Allgatherv succeeded");
+ free(displs);
+ free(recvcounts);
+ free(global_buf);
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(read_bufs[dset_idx]);
- if (displs)
- free(displs);
- if (recvcounts)
- free(recvcounts);
- if (global_buf)
- free(global_buf);
- if (read_buf)
- free(read_buf);
- if (correct_buf)
- free(correct_buf);
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -5522,11 +6407,13 @@ test_read_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_
*/
static void
test_read_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode)
{
- C_DATATYPE *read_buf = NULL;
- C_DATATYPE *correct_buf = NULL;
- C_DATATYPE *global_buf = NULL;
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *global_buf = NULL;
hsize_t dataset_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
hsize_t chunk_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
hsize_t sel_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
@@ -5535,29 +6422,79 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H5
hsize_t count[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
hsize_t block[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
hsize_t flat_dims[1];
- size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
- hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ size_t data_size, read_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
int *recvcounts = NULL;
int *displs = NULL;
+ int mpi_code;
if (MAINPROCESS)
- HDputs("Testing read from unshared filtered chunks on the same pages in 3D dataset");
+ puts("Testing read from unshared filtered chunks on the same pages in 3D dataset");
dataset_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS;
dataset_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS;
dataset_dims[2] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH;
/* Setup the buffer for writing and for comparison */
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
+ data_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(C_DATATYPE);
+
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) {
+ C_DATATYPE *tmp_buf = calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
+
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] = (C_DATATYPE)((j % (dataset_dims[0] * dataset_dims[1])) +
+ (j / (dataset_dims[0] * dataset_dims[1])) + dset_idx);
+
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
+ }
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
- correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
+ chunk_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
+ chunk_dims[2] = 1;
- for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] * dataset_dims[1])) +
- (i / (dataset_dims[0] * dataset_dims[1])));
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, plist_id, test_mode, &num_dsets, dset_ids);
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ MPI_Barrier(comm);
if (MAINPROCESS) {
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -5574,55 +6511,39 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H5
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- /* Create the dataspace for the dataset */
- filespace =
- H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, dataset_dims, NULL);
- VRFY((filespace >= 0), "File dataspace creation succeeded");
-
- /* Create chunked dataset */
- chunk_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
- chunk_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
- chunk_dims[2] = 1;
-
- plist_id = H5Pcopy(dcpl_id);
- VRFY((plist_id >= 0), "DCPL copy succeeded");
-
- VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, chunk_dims) >=
- 0),
- "Chunk size set");
+ open_datasets(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, num_dsets, test_mode,
+ dset_ids);
- /* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+ select_all(num_dsets, dset_ids, fspace_ids);
- dset_id = H5Dcreate2(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME,
- HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs,
+ test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
-
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
- "Dataset write succeeded");
+ plist_id = H5Dget_create_plist(dset_ids[0]);
+ VRFY((plist_id >= 0), "H5Dget_create_plist succeeded");
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- /* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
+ MPI_Barrier(comm);
+
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, num_dsets, test_mode,
+ dset_ids);
sel_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
sel_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS;
@@ -5631,13 +6552,6 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H5
/* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
flat_dims[0] = sel_dims[0] * sel_dims[1] * sel_dims[2];
- memspace = H5Screate_simple(1, flat_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
/*
* Each process defines the dataset selection in the file and
* reads it to the selection in memory
@@ -5656,61 +6570,54 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H5
start[1] = 0;
start[2] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
- read_buf_size = flat_dims[0] * sizeof(*read_buf);
+ read_buf_size = flat_dims[0] * sizeof(C_DATATYPE);
- read_buf = (C_DATATYPE *)calloc(1, read_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
-
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ read_bufs[dset_idx] = calloc(1, read_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+ }
- global_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != global_buf), "calloc succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs,
+ test_mode);
/* Collect each piece of data from all ranks into a global buffer on all ranks */
- recvcounts = (int *)calloc(1, (size_t)mpi_size * sizeof(*recvcounts));
+ global_buf = calloc(1, data_size);
+ VRFY((NULL != global_buf), "calloc succeeded");
+ recvcounts = calloc(1, (size_t)mpi_size * sizeof(*recvcounts));
VRFY((NULL != recvcounts), "calloc succeeded");
+ displs = calloc(1, (size_t)mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "calloc succeeded");
- for (i = 0; i < (size_t)mpi_size; i++)
+ for (size_t i = 0; i < (size_t)mpi_size; i++) {
recvcounts[i] = (int)flat_dims[0];
+ displs[i] = (int)(i * flat_dims[0]);
+ }
- displs = (int *)calloc(1, (size_t)mpi_size * sizeof(*displs));
- VRFY((NULL != displs), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ mpi_code = MPI_Allgatherv(read_bufs[dset_idx], (int)flat_dims[0], C_DATATYPE_MPI, global_buf,
+ recvcounts, displs, C_DATATYPE_MPI, comm);
+ VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded");
- for (i = 0; i < (size_t)mpi_size; i++)
- displs[i] = (int)(i * flat_dims[0]);
+ VRFY((0 == memcmp(global_buf, data_bufs[dset_idx], data_size)), "Data verification succeeded");
+ }
- VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts,
- displs, C_DATATYPE_MPI, comm)),
- "MPI_Allgatherv succeeded");
+ free(displs);
+ free(recvcounts);
+ free(global_buf);
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(read_bufs[dset_idx]);
- if (displs)
- free(displs);
- if (recvcounts)
- free(recvcounts);
- if (global_buf)
- free(global_buf);
- if (read_buf)
- free(read_buf);
- if (correct_buf)
- free(correct_buf);
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -5731,13 +6638,14 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H5
*/
static void
test_read_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id)
+ hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode)
{
MPI_Datatype vector_type;
MPI_Datatype resized_vector_type;
- C_DATATYPE *read_buf = NULL;
- C_DATATYPE *correct_buf = NULL;
- C_DATATYPE *global_buf = NULL;
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *global_buf = NULL;
hsize_t dataset_dims[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
hsize_t chunk_dims[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
hsize_t sel_dims[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
@@ -5746,41 +6654,95 @@ test_read_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fil
hsize_t count[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
hsize_t block[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
hsize_t flat_dims[1];
- size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ size_t data_size, read_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
+ int mpi_code;
if (MAINPROCESS)
- HDputs("Testing read from shared filtered chunks in 3D dataset");
+ puts("Testing read from shared filtered chunks in 3D dataset");
dataset_dims[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_NROWS;
dataset_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_NCOLS;
dataset_dims[2] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_DEPTH;
/* Setup the buffer for writing and for comparison */
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
+ data_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(C_DATATYPE);
+
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) {
+ C_DATATYPE *tmp_buf = calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
+
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] =
+ /* Add the Column Index */
+ (C_DATATYPE)((j % (hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_DEPTH *
+ READ_SHARED_FILTERED_CHUNKS_3D_NCOLS))
+
+ /* Add the Row Index */
+ + ((j % (hsize_t)(mpi_size * READ_SHARED_FILTERED_CHUNKS_3D_DEPTH *
+ READ_SHARED_FILTERED_CHUNKS_3D_NCOLS)) /
+ (hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_DEPTH *
+ READ_SHARED_FILTERED_CHUNKS_3D_NCOLS))
+
+ /* Add the amount that gets added when a rank moves down to its next
+ section vertically in the dataset */
+ + ((hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_DEPTH *
+ READ_SHARED_FILTERED_CHUNKS_3D_NCOLS) *
+ (j / (hsize_t)(mpi_size * READ_SHARED_FILTERED_CHUNKS_3D_DEPTH *
+ READ_SHARED_FILTERED_CHUNKS_3D_NCOLS)))
+
+ /* Add an increment factor for the multi-dataset case */
+ + dset_idx);
+
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
+ }
- correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
- for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- /* Add the Column Index */
- correct_buf[i] = (C_DATATYPE)((i % (hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_DEPTH *
- READ_SHARED_FILTERED_CHUNKS_3D_NCOLS))
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
- /* Add the Row Index */
- + ((i % (hsize_t)(mpi_size * READ_SHARED_FILTERED_CHUNKS_3D_DEPTH *
- READ_SHARED_FILTERED_CHUNKS_3D_NCOLS)) /
- (hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_DEPTH *
- READ_SHARED_FILTERED_CHUNKS_3D_NCOLS))
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS;
+ chunk_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
+ chunk_dims[2] = 1;
+
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ plist_id, test_mode, &num_dsets, dset_ids);
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
- /* Add the amount that gets added when a rank moves down to its next
- section vertically in the dataset */
- + ((hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_DEPTH *
- READ_SHARED_FILTERED_CHUNKS_3D_NCOLS) *
- (i / (hsize_t)(mpi_size * READ_SHARED_FILTERED_CHUNKS_3D_DEPTH *
- READ_SHARED_FILTERED_CHUNKS_3D_NCOLS))));
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ MPI_Barrier(comm);
if (MAINPROCESS) {
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -5797,53 +6759,37 @@ test_read_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fil
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- /* Create the dataspace for the dataset */
- filespace = H5Screate_simple(READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, dataset_dims, NULL);
- VRFY((filespace >= 0), "File dataspace creation succeeded");
-
- /* Create chunked dataset */
- chunk_dims[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS;
- chunk_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
- chunk_dims[2] = 1;
+ open_datasets(group_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, num_dsets, test_mode, dset_ids);
- plist_id = H5Pcopy(dcpl_id);
- VRFY((plist_id >= 0), "DCPL copy succeeded");
+ select_all(num_dsets, dset_ids, fspace_ids);
- VRFY((H5Pset_chunk(plist_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
-
- /* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
-
- dset_id = H5Dcreate2(group_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME,
- filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs,
+ test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
-
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
- "Dataset write succeeded");
+ plist_id = H5Dget_create_plist(dset_ids[0]);
+ VRFY((plist_id >= 0), "H5Dget_create_plist succeeded");
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- /* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
+ MPI_Barrier(comm);
+
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(group_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, num_dsets, test_mode, dset_ids);
sel_dims[0] = (hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_NROWS / mpi_size);
sel_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_NCOLS;
@@ -5852,13 +6798,6 @@ test_read_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fil
/* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
flat_dims[0] = sel_dims[0] * sel_dims[1] * sel_dims[2];
- memspace = H5Screate_simple(1, flat_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
/*
* Each process defines the dataset selection in the file and
* reads it to the selection in memory
@@ -5876,26 +6815,20 @@ test_read_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fil
start[1] = 0;
start[2] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
- read_buf_size = flat_dims[0] * sizeof(*read_buf);
+ read_buf_size = flat_dims[0] * sizeof(C_DATATYPE);
- read_buf = (C_DATATYPE *)calloc(1, read_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ read_bufs[dset_idx] = calloc(1, read_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+ }
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs,
+ test_mode);
- global_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
+ /* Collect each piece of data from all ranks into a global buffer on all ranks */
+ global_buf = calloc(1, data_size);
VRFY((NULL != global_buf), "calloc succeeded");
{
@@ -5907,41 +6840,49 @@ test_read_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fil
* Due to the nature of 3-dimensional reading, create an MPI vector type that allows each
* rank to write to the nth position of the global data buffer, where n is the rank number.
*/
- VRFY(
- (MPI_SUCCESS == MPI_Type_vector((int)num_blocks, (int)run_length,
- (int)(mpi_size * (int)run_length), C_DATATYPE_MPI, &vector_type)),
- "MPI_Type_vector succeeded");
- VRFY((MPI_SUCCESS == MPI_Type_commit(&vector_type)), "MPI_Type_commit succeeded");
+ mpi_code = MPI_Type_vector((int)num_blocks, (int)run_length, (int)(mpi_size * (int)run_length),
+ C_DATATYPE_MPI, &vector_type);
+ VRFY((MPI_SUCCESS == mpi_code), "MPI_Type_vector succeeded");
+ mpi_code = MPI_Type_commit(&vector_type);
+ VRFY((MPI_SUCCESS == mpi_code), "MPI_Type_commit succeeded");
/*
* Resize the type to allow interleaving,
* so make it "run_length" MPI_LONGs wide
*/
- VRFY((MPI_SUCCESS == MPI_Type_create_resized(vector_type, 0, (MPI_Aint)(run_length * sizeof(long)),
- &resized_vector_type)),
- "MPI_Type_create_resized");
- VRFY((MPI_SUCCESS == MPI_Type_commit(&resized_vector_type)), "MPI_Type_commit succeeded");
+ mpi_code = MPI_Type_create_resized(vector_type, 0, (MPI_Aint)(run_length * sizeof(long)),
+ &resized_vector_type);
+        VRFY((MPI_SUCCESS == mpi_code), "MPI_Type_create_resized succeeded");
+ mpi_code = MPI_Type_commit(&resized_vector_type);
+ VRFY((MPI_SUCCESS == mpi_code), "MPI_Type_commit succeeded");
}
- VRFY((MPI_SUCCESS == MPI_Allgather(read_buf, (int)flat_dims[0], C_DATATYPE_MPI, global_buf, 1,
- resized_vector_type, comm)),
- "MPI_Allgatherv succeeded");
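+    /*
+     * Placement sketch (illustrative, e.g. with 2 ranks and run_length == 3): each rank's
+     * contribution starts run_length elements after the previous rank's, so rank 0's blocks
+     * land at global indices 0-2, 6-8, ... and rank 1's at 3-5, 9-11, ...
+     */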
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ mpi_code = MPI_Allgather(read_bufs[dset_idx], (int)flat_dims[0], C_DATATYPE_MPI, global_buf, 1,
+ resized_vector_type, comm);
+ VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded");
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ VRFY((0 == memcmp(global_buf, data_bufs[dset_idx], data_size)), "Data verification succeeded");
+ }
- VRFY((MPI_SUCCESS == MPI_Type_free(&vector_type)), "MPI_Type_free succeeded");
- VRFY((MPI_SUCCESS == MPI_Type_free(&resized_vector_type)), "MPI_Type_free succeeded");
+ mpi_code = MPI_Type_free(&vector_type);
+ VRFY((MPI_SUCCESS == mpi_code), "MPI_Type_free succeeded");
+ mpi_code = MPI_Type_free(&resized_vector_type);
+ VRFY((MPI_SUCCESS == mpi_code), "MPI_Type_free succeeded");
- if (global_buf)
- free(global_buf);
- if (read_buf)
- free(read_buf);
- if (correct_buf)
- free(correct_buf);
+ free(global_buf);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(read_bufs[dset_idx]);
+
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -5961,30 +6902,36 @@ test_read_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t fil
*/
static void
test_read_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode)
{
- COMPOUND_C_DATATYPE *read_buf = NULL;
- COMPOUND_C_DATATYPE *correct_buf = NULL;
- COMPOUND_C_DATATYPE *global_buf = NULL;
- hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
- hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
- hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
- hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
- hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
- hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
- hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
- hsize_t flat_dims[1];
- size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID,
- memtype = H5I_INVALID_HID;
- hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
- int *recvcounts = NULL;
- int *displs = NULL;
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *global_buf = NULL;
+ hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t data_size, read_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t memtype = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
+ int *recvcounts = NULL;
+ int *displs = NULL;
+ int mpi_code;
if (MAINPROCESS)
- HDputs("Testing read from unshared filtered chunks in Compound Datatype dataset without Datatype "
- "conversion");
+ puts("Testing read from unshared filtered chunks in Compound Datatype dataset without Datatype "
+ "conversion");
/* SZIP and ScaleOffset filters don't support compound types */
if (filter_id == H5Z_FILTER_SZIP || filter_id == H5Z_FILTER_SCALEOFFSET) {
@@ -5997,17 +6944,20 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group,
dataset_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS;
/* Setup the buffer for writing and for comparison */
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
-
- correct_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ data_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE);
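+    /* Every field of each element holds the same value, offset by the dataset index in the
+     * multi-dataset test modes. */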
- for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) {
- correct_buf[i].field1 = (short)((i % dataset_dims[1]) + (i / dataset_dims[1]));
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) {
+ COMPOUND_C_DATATYPE *tmp_buf = calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- correct_buf[i].field2 = (int)((i % dataset_dims[1]) + (i / dataset_dims[1]));
+ for (size_t j = 0; j < data_size / sizeof(COMPOUND_C_DATATYPE); j++) {
+ tmp_buf[j].field1 = (short)((j % dataset_dims[1]) + (j / dataset_dims[1]) + dset_idx);
+ tmp_buf[j].field2 = (int)((j % dataset_dims[1]) + (j / dataset_dims[1]) + dset_idx);
+ tmp_buf[j].field3 = (long)((j % dataset_dims[1]) + (j / dataset_dims[1]) + dset_idx);
+ }
- correct_buf[i].field3 = (long)((i % dataset_dims[1]) + (i / dataset_dims[1]));
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
}
/* Create the compound type for memory. */
@@ -6021,6 +6971,48 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group,
VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
"Datatype insertion succeeded");
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS,
+ dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
+ chunk_dims[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS;
+
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS,
+ chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, memtype,
+ filespace, plist_id, test_mode, &num_dsets, dset_ids);
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ MPI_Barrier(comm);
+
if (MAINPROCESS) {
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -6036,55 +7028,38 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group,
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- /* Create the dataspace for the dataset */
- filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS,
- dataset_dims, NULL);
- VRFY((filespace >= 0), "File dataspace creation succeeded");
-
- /* Create chunked dataset */
- chunk_dims[0] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
- chunk_dims[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS;
-
- plist_id = H5Pcopy(dcpl_id);
- VRFY((plist_id >= 0), "DCPL copy succeeded");
+ open_datasets(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, num_dsets,
+ test_mode, dset_ids);
- VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS,
- chunk_dims) >= 0),
- "Chunk size set");
+ select_all(num_dsets, dset_ids, fspace_ids);
- /* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
-
- dset_id = H5Dcreate2(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME,
- memtype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ write_datasets(num_dsets, dset_ids, memtype, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
-
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
-
- VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
- "Dataset write succeeded");
+ plist_id = H5Dget_create_plist(dset_ids[0]);
+ VRFY((plist_id >= 0), "H5Dget_create_plist succeeded");
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- /* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
+ MPI_Barrier(comm);
+
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id =
- H5Dopen2(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, num_dsets,
+ test_mode, dset_ids);
sel_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
sel_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
@@ -6092,13 +7067,6 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group,
/* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
flat_dims[0] = sel_dims[0] * sel_dims[1];
- memspace = H5Screate_simple(1, flat_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
/*
* Each process defines the dataset selection in the file and
* reads it to the selection in memory
@@ -6112,60 +7080,53 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group,
start[0] = 0;
start[1] = ((hsize_t)mpi_rank * READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS);
- if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ read_buf_size = flat_dims[0] * sizeof(COMPOUND_C_DATATYPE);
- read_buf_size = flat_dims[0] * sizeof(*read_buf);
-
- read_buf = (COMPOUND_C_DATATYPE *)calloc(1, read_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
-
- VRFY((H5Dread(dset_id, memtype, memspace, filespace, dxpl_id, read_buf) >= 0), "Dataset read succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ read_bufs[dset_idx] = calloc(1, read_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+ }
- global_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != global_buf), "calloc succeeded");
+ read_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, test_mode);
/* Collect each piece of data from all ranks into a global buffer on all ranks */
- recvcounts = (int *)calloc(1, (size_t)mpi_size * sizeof(*recvcounts));
+ global_buf = calloc(1, data_size);
+ VRFY((NULL != global_buf), "calloc succeeded");
+ recvcounts = calloc(1, (size_t)mpi_size * sizeof(*recvcounts));
VRFY((NULL != recvcounts), "calloc succeeded");
+ displs = calloc(1, (size_t)mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "calloc succeeded");
- for (i = 0; i < (size_t)mpi_size; i++)
- recvcounts[i] = (int)(flat_dims[0] * sizeof(*read_buf));
+ for (size_t i = 0; i < (size_t)mpi_size; i++) {
+ recvcounts[i] = (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE));
+ displs[i] = (int)(i * flat_dims[0] * sizeof(COMPOUND_C_DATATYPE));
+ }
- displs = (int *)calloc(1, (size_t)mpi_size * sizeof(*displs));
- VRFY((NULL != displs), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ mpi_code = MPI_Allgatherv(read_bufs[dset_idx], (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)),
+ MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm);
+ VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded");
- for (i = 0; i < (size_t)mpi_size; i++)
- displs[i] = (int)(i * flat_dims[0] * sizeof(*read_buf));
+ VRFY((0 == memcmp(global_buf, data_bufs[dset_idx], data_size)), "Data verification succeeded");
+ }
- VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE,
- global_buf, recvcounts, displs, MPI_BYTE, comm)),
- "MPI_Allgatherv succeeded");
+ free(displs);
+ free(recvcounts);
+ free(global_buf);
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(read_bufs[dset_idx]);
- if (displs)
- free(displs);
- if (recvcounts)
- free(recvcounts);
- if (global_buf)
- free(global_buf);
- if (read_buf)
- free(read_buf);
- if (correct_buf)
- free(correct_buf);
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
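For reference, the read-verification pattern shared by these refactored tests boils down to the sketch below: each rank reads its slab into a local buffer, the slabs are reassembled rank-by-rank with MPI_Allgatherv, and every rank compares the result against the expected data. The function and buffer names are illustrative, not part of the patch, and the layout only works because every rank's file selection is a contiguous, rank-ordered slab of equal size.

/* Minimal sketch of the gather-and-verify step (illustrative names only). */
#include <mpi.h>
#include <stdlib.h>
#include <string.h>

static int
gather_and_verify(const void *local_buf, size_t local_nbytes, const void *expected_buf,
                  size_t total_nbytes, MPI_Comm comm)
{
    void *global_buf = NULL;
    int  *recvcounts = NULL;
    int  *displs     = NULL;
    int   mpi_size   = 0;
    int   ok         = 0;

    MPI_Comm_size(comm, &mpi_size);

    global_buf = calloc(1, total_nbytes);
    recvcounts = calloc((size_t)mpi_size, sizeof(int));
    displs     = calloc((size_t)mpi_size, sizeof(int));
    if (!global_buf || !recvcounts || !displs)
        goto done;

    /* Every rank contributes the same number of bytes, laid out rank-by-rank */
    for (int i = 0; i < mpi_size; i++) {
        recvcounts[i] = (int)local_nbytes;
        displs[i]     = (int)((size_t)i * local_nbytes);
    }

    if (MPI_Allgatherv(local_buf, (int)local_nbytes, MPI_BYTE, global_buf, recvcounts, displs,
                       MPI_BYTE, comm) != MPI_SUCCESS)
        goto done;

    /* Verify the reassembled dataset on every rank */
    ok = (0 == memcmp(global_buf, expected_buf, total_nbytes));

done:
    free(displs);
    free(recvcounts);
    free(global_buf);
    return ok ? 0 : -1;
}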
@@ -6186,30 +7147,36 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group,
*/
static void
test_read_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode)
{
- COMPOUND_C_DATATYPE *read_buf = NULL;
- COMPOUND_C_DATATYPE *correct_buf = NULL;
- COMPOUND_C_DATATYPE *global_buf = NULL;
- hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
- hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
- hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
- hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
- hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
- hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
- hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
- hsize_t flat_dims[1];
- size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID,
- memtype = H5I_INVALID_HID;
- hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
- int *recvcounts = NULL;
- int *displs = NULL;
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *global_buf = NULL;
+ hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t data_size, read_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t memtype = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
+ int *recvcounts = NULL;
+ int *displs = NULL;
+ int mpi_code;
if (MAINPROCESS)
- HDputs("Testing read from shared filtered chunks in Compound Datatype dataset without Datatype "
- "conversion");
+ puts("Testing read from shared filtered chunks in Compound Datatype dataset without Datatype "
+ "conversion");
/* SZIP and ScaleOffset filters don't support compound types */
if (filter_id == H5Z_FILTER_SZIP || filter_id == H5Z_FILTER_SCALEOFFSET) {
@@ -6222,23 +7189,25 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, H
dataset_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS;
/* Setup the buffer for writing and for comparison */
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+ data_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE);
- correct_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) {
+ COMPOUND_C_DATATYPE *tmp_buf = calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) {
- correct_buf[i].field1 =
- (short)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) +
- (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
+ for (size_t j = 0; j < data_size / sizeof(COMPOUND_C_DATATYPE); j++) {
+ size_t val1 = (dataset_dims[1] * (j / ((hsize_t)mpi_size * dataset_dims[1])));
+ size_t val2 = (j % dataset_dims[1]);
+ size_t val3 = (((j % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]);
+ size_t val = val1 + val2 + val3 + dset_idx;
- correct_buf[i].field2 =
- (int)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) +
- (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
+ tmp_buf[j].field1 = (short)val;
+ tmp_buf[j].field2 = (int)val;
+ tmp_buf[j].field3 = (long)val;
+ }
- correct_buf[i].field3 =
- (long)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) +
- (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
}
/* Create the compound type for memory. */
@@ -6252,6 +7221,48 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, H
VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
"Datatype insertion succeeded");
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace =
+ H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS;
+ chunk_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
+
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS,
+ chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, memtype,
+ filespace, plist_id, test_mode, &num_dsets, dset_ids);
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ MPI_Barrier(comm);
+
if (MAINPROCESS) {
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -6267,55 +7278,38 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, H
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- /* Create the dataspace for the dataset */
- filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS,
- dataset_dims, NULL);
- VRFY((filespace >= 0), "File dataspace creation succeeded");
+ open_datasets(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, num_dsets,
+ test_mode, dset_ids);
- /* Create chunked dataset */
- chunk_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS;
- chunk_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
+ select_all(num_dsets, dset_ids, fspace_ids);
- plist_id = H5Pcopy(dcpl_id);
- VRFY((plist_id >= 0), "DCPL copy succeeded");
-
- VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS,
- chunk_dims) >= 0),
- "Chunk size set");
-
- /* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
-
- dset_id = H5Dcreate2(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME,
- memtype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ write_datasets(num_dsets, dset_ids, memtype, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
-
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
-
- VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
- "Dataset write succeeded");
+ plist_id = H5Dget_create_plist(dset_ids[0]);
+ VRFY((plist_id >= 0), "H5Dget_create_plist succeeded");
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- /* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
+ MPI_Barrier(comm);
+
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id =
- H5Dopen2(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, num_dsets,
+ test_mode, dset_ids);
sel_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t)mpi_size;
sel_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC;
@@ -6323,13 +7317,6 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, H
/* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
flat_dims[0] = sel_dims[0] * sel_dims[1];
- memspace = H5Screate_simple(1, flat_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
/*
* Each process defines the dataset selection in the file and
* reads it to the selection in memory
@@ -6343,60 +7330,53 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, H
start[0] = (hsize_t)mpi_rank;
start[1] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
- read_buf_size = flat_dims[0] * sizeof(*read_buf);
+ read_buf_size = flat_dims[0] * sizeof(COMPOUND_C_DATATYPE);
- read_buf = (COMPOUND_C_DATATYPE *)calloc(1, read_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
-
- VRFY((H5Dread(dset_id, memtype, memspace, filespace, dxpl_id, read_buf) >= 0), "Dataset read succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ read_bufs[dset_idx] = calloc(1, read_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+ }
- global_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != global_buf), "calloc succeeded");
+ read_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, test_mode);
/* Collect each piece of data from all ranks into a global buffer on all ranks */
- recvcounts = (int *)calloc(1, (size_t)mpi_size * sizeof(*recvcounts));
+ global_buf = calloc(1, data_size);
+ VRFY((NULL != global_buf), "calloc succeeded");
+ recvcounts = calloc(1, (size_t)mpi_size * sizeof(*recvcounts));
VRFY((NULL != recvcounts), "calloc succeeded");
+ displs = calloc(1, (size_t)mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "calloc succeeded");
- for (i = 0; i < (size_t)mpi_size; i++)
- recvcounts[i] = (int)(flat_dims[0] * sizeof(*read_buf));
+ for (size_t i = 0; i < (size_t)mpi_size; i++) {
+ recvcounts[i] = (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE));
+ displs[i] = (int)(i * flat_dims[0] * sizeof(COMPOUND_C_DATATYPE));
+ }
- displs = (int *)calloc(1, (size_t)mpi_size * sizeof(*displs));
- VRFY((NULL != displs), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ mpi_code = MPI_Allgatherv(read_bufs[dset_idx], (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)),
+ MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm);
+ VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded");
- for (i = 0; i < (size_t)mpi_size; i++)
- displs[i] = (int)(i * flat_dims[0] * sizeof(*read_buf));
+ VRFY((0 == memcmp(global_buf, data_bufs[dset_idx], data_size)), "Data verification succeeded");
+ }
- VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE,
- global_buf, recvcounts, displs, MPI_BYTE, comm)),
- "MPI_Allgatherv succeeded");
+ free(displs);
+ free(recvcounts);
+ free(global_buf);
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(read_bufs[dset_idx]);
- if (displs)
- free(displs);
- if (recvcounts)
- free(recvcounts);
- if (global_buf)
- free(global_buf);
- if (read_buf)
- free(read_buf);
- if (correct_buf)
- free(correct_buf);
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
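The explicit one-dimensional memory dataspace removed above is replaced by H5S_BLOCK, which tells H5Dread to treat the supplied buffer as a single contiguous block with as many elements as the file selection. A minimal sketch of that per-rank slab read follows; the function name and dimensions are illustrative, and error cleanup is trimmed for brevity.

/* Minimal sketch of a per-rank slab read using H5S_BLOCK (illustrative names). */
#include <hdf5.h>
#include <stdlib.h>

static void *
read_rank_slab(hid_t dset_id, hid_t memtype, hid_t dxpl_id, int mpi_rank, hsize_t rows_per_rank,
               hsize_t ncols)
{
    hsize_t start[2] = {(hsize_t)mpi_rank * rows_per_rank, 0};
    hsize_t count[2] = {1, 1};
    hsize_t block[2] = {rows_per_rank, ncols};
    hid_t   fspace   = H5I_INVALID_HID;
    void   *buf      = NULL;

    /* Each rank selects a disjoint block of whole rows in the file dataspace */
    fspace = H5Dget_space(dset_id);
    if (fspace < 0)
        return NULL;
    if (H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, NULL, count, block) < 0)
        return NULL;

    buf = calloc((size_t)(rows_per_rank * ncols), H5Tget_size(memtype));
    if (!buf)
        return NULL;

    /* H5S_BLOCK: the memory buffer is one contiguous block matching the file selection */
    if (H5Dread(dset_id, memtype, H5S_BLOCK, fspace, dxpl_id, buf) < 0) {
        free(buf);
        buf = NULL;
    }

    H5Sclose(fspace);
    return buf;
}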
@@ -6417,30 +7397,36 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, H
*/
static void
test_read_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode)
{
- COMPOUND_C_DATATYPE *read_buf = NULL;
- COMPOUND_C_DATATYPE *correct_buf = NULL;
- COMPOUND_C_DATATYPE *global_buf = NULL;
- hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
- hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
- hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
- hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
- hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
- hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
- hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
- hsize_t flat_dims[1];
- size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
- hid_t filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID;
- hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
- int *recvcounts = NULL;
- int *displs = NULL;
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *global_buf = NULL;
+ hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t data_size, read_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
+ int *recvcounts = NULL;
+ int *displs = NULL;
+ int mpi_code;
if (MAINPROCESS)
- HDputs("Testing read from unshared filtered chunks in Compound Datatype dataset with Datatype "
- "conversion");
+ puts("Testing read from unshared filtered chunks in Compound Datatype dataset with Datatype "
+ "conversion");
/* SZIP and ScaleOffset filters don't support compound types */
if (filter_id == H5Z_FILTER_SZIP || filter_id == H5Z_FILTER_SCALEOFFSET) {
@@ -6453,17 +7439,20 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_grou
dataset_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS;
/* Setup the buffer for writing and for comparison */
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
-
- correct_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ data_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE);
- for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) {
- correct_buf[i].field1 = (short)((i % dataset_dims[1]) + (i / dataset_dims[1]));
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) {
+ COMPOUND_C_DATATYPE *tmp_buf = calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- correct_buf[i].field2 = (int)((i % dataset_dims[1]) + (i / dataset_dims[1]));
+ for (size_t j = 0; j < data_size / sizeof(COMPOUND_C_DATATYPE); j++) {
+ tmp_buf[j].field1 = (short)((j % dataset_dims[1]) + (j / dataset_dims[1]) + dset_idx);
+ tmp_buf[j].field2 = (int)((j % dataset_dims[1]) + (j / dataset_dims[1]) + dset_idx);
+ tmp_buf[j].field3 = (long)((j % dataset_dims[1]) + (j / dataset_dims[1]) + dset_idx);
+ }
- correct_buf[i].field3 = (long)((i % dataset_dims[1]) + (i / dataset_dims[1]));
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
}
/* Create the compound type for memory. */
@@ -6485,6 +7474,48 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_grou
VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS,
+ dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
+ chunk_dims[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
+
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS,
+ chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, filetype,
+ filespace, plist_id, test_mode, &num_dsets, dset_ids);
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ MPI_Barrier(comm);
+
if (MAINPROCESS) {
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -6500,55 +7531,38 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_grou
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- /* Create the dataspace for the dataset */
- filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS,
- dataset_dims, NULL);
- VRFY((filespace >= 0), "File dataspace creation succeeded");
-
- /* Create chunked dataset */
- chunk_dims[0] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
- chunk_dims[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
-
- plist_id = H5Pcopy(dcpl_id);
- VRFY((plist_id >= 0), "DCPL copy succeeded");
-
- VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS,
- chunk_dims) >= 0),
- "Chunk size set");
+ open_datasets(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME,
+ num_dsets, test_mode, dset_ids);
- /* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+ select_all(num_dsets, dset_ids, fspace_ids);
- dset_id = H5Dcreate2(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME,
- filetype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ write_datasets(num_dsets, dset_ids, memtype, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
-
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
-
- VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
- "Dataset write succeeded");
+ plist_id = H5Dget_create_plist(dset_ids[0]);
+ VRFY((plist_id >= 0), "H5Dget_create_plist succeeded");
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- /* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
+ MPI_Barrier(comm);
+
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id =
- H5Dopen2(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, num_dsets,
+ test_mode, dset_ids);
sel_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
sel_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
@@ -6556,13 +7570,6 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_grou
/* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
flat_dims[0] = sel_dims[0] * sel_dims[1];
- memspace = H5Screate_simple(1, flat_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
/*
* Each process defines the dataset selection in the file and
* reads it to the selection in memory
@@ -6576,60 +7583,53 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_grou
start[0] = 0;
start[1] = ((hsize_t)mpi_rank * READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS);
- if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
- read_buf_size = flat_dims[0] * sizeof(*read_buf);
+ read_buf_size = flat_dims[0] * sizeof(COMPOUND_C_DATATYPE);
- read_buf = (COMPOUND_C_DATATYPE *)calloc(1, read_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
-
- VRFY((H5Dread(dset_id, memtype, memspace, filespace, dxpl_id, read_buf) >= 0), "Dataset read succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ read_bufs[dset_idx] = calloc(1, read_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+ }
- global_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != global_buf), "calloc succeeded");
+ read_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, test_mode);
/* Collect each piece of data from all ranks into a global buffer on all ranks */
- recvcounts = (int *)calloc(1, (size_t)mpi_size * sizeof(*recvcounts));
+ global_buf = calloc(1, data_size);
+ VRFY((NULL != global_buf), "calloc succeeded");
+ recvcounts = calloc(1, (size_t)mpi_size * sizeof(*recvcounts));
VRFY((NULL != recvcounts), "calloc succeeded");
+ displs = calloc(1, (size_t)mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "calloc succeeded");
- for (i = 0; i < (size_t)mpi_size; i++)
- recvcounts[i] = (int)(flat_dims[0] * sizeof(*read_buf));
+ for (size_t i = 0; i < (size_t)mpi_size; i++) {
+ recvcounts[i] = (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE));
+ displs[i] = (int)(i * flat_dims[0] * sizeof(COMPOUND_C_DATATYPE));
+ }
- displs = (int *)calloc(1, (size_t)mpi_size * sizeof(*displs));
- VRFY((NULL != displs), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ mpi_code = MPI_Allgatherv(read_bufs[dset_idx], (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)),
+ MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm);
+ VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded");
- for (i = 0; i < (size_t)mpi_size; i++)
- displs[i] = (int)(i * flat_dims[0] * sizeof(*read_buf));
+ VRFY((0 == memcmp(global_buf, data_bufs[dset_idx], data_size)), "Data verification succeeded");
+ }
- VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE,
- global_buf, recvcounts, displs, MPI_BYTE, comm)),
- "MPI_Allgatherv succeeded");
+ free(displs);
+ free(recvcounts);
+ free(global_buf);
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(read_bufs[dset_idx]);
- if (displs)
- free(displs);
- if (recvcounts)
- free(recvcounts);
- if (global_buf)
- free(global_buf);
- if (read_buf)
- free(read_buf);
- if (correct_buf)
- free(correct_buf);
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Tclose(filetype) >= 0), "File datatype close succeeded");
VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
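The type-conversion variants pair a native in-memory compound type with a file type whose fields are stored as packed 8-byte big-endian integers, so the library must convert on every write and read. A minimal sketch of that pairing is below; the cmpd_t struct and function name are illustrative stand-ins for COMPOUND_C_DATATYPE.

/* Minimal sketch of the memory/file compound type pairing (illustrative names). */
#include <hdf5.h>

typedef struct {
    short field1;
    int   field2;
    long  field3;
} cmpd_t;

static herr_t
make_compound_types(hid_t *memtype_out, hid_t *filetype_out)
{
    hid_t memtype  = H5I_INVALID_HID;
    hid_t filetype = H5I_INVALID_HID;

    /* Memory layout follows the C struct exactly */
    memtype = H5Tcreate(H5T_COMPOUND, sizeof(cmpd_t));
    if (memtype < 0)
        return -1;
    if (H5Tinsert(memtype, "ShortData", HOFFSET(cmpd_t, field1), H5T_NATIVE_SHORT) < 0 ||
        H5Tinsert(memtype, "IntData", HOFFSET(cmpd_t, field2), H5T_NATIVE_INT) < 0 ||
        H5Tinsert(memtype, "LongData", HOFFSET(cmpd_t, field3), H5T_NATIVE_LONG) < 0)
        return -1;

    /* File layout: three packed 8-byte big-endian fields, forcing conversion on I/O */
    filetype = H5Tcreate(H5T_COMPOUND, 24);
    if (filetype < 0)
        return -1;
    if (H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE) < 0 ||
        H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) < 0 ||
        H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) < 0)
        return -1;

    *memtype_out  = memtype;
    *filetype_out = filetype;
    return 0;
}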
@@ -6651,29 +7651,35 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_grou
*/
static void
test_read_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group, H5Z_filter_t filter_id,
- hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id,
+ test_mode_t test_mode)
{
- COMPOUND_C_DATATYPE *read_buf = NULL;
- COMPOUND_C_DATATYPE *correct_buf = NULL;
- COMPOUND_C_DATATYPE *global_buf = NULL;
- hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
- hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
- hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
- hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
- hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
- hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
- hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
- hsize_t flat_dims[1];
- size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
- hid_t filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID;
- hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
- int *recvcounts = NULL;
- int *displs = NULL;
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *global_buf = NULL;
+ hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t data_size, read_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
+ int *recvcounts = NULL;
+ int *displs = NULL;
+ int mpi_code;
if (MAINPROCESS)
- HDputs(
+ puts(
"Testing read from shared filtered chunks in Compound Datatype dataset with Datatype conversion");
/* SZIP and ScaleOffset filters don't support compound types */
@@ -6687,23 +7693,25 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group,
dataset_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS;
/* Setup the buffer for writing and for comparison */
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+ data_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE);
- correct_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) {
+ COMPOUND_C_DATATYPE *tmp_buf = calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) {
- correct_buf[i].field1 =
- (short)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) +
- (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
+ for (size_t j = 0; j < data_size / sizeof(COMPOUND_C_DATATYPE); j++) {
+ size_t val1 = (dataset_dims[1] * (j / ((hsize_t)mpi_size * dataset_dims[1])));
+ size_t val2 = (j % dataset_dims[1]);
+ size_t val3 = (((j % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]);
+ size_t val = val1 + val2 + val3 + dset_idx;
- correct_buf[i].field2 =
- (int)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) +
- (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
+ tmp_buf[j].field1 = (short)val;
+ tmp_buf[j].field2 = (int)val;
+ tmp_buf[j].field3 = (long)val;
+ }
- correct_buf[i].field3 =
- (long)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) +
- (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
}
/* Create the compound type for memory. */
@@ -6725,6 +7733,48 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group,
VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS,
+ dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS;
+ chunk_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
+
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS,
+ chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, filetype,
+ filespace, plist_id, test_mode, &num_dsets, dset_ids);
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ MPI_Barrier(comm);
+
if (MAINPROCESS) {
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -6740,55 +7790,38 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group,
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- /* Create the dataspace for the dataset */
- filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS,
- dataset_dims, NULL);
- VRFY((filespace >= 0), "File dataspace creation succeeded");
-
- /* Create chunked dataset */
- chunk_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS;
- chunk_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
-
- plist_id = H5Pcopy(dcpl_id);
- VRFY((plist_id >= 0), "DCPL copy succeeded");
-
- VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS,
- chunk_dims) >= 0),
- "Chunk size set");
+ open_datasets(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, num_dsets,
+ test_mode, dset_ids);
- /* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+ select_all(num_dsets, dset_ids, fspace_ids);
- dset_id = H5Dcreate2(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME,
- filetype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ write_datasets(num_dsets, dset_ids, memtype, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs, test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
-
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
-
- VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
- "Dataset write succeeded");
+ plist_id = H5Dget_create_plist(dset_ids[0]);
+ VRFY((plist_id >= 0), "H5Dget_create_plist succeeded");
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- /* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
+ MPI_Barrier(comm);
+
file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id =
- H5Dopen2(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, num_dsets,
+ test_mode, dset_ids);
sel_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t)mpi_size;
sel_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC;
@@ -6796,13 +7829,6 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group,
/* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
flat_dims[0] = sel_dims[0] * sel_dims[1];
- memspace = H5Screate_simple(1, flat_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
/*
* Each process defines the dataset selection in the file and
* reads it to the selection in memory
@@ -6816,60 +7842,53 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group,
start[0] = (hsize_t)mpi_rank;
start[1] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
- read_buf_size = flat_dims[0] * sizeof(*read_buf);
+ read_buf_size = flat_dims[0] * sizeof(COMPOUND_C_DATATYPE);
- read_buf = (COMPOUND_C_DATATYPE *)calloc(1, read_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
-
- VRFY((H5Dread(dset_id, memtype, memspace, filespace, dxpl_id, read_buf) >= 0), "Dataset read succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ read_bufs[dset_idx] = calloc(1, read_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+ }
- global_buf = (COMPOUND_C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != global_buf), "calloc succeeded");
+ read_datasets(num_dsets, dset_ids, memtype, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs, test_mode);
/* Collect each piece of data from all ranks into a global buffer on all ranks */
- recvcounts = (int *)calloc(1, (size_t)mpi_size * sizeof(*recvcounts));
+ global_buf = calloc(1, data_size);
+ VRFY((NULL != global_buf), "calloc succeeded");
+ recvcounts = calloc(1, (size_t)mpi_size * sizeof(*recvcounts));
VRFY((NULL != recvcounts), "calloc succeeded");
+ displs = calloc(1, (size_t)mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "calloc succeeded");
- for (i = 0; i < (size_t)mpi_size; i++)
- recvcounts[i] = (int)(flat_dims[0] * sizeof(*read_buf));
+ for (size_t i = 0; i < (size_t)mpi_size; i++) {
+ recvcounts[i] = (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE));
+ displs[i] = (int)(i * flat_dims[0] * sizeof(COMPOUND_C_DATATYPE));
+ }
- displs = (int *)calloc(1, (size_t)mpi_size * sizeof(*displs));
- VRFY((NULL != displs), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ mpi_code = MPI_Allgatherv(read_bufs[dset_idx], (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)),
+ MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm);
+ VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded");
- for (i = 0; i < (size_t)mpi_size; i++)
- displs[i] = (int)(i * flat_dims[0] * sizeof(*read_buf));
+ VRFY((0 == memcmp(global_buf, data_bufs[dset_idx], data_size)), "Data verification succeeded");
+ }
- VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE,
- global_buf, recvcounts, displs, MPI_BYTE, comm)),
- "MPI_Allgatherv succeeded");
+ free(displs);
+ free(recvcounts);
+ free(global_buf);
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(read_bufs[dset_idx]);
- if (displs)
- free(displs);
- if (recvcounts)
- free(recvcounts);
- if (global_buf)
- free(global_buf);
- if (read_buf)
- free(read_buf);
- if (correct_buf)
- free(correct_buf);
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -6887,25 +7906,70 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group,
*/
static void
test_write_serial_read_parallel(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id)
+ hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
- C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
hsize_t dataset_dims[WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS];
hsize_t chunk_dims[WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS];
- size_t i, data_size, correct_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ size_t data_size, correct_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
hid_t group_id = H5I_INVALID_HID;
hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
- HDputs("Testing write file serially; read file in parallel");
+ puts("Testing write file serially; read file in parallel");
dataset_dims[0] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_NROWS;
dataset_dims[1] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_NCOLS;
dataset_dims[2] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_DEPTH;
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_CH_NROWS;
+ chunk_dims[1] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_CH_NCOLS;
+ chunk_dims[2] = 1;
+
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ plist_id, test_mode, &num_dsets, dset_ids);
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ MPI_Barrier(comm);
+
/* Write the file on the MAINPROCESS rank */
if (MAINPROCESS) {
/* Set up file access property list */
@@ -6923,66 +7987,58 @@ test_write_serial_read_parallel(const char *parent_group, H5Z_filter_t filter_id
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- /* Create the dataspace for the dataset */
- chunk_dims[0] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_CH_NROWS;
- chunk_dims[1] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_CH_NCOLS;
- chunk_dims[2] = 1;
-
- filespace = H5Screate_simple(WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS, dataset_dims, NULL);
- VRFY((filespace >= 0), "File dataspace creation succeeded");
+ open_datasets(group_id, WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, num_dsets, test_mode, dset_ids);
- /* Create chunked dataset */
- plist_id = H5Pcopy(dcpl_id);
- VRFY((plist_id >= 0), "DCPL copy succeeded");
+ data_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(C_DATATYPE);
- VRFY((H5Pset_chunk(plist_id, WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS, chunk_dims) >= 0),
- "Chunk size set");
-
- /* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
-
- dset_id = H5Dcreate2(group_id, WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
-
- /* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx);
- data_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*data);
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
+ }
- data = (C_DATATYPE *)calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ select_all(num_dsets, dset_ids, fspace_ids);
- for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE)GEN_DATA(i);
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, fspace_ids, H5P_DEFAULT, data_bufs,
+ test_mode);
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) >= 0),
- "Dataset write succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ plist_id = H5Dget_create_plist(dset_ids[0]);
+ VRFY((plist_id >= 0), "H5Dget_create_plist succeeded");
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- if (data)
- free(data);
+    /* Close the datasets; all ranks will re-open them below for verification */
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
+ MPI_Barrier(comm);
- correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(C_DATATYPE);
- read_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ correct_bufs[dset_idx] = calloc(1, correct_buf_size);
+ VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded");
+ read_bufs[dset_idx] = calloc(1, correct_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
- for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = (long)i;
+ for (size_t j = 0; j < correct_buf_size / sizeof(C_DATATYPE); j++)
+ correct_bufs[dset_idx][j] = (long)(j + dset_idx);
+ }
/* All ranks open the file and verify their "portion" of the dataset is correct */
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
@@ -6991,20 +8047,22 @@ test_write_serial_read_parallel(const char *parent_group, H5Z_filter_t filter_id
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(group_id, WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, num_dsets, test_mode, dset_ids);
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)),
+ "Data verification succeeded");
- if (correct_buf)
- free(correct_buf);
- if (read_buf)
- free(read_buf);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ free(read_bufs[dset_idx]);
+ free(correct_bufs[dset_idx]);
+ }
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -7022,11 +8080,12 @@ test_write_serial_read_parallel(const char *parent_group, H5Z_filter_t filter_id
*/
static void
test_write_parallel_read_serial(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
- hid_t dcpl_id, hid_t dxpl_id)
+ hid_t dcpl_id, hid_t dxpl_id, test_mode_t test_mode)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
- C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
hsize_t dataset_dims[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS];
hsize_t chunk_dims[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS];
hsize_t sel_dims[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS];
@@ -7034,13 +8093,16 @@ test_write_parallel_read_serial(const char *parent_group, H5Z_filter_t filter_id
hsize_t stride[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS];
hsize_t block[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS];
hsize_t offset[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS];
- size_t i, data_size, correct_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ size_t data_size, correct_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
- HDputs("Testing write file in parallel; read serially");
+ puts("Testing write file in parallel; read serially");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -7062,9 +8124,6 @@ test_write_parallel_read_serial(const char *parent_group, H5Z_filter_t filter_id
filespace = H5Screate_simple(WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS, sel_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
/* Create chunked dataset */
plist_id = H5Pcopy(dcpl_id);
VRFY((plist_id >= 0), "DCPL copy succeeded");
@@ -7075,12 +8134,12 @@ test_write_parallel_read_serial(const char *parent_group, H5Z_filter_t filter_id
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(group_id, WRITE_PARALLEL_READ_SERIAL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, WRITE_PARALLEL_READ_SERIAL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ plist_id, test_mode, &num_dsets, dset_ids);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -7100,48 +8159,42 @@ test_write_parallel_read_serial(const char *parent_group, H5Z_filter_t filter_id
offset[1] = 0;
offset[2] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
- " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], offset[ %" PRIuHSIZE
- ", %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
- " ]\n",
- mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], offset[0], offset[1],
- offset[2], block[0], block[1], block[2]);
- fflush(stdout);
- }
+ select_hyperslab(num_dsets, dset_ids, offset, stride, count, block, fspace_ids);
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(C_DATATYPE);
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ C_DATATYPE *tmp_buf = calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- /* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data);
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx);
- data = (C_DATATYPE *)calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
+ }
- for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE)GEN_DATA(i);
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+ test_mode);
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
- "Dataset write succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ free(data_bufs_nc[dset_idx]);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- if (data)
- free(data);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ MPI_Barrier(comm);
+
if (MAINPROCESS) {
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -7157,34 +8210,43 @@ test_write_parallel_read_serial(const char *parent_group, H5Z_filter_t filter_id
group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(group_id, WRITE_PARALLEL_READ_SERIAL_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, WRITE_PARALLEL_READ_SERIAL_DATASET_NAME, num_dsets, test_mode, dset_ids);
- correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(C_DATATYPE);
- correct_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ correct_bufs[dset_idx] = calloc(1, correct_buf_size);
+ VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded");
+ read_bufs[dset_idx] = calloc(1, correct_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
- read_buf = (C_DATATYPE *)calloc(1, correct_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ for (size_t j = 0; j < correct_buf_size / sizeof(C_DATATYPE); j++)
+ correct_bufs[dset_idx][j] =
+ (C_DATATYPE)((j % (dataset_dims[0] * dataset_dims[1])) +
+ (j / (dataset_dims[0] * dataset_dims[1])) + dset_idx);
+ }
- for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
- correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] * dataset_dims[1])) +
- (i / (dataset_dims[0] * dataset_dims[1])));
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_bufs,
+ test_mode);
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf) >= 0),
- "Dataset read succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], correct_buf_size)),
+ "Data verification succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ free(read_bufs[dset_idx]);
+ free(correct_bufs[dset_idx]);
+ }
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
-
- free(correct_buf);
- free(read_buf);
}
+ MPI_Barrier(comm);
+
return;
}
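Note for readers skimming this patch: the refactored tests above all funnel through the same multi-dataset helper flow. The sketch below condenses that flow using the helper signatures implied by the call sites in this diff (create_datasets, select_hyperslab, write_datasets, read_datasets, verify-style VRFY, GEN_DATA, C_DATATYPE and MAX_NUM_DSETS_MULTI come from t_filters_parallel.c/.h); the function and dataset names here are illustrative only, not part of the patch.

    static void
    example_multi_dset_round_trip(hid_t group_id, hid_t filespace, hid_t plist_id, hid_t dxpl_id,
                                  test_mode_t test_mode, hsize_t start[], hsize_t stride[],
                                  hsize_t count[], hsize_t block[], size_t sel_elems)
    {
        const void *data_bufs[MAX_NUM_DSETS_MULTI]    = {0};
        void       *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const copies for freeing */
        void       *read_bufs[MAX_NUM_DSETS_MULTI]    = {0};
        hid_t       dset_ids[MAX_NUM_DSETS_MULTI];
        hid_t       fspace_ids[MAX_NUM_DSETS_MULTI];
        size_t      num_dsets;

        /* One or several datasets are created according to the test mode */
        create_datasets(group_id, "example_dset", HDF5_DATATYPE_NAME, filespace, plist_id, test_mode,
                        &num_dsets, dset_ids);

        /* Each rank selects its hyperslab in every dataset's file dataspace */
        select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);

        for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
            C_DATATYPE *tmp_buf = calloc(1, sel_elems * sizeof(C_DATATYPE));
            VRFY((NULL != tmp_buf), "calloc succeeded");

            /* Give every dataset distinct contents so cross-wiring is detectable */
            for (size_t j = 0; j < sel_elems; j++)
                tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx);

            data_bufs[dset_idx]    = tmp_buf;
            data_bufs_nc[dset_idx] = tmp_buf;

            read_bufs[dset_idx] = calloc(1, sel_elems * sizeof(C_DATATYPE));
            VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
        }

        /* Collective write, then read back through the same selections */
        write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id,
                       data_bufs, test_mode);
        read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id,
                      read_bufs, test_mode);

        for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
            VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], sel_elems * sizeof(C_DATATYPE))),
                 "Data verification succeeded");

            free(read_bufs[dset_idx]);
            free(data_bufs_nc[dset_idx]);

            VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
            VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
        }
    }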
@@ -7197,24 +8259,28 @@ test_write_parallel_read_serial(const char *parent_group, H5Z_filter_t filter_id
*/
static void
test_shrinking_growing_chunks(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id,
- hid_t dxpl_id)
+ hid_t dxpl_id, test_mode_t test_mode)
{
- double *data = NULL;
- double *read_buf = NULL;
- hsize_t dataset_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
- hsize_t chunk_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
- hsize_t sel_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
- hsize_t start[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
- hsize_t stride[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
- hsize_t count[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
- hsize_t block[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
- size_t i, data_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
- hid_t group_id = H5I_INVALID_HID;
- hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ hsize_t dataset_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t start[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t stride[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t count[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t block[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ size_t data_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
- HDputs("Testing continually shrinking/growing chunks");
+ puts("Testing continually shrinking/growing chunks");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -7233,9 +8299,6 @@ test_shrinking_growing_chunks(const char *parent_group, H5Z_filter_t filter_id,
filespace = H5Screate_simple(SHRINKING_GROWING_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
- memspace = H5Screate_simple(SHRINKING_GROWING_CHUNKS_DATASET_DIMS, sel_dims, NULL);
- VRFY((memspace >= 0), "Memory dataspace creation succeeded");
-
/* Create chunked dataset */
plist_id = H5Pcopy(dcpl_id);
VRFY((plist_id >= 0), "DCPL copy succeeded");
@@ -7245,12 +8308,12 @@ test_shrinking_growing_chunks(const char *parent_group, H5Z_filter_t filter_id,
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(group_id, SHRINKING_GROWING_CHUNKS_DATASET_NAME, H5T_NATIVE_DOUBLE, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, SHRINKING_GROWING_CHUNKS_DATASET_NAME, H5T_NATIVE_DOUBLE, filespace, plist_id,
+ test_mode, &num_dsets, dset_ids);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -7267,68 +8330,74 @@ test_shrinking_growing_chunks(const char *parent_group, H5Z_filter_t filter_id,
start[0] = ((hsize_t)mpi_rank * (hsize_t)SHRINKING_GROWING_CHUNKS_CH_NROWS * count[0]);
start[1] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((dset_id >= 0), "File dataspace retrieval succeeded");
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(double);
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ double *tmp_buf = calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- data_size = sel_dims[0] * sel_dims[1] * sizeof(double);
+            for (size_t j = 0; j < data_size / sizeof(double); j++)
+ tmp_buf[j] = (double)(GEN_DATA(j) + dset_idx);
- data = (double *)calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
- read_buf = (double *)calloc(1, data_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ tmp_buf = calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- for (i = 0; i < SHRINKING_GROWING_CHUNKS_NLOOPS; i++) {
- /* Continually write random float data, followed by zeroed-out data */
- if (i % 2)
- memset(data, 0, data_size);
- else {
- size_t j;
- for (j = 0; j < data_size / sizeof(*data); j++) {
- data[j] = (rand() / (double)(RAND_MAX / (double)1.0L));
+ read_bufs[dset_idx] = tmp_buf;
+ }
+
+ for (size_t i = 0; i < SHRINKING_GROWING_CHUNKS_NLOOPS; i++) {
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+            /* Continually write random double-precision data, followed by zeroed-out data */
+ if (i % 2)
+ memset(data_bufs_nc[dset_idx], 0, data_size);
+ else {
+ double *tmp_buf = data_bufs_nc[dset_idx];
+
+ for (size_t k = 0; k < data_size / sizeof(double); k++) {
+ tmp_buf[k] = (rand() / (double)(RAND_MAX / (double)1.0L));
+ }
}
}
- VRFY((H5Dwrite(dset_id, H5T_NATIVE_DOUBLE, memspace, filespace, dxpl_id, data) >= 0),
- "Dataset write succeeded");
+ write_datasets(num_dsets, dset_ids, H5T_NATIVE_DOUBLE, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+ test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- if (i % 2) {
- memset(read_buf, 255, data_size);
- }
- else {
- memset(read_buf, 0, data_size);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ if (i % 2) {
+ memset(read_bufs[dset_idx], 255, data_size);
+ }
+ else {
+ memset(read_bufs[dset_idx], 0, data_size);
+ }
}
- VRFY((H5Dread(dset_id, H5T_NATIVE_DOUBLE, memspace, filespace, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ read_datasets(num_dsets, dset_ids, H5T_NATIVE_DOUBLE, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs,
+ test_mode);
- VRFY((0 == memcmp(read_buf, data, data_size)), "data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)),
+ "data verification succeeded");
}
- if (read_buf)
- free(read_buf);
- if (data)
- free(data);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ free(read_bufs[dset_idx]);
+ free(data_bufs_nc[dset_idx]);
+ }
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
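In the loop above, "shrinking/growing" refers to the stored (filtered) chunk size: the zero-filled iterations compress to very small chunks while the random-double iterations barely compress at all. The test only checks data correctness, but the effect could be observed with the public chunk-query API, roughly as sketched here (illustrative, not part of the patch; dset_ids[0] is the test's first dataset):

    /* Illustrative only: query the on-disk size of the chunk at logical
     * offset {0, 0} after a write iteration; it should be much smaller for
     * the zero-filled passes than for the random-data passes. */
    hsize_t chunk_offset[2] = {0, 0};
    hsize_t chunk_nbytes    = 0;

    VRFY((H5Dget_chunk_storage_size(dset_ids[0], chunk_offset, &chunk_nbytes) >= 0),
         "H5Dget_chunk_storage_size succeeded");

    if (MAINPROCESS)
        printf("chunk {0,0} occupies %" PRIuHSIZE " bytes on disk\n", chunk_nbytes);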
@@ -7347,10 +8416,11 @@ test_shrinking_growing_chunks(const char *parent_group, H5Z_filter_t filter_id,
*/
static void
test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id,
- hid_t dxpl_id)
+ hid_t dxpl_id, test_mode_t test_mode)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
hsize_t sel_dims[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
@@ -7358,13 +8428,16 @@ test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hi
hsize_t stride[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
hsize_t count[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
hsize_t block[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
- size_t i, data_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ size_t data_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
hid_t group_id = H5I_INVALID_HID;
hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
- HDputs("Testing write to unshared filtered edge chunks");
+ puts("Testing write to unshared filtered edge chunks");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -7393,12 +8466,12 @@ test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hi
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
- filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ plist_id, test_mode, &num_dsets, dset_ids);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -7415,64 +8488,68 @@ test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hi
start[1] =
(hsize_t)(WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_NCOLS - WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS);
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE);
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) {
+ C_DATATYPE *tmp_buf = calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- /* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx);
- data = (C_DATATYPE *)calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
- read_buf = (C_DATATYPE *)calloc(1, data_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ tmp_buf = calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE)GEN_DATA(i);
+ read_bufs[dset_idx] = tmp_buf;
+ }
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
- "Dataset write succeeded");
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+ test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, (mpi_size > 1) ? SOME_CHUNKS_WRITTEN : ALL_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id,
+ (mpi_size > 1) ? SOME_CHUNKS_WRITTEN : ALL_CHUNKS_WRITTEN);
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ /* Close and re-open datasets */
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
/* Verify the correct data was written */
- dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, num_dsets, test_mode, dset_ids);
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs,
+ test_mode);
- VRFY((0 == memcmp(read_buf, data, data_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)),
+ "Data verification succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
/* Repeat the previous, but set option to not filter partial edge chunks */
if (MAINPROCESS)
- HDputs("Testing write to unshared unfiltered edge chunks");
+ puts("Testing write to unshared unfiltered edge chunks");
+
+ filespace = H5Screate_simple(WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
H5Pset_chunk_opts(plist_id, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS);
- dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2, HDF5_DATATYPE_NAME,
- filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2, HDF5_DATATYPE_NAME,
+ filespace, plist_id, test_mode, &num_dsets, dset_ids);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -7489,48 +8566,43 @@ test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hi
start[1] =
(hsize_t)(WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_NCOLS - WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS);
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
- "Dataset write succeeded");
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+ test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, (mpi_size > 1) ? SOME_CHUNKS_WRITTEN : ALL_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id,
+ (mpi_size > 1) ? SOME_CHUNKS_WRITTEN : ALL_CHUNKS_WRITTEN);
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
/* Verify the correct data was written */
- dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2, num_dsets, test_mode,
+ dset_ids);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ memset(read_bufs[dset_idx], 255, data_size);
- memset(read_buf, 255, data_size);
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs,
+ test_mode);
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)),
+ "Data verification succeeded");
- VRFY((0 == memcmp(read_buf, data, data_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) {
+ free(read_bufs[dset_idx]);
+ free(data_bufs_nc[dset_idx]);
+ }
- if (data)
- free(data);
- if (read_buf)
- free(read_buf);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -7549,10 +8621,11 @@ test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hi
*/
static void
test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id,
- hid_t dxpl_id)
+ hid_t dxpl_id, test_mode_t test_mode)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
hsize_t dataset_dims[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
hsize_t chunk_dims[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
hsize_t sel_dims[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
@@ -7560,13 +8633,16 @@ test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t
hsize_t stride[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
hsize_t count[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
hsize_t block[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
- size_t i, data_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ size_t data_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
hid_t group_id = H5I_INVALID_HID;
hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
- HDputs("Testing write to shared filtered edge chunks");
+ puts("Testing write to shared filtered edge chunks");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -7595,12 +8671,12 @@ test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t
/* Add test filter to the pipeline */
VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(group_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
- filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ plist_id, test_mode, &num_dsets, dset_ids);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -7618,64 +8694,66 @@ test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t
start[1] =
(hsize_t)(WRITE_SHARED_FILTERED_EDGE_CHUNKS_NCOLS - WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS);
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE);
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) {
+ C_DATATYPE *tmp_buf = calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- /* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx);
- data = (C_DATATYPE *)calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
- read_buf = (C_DATATYPE *)calloc(1, data_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ tmp_buf = calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE)GEN_DATA(i);
+ read_bufs[dset_idx] = tmp_buf;
+ }
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
- "Dataset write succeeded");
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+ test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN);
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
/* Verify the correct data was written */
- dset_id = H5Dopen2(group_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, num_dsets, test_mode, dset_ids);
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs,
+ test_mode);
- VRFY((0 == memcmp(read_buf, data, data_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)),
+ "Data verification succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
/* Repeat the previous, but set option to not filter partial edge chunks */
if (MAINPROCESS)
- HDputs("Testing write to shared unfiltered edge chunks");
+ puts("Testing write to shared unfiltered edge chunks");
+
+ filespace = H5Screate_simple(WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
H5Pset_chunk_opts(plist_id, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS);
- dset_id = H5Dcreate2(group_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2, HDF5_DATATYPE_NAME,
- filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2, HDF5_DATATYPE_NAME, filespace,
+ plist_id, test_mode, &num_dsets, dset_ids);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -7693,48 +8771,41 @@ test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t
start[1] =
(hsize_t)(WRITE_SHARED_FILTERED_EDGE_CHUNKS_NCOLS - WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS);
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
- "Dataset write succeeded");
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+ test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN);
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
/* Verify the correct data was written */
- dset_id = H5Dopen2(group_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2, num_dsets, test_mode, dset_ids);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ memset(read_bufs[dset_idx], 255, data_size);
- memset(read_buf, 255, data_size);
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids[0], dxpl_id, read_bufs,
+ test_mode);
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((0 == memcmp(read_bufs[dset_idx], data_bufs[dset_idx], data_size)),
+ "Data verification succeeded");
- VRFY((0 == memcmp(read_buf, data, data_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) {
+ free(read_bufs[dset_idx]);
+ free(data_bufs_nc[dset_idx]);
+ }
- if (data)
- free(data);
- if (read_buf)
- free(read_buf);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
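Both edge-chunk tests repeat their writes after setting H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS so that partial edge chunks bypass the filter pipeline entirely. A minimal standalone sketch of that DCPL setup follows; the 2-D chunk size and the deflate filter are illustrative stand-ins for the test's parameterized filter:

    /* Illustrative only: chunked DCPL whose partial edge chunks are left unfiltered */
    hid_t   dcpl     = H5Pcreate(H5P_DATASET_CREATE);
    hsize_t chunk[2] = {4, 4}; /* deliberately does not evenly divide the dataset dims */

    VRFY((dcpl >= 0), "DCPL creation succeeded");
    VRFY((H5Pset_chunk(dcpl, 2, chunk) >= 0), "Chunk size set");
    VRFY((H5Pset_deflate(dcpl, 6) >= 0), "Deflate filter set");

    /* Partial chunks along the dataset edges now bypass the filter pipeline */
    VRFY((H5Pset_chunk_opts(dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) >= 0), "Chunk opts set");

    /* ... create the dataset with dcpl, then release it ... */
    VRFY((H5Pclose(dcpl) >= 0), "DCPL close succeeded");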
@@ -7742,36 +8813,18 @@ test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t
}
/*
- * Tests that filtered and unfiltered partial edge chunks can be
- * written to and read from correctly in parallel when only one
- * MPI rank writes to a particular edge chunk in the dataset and
- * only performs a partial write to the edge chunk.
- *
- * The dataset contains partial edge chunks in the second dimension.
- * Each MPI rank selects a hyperslab in the shape of part of a single
- * edge chunk and writes to just a portion of the edge chunk.
- */
-static void
-test_edge_chunks_partial_write(const char H5_ATTR_PARALLEL_UNUSED *parent_group,
- H5Z_filter_t H5_ATTR_PARALLEL_UNUSED filter_id,
- hid_t H5_ATTR_PARALLEL_UNUSED fapl_id, hid_t H5_ATTR_PARALLEL_UNUSED dcpl_id,
- hid_t H5_ATTR_PARALLEL_UNUSED dxpl_id)
-{
- /* TODO */
-}
-
-/*
* Tests that the parallel compression feature correctly handles
* writing fill values to a dataset and reading fill values from
* unallocated parts of a dataset.
*/
static void
test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id,
- hid_t dxpl_id)
+ hid_t dxpl_id, test_mode_t test_mode)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
- C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *correct_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
C_DATATYPE fill_value;
hsize_t dataset_dims[FILL_VALUES_TEST_DATASET_DIMS];
hsize_t chunk_dims[FILL_VALUES_TEST_DATASET_DIMS];
@@ -7780,15 +8833,19 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id
hsize_t stride[FILL_VALUES_TEST_DATASET_DIMS];
hsize_t count[FILL_VALUES_TEST_DATASET_DIMS];
hsize_t block[FILL_VALUES_TEST_DATASET_DIMS];
- size_t i, data_size, read_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ size_t data_size, read_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
hid_t group_id = H5I_INVALID_HID;
hid_t filespace = H5I_INVALID_HID;
int *recvcounts = NULL;
int *displs = NULL;
+ int mpi_code;
if (MAINPROCESS)
- HDputs("Testing fill values");
+ puts("Testing fill values");
file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
@@ -7820,32 +8877,35 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id
fill_value = FILL_VALUES_TEST_FILL_VAL;
VRFY((H5Pset_fill_value(plist_id, HDF5_DATATYPE_NAME, &fill_value) >= 0), "Fill Value set");
- dset_id = H5Dcreate2(group_id, FILL_VALUES_TEST_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT,
- plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, FILL_VALUES_TEST_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, plist_id,
+ test_mode, &num_dsets, dset_ids);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Allocate buffer for reading entire dataset */
- read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*read_buf);
-
- read_buf = calloc(1, read_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE);
- correct_buf = calloc(1, read_buf_size);
- VRFY((NULL != correct_buf), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) {
+ correct_bufs[dset_idx] = calloc(1, read_buf_size);
+ VRFY((NULL != correct_bufs[dset_idx]), "calloc succeeded");
+ read_bufs[dset_idx] = calloc(1, read_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+ }
/* Read entire dataset and verify that the fill value is returned */
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
- for (i = 0; i < read_buf_size / sizeof(*read_buf); i++)
- correct_buf[i] = FILL_VALUES_TEST_FILL_VAL;
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ for (size_t j = 0; j < read_buf_size / sizeof(C_DATATYPE); j++)
+ correct_bufs[dset_idx][j] = FILL_VALUES_TEST_FILL_VAL;
- VRFY((0 == memcmp(read_buf, correct_buf, read_buf_size)), "Data verification succeeded");
+ VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], read_buf_size)),
+ "Data verification succeeded");
+ }
/*
* Write to part of the first chunk in the dataset with
@@ -7863,44 +8923,35 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id
start[0] = (hsize_t)mpi_rank;
start[1] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
/* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE);
+
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) {
+ C_DATATYPE *tmp_buf = calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- data = (C_DATATYPE *)calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx);
- for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE)GEN_DATA(i);
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
+ }
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
- "Dataset write succeeded");
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+ test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN);
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
/* Verify correct data was written */
- dset_id = H5Dopen2(group_id, FILL_VALUES_TEST_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, FILL_VALUES_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids);
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
/*
* Each MPI rank communicates their written piece of data
@@ -7912,16 +8963,22 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id
displs = calloc(1, (size_t)mpi_size * sizeof(*displs));
VRFY((NULL != displs), "calloc succeeded");
- for (i = 0; i < (size_t)mpi_size; i++) {
+ for (size_t i = 0; i < (size_t)mpi_size; i++) {
recvcounts[i] = (int)(count[1] * block[1]);
displs[i] = (int)(i * dataset_dims[1]);
}
- VRFY((MPI_SUCCESS == MPI_Allgatherv(data, recvcounts[mpi_rank], C_DATATYPE_MPI, correct_buf, recvcounts,
- displs, C_DATATYPE_MPI, comm)),
- "MPI_Allgatherv succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ mpi_code = MPI_Allgatherv(data_bufs[dset_idx], recvcounts[mpi_rank], C_DATATYPE_MPI,
+ correct_bufs[dset_idx], recvcounts, displs, C_DATATYPE_MPI, comm);
+ VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, read_buf_size)), "Data verification succeeded");
+ VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], read_buf_size)),
+ "Data verification succeeded");
+ }
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
/*
* Write to whole dataset and ensure fill value isn't returned
@@ -7940,60 +8997,62 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id
start[0] = (hsize_t)mpi_rank * block[0];
start[1] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
- "Dataset write succeeded");
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+ test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
/* Verify correct data was written */
- dset_id = H5Dopen2(group_id, FILL_VALUES_TEST_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, FILL_VALUES_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids);
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
- for (i = 0; i < read_buf_size / sizeof(*read_buf); i++)
- VRFY((read_buf[i] != FILL_VALUES_TEST_FILL_VAL), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ C_DATATYPE *tmp_buf = read_bufs[dset_idx];
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ for (size_t j = 0; j < read_buf_size / sizeof(C_DATATYPE); j++)
+ VRFY((tmp_buf[j] != FILL_VALUES_TEST_FILL_VAL), "Data verification succeeded");
+ }
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
/********************************************************************
* Set the fill time to H5D_FILL_TIME_ALLOC and repeat the previous *
********************************************************************/
+ filespace = H5Screate_simple(FILL_VALUES_TEST_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_ALLOC) >= 0), "H5Pset_fill_time succeeded");
- dset_id = H5Dcreate2(group_id, FILL_VALUES_TEST_DATASET_NAME2, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT,
- plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, FILL_VALUES_TEST_DATASET_NAME2, HDF5_DATATYPE_NAME, filespace, plist_id,
+ test_mode, &num_dsets, dset_ids);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Read entire dataset and verify that the fill value is returned */
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
- for (i = 0; i < read_buf_size / sizeof(*read_buf); i++)
- correct_buf[i] = FILL_VALUES_TEST_FILL_VAL;
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ for (size_t j = 0; j < read_buf_size / sizeof(C_DATATYPE); j++)
+ correct_bufs[dset_idx][j] = FILL_VALUES_TEST_FILL_VAL;
- VRFY((0 == memcmp(read_buf, correct_buf, read_buf_size)), "Data verification succeeded");
+ VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], read_buf_size)),
+ "Data verification succeeded");
+ }
/*
* Write to part of the first chunk in the dataset with
@@ -8011,40 +9070,30 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id
start[0] = (hsize_t)mpi_rank;
start[1] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ C_DATATYPE *tmp_buf = data_bufs_nc[dset_idx];
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
-
- for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE)GEN_DATA(i);
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx);
+ }
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
- "Dataset write succeeded");
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+ test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN);
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
/* Verify correct data was written */
- dset_id = H5Dopen2(group_id, FILL_VALUES_TEST_DATASET_NAME2, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, FILL_VALUES_TEST_DATASET_NAME2, num_dsets, test_mode, dset_ids);
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
- for (i = 0; i < (size_t)mpi_size; i++) {
+ for (size_t i = 0; i < (size_t)mpi_size; i++) {
recvcounts[i] = (int)(count[1] * block[1]);
displs[i] = (int)(i * dataset_dims[1]);
}
@@ -8053,11 +9102,17 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id
* Each MPI rank communicates their written piece of data
* into each other rank's correctness-checking buffer
*/
- VRFY((MPI_SUCCESS == MPI_Allgatherv(data, recvcounts[mpi_rank], C_DATATYPE_MPI, correct_buf, recvcounts,
- displs, C_DATATYPE_MPI, comm)),
- "MPI_Allgatherv succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ mpi_code = MPI_Allgatherv(data_bufs[dset_idx], recvcounts[mpi_rank], C_DATATYPE_MPI,
+ correct_bufs[dset_idx], recvcounts, displs, C_DATATYPE_MPI, comm);
+ VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, read_buf_size)), "Data verification succeeded");
+ VRFY((0 == memcmp(read_bufs[dset_idx], correct_bufs[dset_idx], read_buf_size)),
+ "Data verification succeeded");
+ }
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
/*
* Write to whole dataset and ensure fill value isn't returned
@@ -8076,49 +9131,44 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id
start[0] = (hsize_t)mpi_rank * block[0];
start[1] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
- "Dataset write succeeded");
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+ test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
/* Verify correct data was written */
- dset_id = H5Dopen2(group_id, FILL_VALUES_TEST_DATASET_NAME2, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
-
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
-
- for (i = 0; i < read_buf_size / sizeof(*read_buf); i++)
- VRFY((read_buf[i] != FILL_VALUES_TEST_FILL_VAL), "Data verification succeeded");
-
- if (displs)
- free(displs);
- if (recvcounts)
- free(recvcounts);
- if (data)
- free(data);
- if (read_buf)
- free(read_buf);
- if (correct_buf)
- free(correct_buf);
+ open_datasets(group_id, FILL_VALUES_TEST_DATASET_NAME2, num_dsets, test_mode, dset_ids);
+
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ C_DATATYPE *tmp_buf = read_bufs[dset_idx];
+
+ for (size_t j = 0; j < read_buf_size / sizeof(C_DATATYPE); j++)
+ VRFY((tmp_buf[j] != FILL_VALUES_TEST_FILL_VAL), "Data verification succeeded");
+ }
+
+ free(displs);
+ free(recvcounts);
+
+ for (size_t dset_idx = 0; dset_idx < MAX_NUM_DSETS_MULTI; dset_idx++) {
+ free(data_bufs_nc[dset_idx]);
+ free(read_bufs[dset_idx]);
+ free(correct_bufs[dset_idx]);
+ }
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
@@ -8132,11 +9182,12 @@ test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id
*/
static void
test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id,
- hid_t dxpl_id)
+ hid_t dxpl_id, test_mode_t test_mode)
{
H5D_alloc_time_t alloc_time;
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
hsize_t dataset_dims[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS];
hsize_t chunk_dims[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS];
hsize_t sel_dims[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS];
@@ -8144,13 +9195,16 @@ test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_
hsize_t stride[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS];
hsize_t count[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS];
hsize_t block[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS];
- size_t i, data_size, read_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ size_t data_size, read_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
hid_t group_id = H5I_INVALID_HID;
hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
- HDputs("Testing undefined fill value");
+ puts("Testing undefined fill value");
VRFY((H5Pget_alloc_time(dcpl_id, &alloc_time) >= 0), "H5Pget_alloc_time succeeded");
@@ -8183,37 +9237,93 @@ test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_
/* Set an undefined fill value */
VRFY((H5Pset_fill_value(plist_id, HDF5_DATATYPE_NAME, NULL) >= 0), "Fill Value set");
- dset_id = H5Dcreate2(group_id, FILL_VALUE_UNDEFINED_TEST_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, FILL_VALUE_UNDEFINED_TEST_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, plist_id,
+ test_mode, &num_dsets, dset_ids);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+    /*
+     * Since we aren't writing fill values to the chunks of the
+     * datasets we just created, close and re-open the file to
+     * ensure that the file size is updated so we don't read past
+     * the end of the file later when doing multi-dataset I/O.
+     */
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ open_datasets(group_id, FILL_VALUE_UNDEFINED_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids);
+
/* Allocate buffer for reading entire dataset */
- read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*read_buf);
+ read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE);
- read_buf = calloc(1, read_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ read_bufs[dset_idx] = calloc(1, read_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+ }
/*
* Read entire dataset - nothing to verify since there's no fill value.
- * If not using early space allocation, the read should fail since storage
- * isn't allocated yet and no fill value is defined.
+ * If not using early space allocation, the read should fail for filtered
+ * datasets since storage isn't allocated yet and no fill value is defined.
+     * For unfiltered datasets, the library still forces early space
+     * allocation in parallel, so the read should succeed in that case.
*/
if (alloc_time == H5D_ALLOC_TIME_EARLY) {
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs,
+ test_mode);
}
else {
- H5E_BEGIN_TRY
- {
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) < 0),
- "Dataset read succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ herr_t expected = FAIL;
+ herr_t ret;
+
+ if (test_mode == USE_MULTIPLE_DATASETS_MIXED_FILTERED) {
+ hid_t dset_dcpl;
+ int nfilters;
+
+ dset_dcpl = H5Dget_create_plist(dset_ids[dset_idx]);
+ VRFY((dset_dcpl >= 0), "H5Dget_create_plist");
+
+ nfilters = H5Pget_nfilters(dset_dcpl);
+ VRFY((nfilters >= 0), "H5Pget_nfilters");
+
+ if (nfilters == 0)
+ expected = SUCCEED;
+
+ VRFY((H5Pclose(dset_dcpl) >= 0), "H5Pclose");
+ }
+
+ if (expected == SUCCEED)
+ ret = H5Dread(dset_ids[dset_idx], HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id,
+ read_bufs[dset_idx]);
+ else {
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dread(dset_ids[dset_idx], HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id,
+ read_bufs[dset_idx]);
+ }
+ H5E_END_TRY
+ }
+
+            VRFY((ret == expected), "Dataset read");
+
+ if (expected == SUCCEED)
+ verify_chunk_opt_status(1, dxpl_id);
+ else
+ verify_chunk_opt_status(0, dxpl_id);
}
- H5E_END_TRY
}
/*
@@ -8230,43 +9340,37 @@ test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_
start[0] = (hsize_t)mpi_rank;
start[1] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
/* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ C_DATATYPE *tmp_buf = (C_DATATYPE *)calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- data = (C_DATATYPE *)calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx);
- for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE)GEN_DATA(i);
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
+ }
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
- "Dataset write succeeded");
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+ test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ open_datasets(group_id, FILL_VALUE_UNDEFINED_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids);
- dset_id = H5Dopen2(group_id, FILL_VALUE_UNDEFINED_TEST_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
/*
* Write to whole dataset and ensure data is correct
@@ -8285,40 +9389,33 @@ test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_
start[0] = (hsize_t)mpi_rank * block[0];
start[1] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
- "Dataset write succeeded");
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+ test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
/* Verify correct data was written */
- dset_id = H5Dopen2(group_id, FILL_VALUE_UNDEFINED_TEST_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, FILL_VALUE_UNDEFINED_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids);
+
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ free(data_bufs_nc[dset_idx]);
+ free(read_bufs[dset_idx]);
+ }
- if (data)
- free(data);
- if (read_buf)
- free(read_buf);
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
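
The undefined-fill-value setup exercised by test_fill_value_undefined() comes down to a handful of DCPL calls. The following is a minimal sketch only, with illustrative chunk dimensions and the deflate filter standing in for whichever filter is under test; the helper name is hypothetical, not part of the test code:

    #include "hdf5.h"

    /* Minimal sketch: a DCPL like the one used above. With an undefined fill
     * value and non-early allocation, reading a filtered, chunked dataset
     * before any chunk has been written is expected to fail. */
    static hid_t
    make_undefined_fill_dcpl(void)
    {
        hsize_t chunk_dims[2] = {4, 8}; /* illustrative chunk sizes */
        hid_t   dcpl          = H5Pcreate(H5P_DATASET_CREATE);

        if (dcpl < 0)
            return H5I_INVALID_HID;

        /* Chunked + filtered layout (deflate stands in for the filter under test) */
        if (H5Pset_chunk(dcpl, 2, chunk_dims) < 0 || H5Pset_deflate(dcpl, 6) < 0)
            goto error;

        /* A NULL value pointer marks the fill value as undefined */
        if (H5Pset_fill_value(dcpl, H5T_NATIVE_INT, NULL) < 0)
            goto error;

        /* Late allocation: chunk storage appears only once a chunk is written */
        if (H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_LATE) < 0)
            goto error;

        return dcpl;

    error:
        H5Pclose(dcpl);
        return H5I_INVALID_HID;
    }
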
@@ -8332,11 +9429,12 @@ test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_
*/
static void
test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id,
- hid_t dxpl_id)
+ hid_t dxpl_id, test_mode_t test_mode)
{
- C_DATATYPE *data = NULL;
- C_DATATYPE *read_buf = NULL;
- C_DATATYPE *fill_buf = NULL;
+ const void *data_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ void *data_bufs_nc[MAX_NUM_DSETS_MULTI] = {0}; /* non-const buffer pointers for freeing */
+ void *read_bufs[MAX_NUM_DSETS_MULTI] = {0};
+ C_DATATYPE *fill_buf = NULL;
C_DATATYPE fill_value;
hsize_t dataset_dims[FILL_TIME_NEVER_TEST_DATASET_DIMS];
hsize_t chunk_dims[FILL_TIME_NEVER_TEST_DATASET_DIMS];
@@ -8345,15 +9443,19 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap
hsize_t stride[FILL_TIME_NEVER_TEST_DATASET_DIMS];
hsize_t count[FILL_TIME_NEVER_TEST_DATASET_DIMS];
hsize_t block[FILL_TIME_NEVER_TEST_DATASET_DIMS];
- size_t i, data_size, read_buf_size;
- hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ size_t data_size, read_buf_size;
+ size_t num_dsets;
+ hid_t dset_ids[MAX_NUM_DSETS_MULTI];
+ hid_t fspace_ids[MAX_NUM_DSETS_MULTI];
+ hid_t file_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
hid_t group_id = H5I_INVALID_HID;
hid_t filespace = H5I_INVALID_HID;
int *recvcounts = NULL;
int *displs = NULL;
+ int mpi_code;
if (MAINPROCESS)
- HDputs("Testing fill time H5D_FILL_TIME_NEVER");
+ puts("Testing fill time H5D_FILL_TIME_NEVER");
/*
* Only run this test when incremental file space allocation is
@@ -8406,29 +9508,49 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap
/* Set fill time of 'never' */
VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_NEVER) >= 0), "H5Pset_fill_time succeeded");
- dset_id = H5Dcreate2(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Create datasets depending on the current test mode */
+ create_datasets(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, plist_id,
+ test_mode, &num_dsets, dset_ids);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, DATASET_JUST_CREATED);
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+    /*
+     * Since we aren't writing fill values to the chunks of the
+     * datasets we just created, close and re-open the file to
+     * ensure that the file size is updated so we don't read past
+     * the end of the file later when doing multi-dataset I/O.
+     */
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ open_datasets(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids);
+
/* Allocate buffer for reading entire dataset */
- read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*read_buf);
+ read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(C_DATATYPE);
- read_buf = calloc(1, read_buf_size);
- VRFY((NULL != read_buf), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ read_bufs[dset_idx] = calloc(1, read_buf_size);
+ VRFY((NULL != read_bufs[dset_idx]), "calloc succeeded");
+ }
fill_buf = calloc(1, read_buf_size);
VRFY((NULL != fill_buf), "calloc succeeded");
/* Read entire dataset and verify that the fill value isn't returned */
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
- for (i = 0; i < read_buf_size / sizeof(*read_buf); i++)
+ for (size_t i = 0; i < read_buf_size / sizeof(C_DATATYPE); i++)
fill_buf[i] = FILL_TIME_NEVER_TEST_FILL_VAL;
/*
@@ -8436,7 +9558,8 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap
* values to all be the fill value, so this should be
* a safe comparison in theory.
*/
- VRFY((0 != memcmp(read_buf, fill_buf, read_buf_size)), "Data verification succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((0 != memcmp(read_bufs[dset_idx], fill_buf, read_buf_size)), "Data verification succeeded");
/*
* Write to part of the first chunk in the dataset with
@@ -8454,44 +9577,35 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap
start[0] = (hsize_t)mpi_rank;
start[1] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
-
- /* Select hyperslab in the file */
- filespace = H5Dget_space(dset_id);
- VRFY((filespace >= 0), "File dataspace retrieval succeeded");
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
/* Fill data buffer */
- data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(C_DATATYPE);
- data = (C_DATATYPE *)calloc(1, data_size);
- VRFY((NULL != data), "calloc succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ C_DATATYPE *tmp_buf = calloc(1, data_size);
+ VRFY((NULL != tmp_buf), "calloc succeeded");
- for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE)GEN_DATA(i);
+ for (size_t j = 0; j < data_size / sizeof(C_DATATYPE); j++)
+ tmp_buf[j] = (C_DATATYPE)(GEN_DATA(j) + dset_idx);
+
+ data_bufs[dset_idx] = tmp_buf;
+ data_bufs_nc[dset_idx] = tmp_buf;
+ }
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
- "Dataset write succeeded");
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+ test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, SOME_CHUNKS_WRITTEN);
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
/* Verify correct data was written */
- dset_id = H5Dopen2(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
+ open_datasets(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids);
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
/*
* Each MPI rank communicates their written piece of data
@@ -8503,21 +9617,26 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap
displs = calloc(1, (size_t)mpi_size * sizeof(*displs));
VRFY((NULL != displs), "calloc succeeded");
- for (i = 0; i < (size_t)mpi_size; i++) {
+ for (size_t i = 0; i < (size_t)mpi_size; i++) {
recvcounts[i] = (int)(count[1] * block[1]);
displs[i] = (int)(i * dataset_dims[1]);
}
- VRFY((MPI_SUCCESS == MPI_Allgatherv(data, recvcounts[mpi_rank], C_DATATYPE_MPI, fill_buf, recvcounts,
- displs, C_DATATYPE_MPI, comm)),
- "MPI_Allgatherv succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ mpi_code = MPI_Allgatherv(data_bufs[dset_idx], recvcounts[mpi_rank], C_DATATYPE_MPI, fill_buf,
+ recvcounts, displs, C_DATATYPE_MPI, comm);
+ VRFY((MPI_SUCCESS == mpi_code), "MPI_Allgatherv succeeded");
- /*
- * It should be very unlikely for the dataset's random
- * values to all be the fill value, so this should be
- * a safe comparison in theory.
- */
- VRFY((0 != memcmp(read_buf, fill_buf, read_buf_size)), "Data verification succeeded");
+ /*
+ * It should be very unlikely for the dataset's random
+ * values to all be the fill value, so this should be
+ * a safe comparison in theory.
+ */
+ VRFY((0 != memcmp(read_bufs[dset_idx], fill_buf, read_buf_size)), "Data verification succeeded");
+ }
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
/*
* Write to whole dataset and ensure fill value isn't returned
@@ -8536,49 +9655,45 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap
start[0] = (hsize_t)mpi_rank * block[0];
start[1] = 0;
- if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
- ", %" PRIuHSIZE " ]\n",
- mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
- }
-
- VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
- "Hyperslab selection succeeded");
+ select_hyperslab(num_dsets, dset_ids, start, stride, count, block, fspace_ids);
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
- "Dataset write succeeded");
+ write_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_BLOCK, fspace_ids, dxpl_id, data_bufs,
+ test_mode);
/* Verify space allocation status */
- verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+ verify_space_alloc_status(num_dsets, dset_ids, plist_id, ALL_CHUNKS_WRITTEN);
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++)
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
/* Verify correct data was written */
- dset_id = H5Dopen2(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, H5P_DEFAULT);
- VRFY((dset_id >= 0), "Dataset open succeeded");
-
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
- "Dataset read succeeded");
-
- for (i = 0; i < read_buf_size / sizeof(*read_buf); i++)
- VRFY((read_buf[i] != FILL_TIME_NEVER_TEST_FILL_VAL), "Data verification succeeded");
-
- if (displs)
- free(displs);
- if (recvcounts)
- free(recvcounts);
- if (data)
- free(data);
- if (read_buf)
- free(read_buf);
- if (fill_buf)
- free(fill_buf);
+ open_datasets(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, num_dsets, test_mode, dset_ids);
+
+ read_datasets(num_dsets, dset_ids, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_bufs, test_mode);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ C_DATATYPE *tmp_buf = read_bufs[dset_idx];
+
+ for (size_t j = 0; j < read_buf_size / sizeof(C_DATATYPE); j++)
+ VRFY((tmp_buf[j] != FILL_TIME_NEVER_TEST_FILL_VAL), "Data verification succeeded");
+ }
+
+ free(displs);
+ free(recvcounts);
+
+ free(fill_buf);
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ free(data_bufs_nc[dset_idx]);
+ free(read_bufs[dset_idx]);
+ }
+
+ for (size_t dset_idx = 0; dset_idx < num_dsets; dset_idx++) {
+ VRFY((H5Sclose(fspace_ids[dset_idx]) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_ids[dset_idx]) >= 0), "Dataset close succeeded");
+ }
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
- VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
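
The per-rank correctness checks in these fill-value tests all follow the same MPI_Allgatherv pattern: each rank contributes the row it wrote and every rank assembles the full expected buffer locally. A stand-alone sketch of that pattern, with a hypothetical fixed row length and one int row per rank (names are illustrative, not the test's helpers):

    #include <stdlib.h>
    #include <mpi.h>

    #define COLS 16 /* hypothetical row length */

    /* Each rank contributes the single row it wrote; afterwards every rank
     * holds the full expected buffer it can memcmp() against what it read
     * back. (Allocation checks omitted for brevity.) */
    static int *
    gather_expected(const int *my_row, int mpi_rank, int mpi_size, MPI_Comm comm)
    {
        int *expected   = calloc((size_t)mpi_size * COLS, sizeof(int));
        int *recvcounts = calloc((size_t)mpi_size, sizeof(int));
        int *displs     = calloc((size_t)mpi_size, sizeof(int));

        for (int i = 0; i < mpi_size; i++) {
            recvcounts[i] = COLS;     /* each rank sends one row */
            displs[i]     = i * COLS; /* placed at that rank's row offset */
        }

        MPI_Allgatherv(my_row, recvcounts[mpi_rank], MPI_INT, expected, recvcounts, displs, MPI_INT, comm);

        free(displs);
        free(recvcounts);

        return expected; /* caller frees */
    }
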
@@ -8589,28 +9704,44 @@ test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fap
int
main(int argc, char **argv)
{
- size_t cur_filter_idx = 0;
- size_t num_filters = 0;
- hid_t file_id = H5I_INVALID_HID;
- hid_t fcpl_id = H5I_INVALID_HID;
- hid_t group_id = H5I_INVALID_HID;
- hid_t fapl_id = H5I_INVALID_HID;
- hid_t dxpl_id = H5I_INVALID_HID;
- hid_t dcpl_id = H5I_INVALID_HID;
- int mpi_code;
+ unsigned seed;
+ size_t cur_filter_idx = 0;
+ size_t num_filters = 0;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fcpl_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ int mpi_code;
/* Initialize MPI */
- MPI_Init(&argc, &argv);
- MPI_Comm_size(comm, &mpi_size);
- MPI_Comm_rank(comm, &mpi_rank);
+ if (MPI_SUCCESS != (mpi_code = MPI_Init(&argc, &argv))) {
+ printf("Failed to initialize MPI: MPI error code %d\n", mpi_code);
+ fflush(stdout);
+ MPI_Abort(MPI_COMM_WORLD, -1);
+ }
+
+ if (MPI_SUCCESS != (mpi_code = MPI_Comm_size(comm, &mpi_size))) {
+ printf("Failed to retrieve MPI communicator size: MPI error code %d\n", mpi_code);
+ fflush(stdout);
+ MPI_Abort(MPI_COMM_WORLD, -1);
+ }
+
+ if (MPI_SUCCESS != (mpi_code = MPI_Comm_rank(comm, &mpi_rank))) {
+ printf("Failed to retrieve MPI communicator rank: MPI error code %d\n", mpi_code);
+ fflush(stdout);
+ MPI_Abort(MPI_COMM_WORLD, -1);
+ }
if (mpi_size <= 0) {
if (MAINPROCESS) {
printf("The Parallel Filters tests require at least 1 rank.\n");
printf("Quitting...\n");
+ fflush(stdout);
}
- MPI_Abort(MPI_COMM_WORLD, 1);
+ MPI_Abort(MPI_COMM_WORLD, -1);
}
if (H5dont_atexit() < 0) {
@@ -8632,6 +9763,30 @@ main(int argc, char **argv)
TestAlarmOn();
+    /*
+     * Obtain and broadcast the seed value, since ranks
+     * aren't guaranteed to arrive here at exactly the
+     * same time and could otherwise end up out of sync
+     * with each other with regard to random number
+     * generation.
+     */
+ if (MAINPROCESS)
+ seed = (unsigned)time(NULL);
+
+ if (mpi_size > 1) {
+ if (MPI_SUCCESS != (mpi_code = MPI_Bcast(&seed, 1, MPI_UNSIGNED, 0, comm))) {
+ if (MAINPROCESS)
+ printf("MPI_Bcast failed with error code %d\n", mpi_code);
+ fflush(stdout);
+ MPI_Abort(MPI_COMM_WORLD, -1);
+ }
+ }
+
+ srand(seed);
+
+ if (MAINPROCESS)
+ printf("Using seed: %u\n\n", seed);
+
num_filters = ARRAY_SIZE(filterIDs);
/* Set up file access property list with parallel I/O access,
@@ -8641,9 +9796,8 @@ main(int argc, char **argv)
VRFY((fapl_id >= 0), "FAPL creation succeeded");
VRFY((H5Pset_fapl_mpio(fapl_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
- VRFY((H5Pset_all_coll_metadata_ops(fapl_id, TRUE) >= 0), "H5Pset_all_coll_metadata_ops succeeded");
- VRFY((H5Pset_coll_metadata_write(fapl_id, TRUE) >= 0), "H5Pset_coll_metadata_write succeeded");
-
+ VRFY((H5Pset_all_coll_metadata_ops(fapl_id, true) >= 0), "H5Pset_all_coll_metadata_ops succeeded");
+ VRFY((H5Pset_coll_metadata_write(fapl_id, true) >= 0), "H5Pset_coll_metadata_write succeeded");
VRFY((H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
"Set libver bounds succeeded");
@@ -8653,7 +9807,7 @@ main(int argc, char **argv)
fcpl_id = H5Pcreate(H5P_FILE_CREATE);
VRFY((fcpl_id >= 0), "FCPL creation succeeded");
- VRFY((H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE, TRUE, 1) >= 0),
+ VRFY((H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE, true, 1) >= 0),
"H5Pset_file_space_strategy succeeded");
VRFY((h5_fixname(FILENAME[0], fapl_id, filenames[0], sizeof(filenames[0])) != NULL),
@@ -8677,108 +9831,160 @@ main(int argc, char **argv)
/* Run tests with all available filters */
for (cur_filter_idx = 0; cur_filter_idx < num_filters; cur_filter_idx++) {
- H5FD_mpio_chunk_opt_t chunk_opt;
- H5Z_filter_t cur_filter = filterIDs[cur_filter_idx];
-
- /* Run tests with both linked-chunk and multi-chunk I/O */
- for (chunk_opt = H5FD_MPIO_CHUNK_ONE_IO; chunk_opt <= H5FD_MPIO_CHUNK_MULTI_IO; chunk_opt++) {
- H5D_alloc_time_t space_alloc_time;
-
- /* Run tests with all available space allocation times */
- for (space_alloc_time = H5D_ALLOC_TIME_EARLY; space_alloc_time <= H5D_ALLOC_TIME_INCR;
- space_alloc_time++) {
- const char *alloc_time;
- unsigned filter_config;
- htri_t filter_avail;
- size_t i;
- char group_name[512];
-
- switch (space_alloc_time) {
- case H5D_ALLOC_TIME_EARLY:
- alloc_time = "Early";
- break;
- case H5D_ALLOC_TIME_LATE:
- alloc_time = "Late";
- break;
- case H5D_ALLOC_TIME_INCR:
- alloc_time = "Incremental";
- break;
- case H5D_ALLOC_TIME_DEFAULT:
- case H5D_ALLOC_TIME_ERROR:
- default:
- alloc_time = "Unknown";
- }
-
- if (MAINPROCESS)
- printf("== Running tests with filter '%s' using '%s' and '%s' allocation time ==\n\n",
- filterNames[cur_filter_idx],
- H5FD_MPIO_CHUNK_ONE_IO == chunk_opt ? "Linked-Chunk I/O" : "Multi-Chunk I/O",
- alloc_time);
-
- /* Make sure current filter is available before testing with it */
- filter_avail = H5Zfilter_avail(cur_filter);
- VRFY((filter_avail >= 0), "H5Zfilter_avail succeeded");
-
- if (!filter_avail) {
- if (MAINPROCESS)
- printf(" ** SKIPPED tests with filter '%s' - filter unavailable **\n\n",
- filterNames[cur_filter_idx]);
- continue;
- }
+ H5D_selection_io_mode_t sel_io_mode;
+
+ /* Run tests with different selection I/O modes */
+ for (sel_io_mode = H5D_SELECTION_IO_MODE_DEFAULT; sel_io_mode <= H5D_SELECTION_IO_MODE_ON;
+ sel_io_mode++) {
+ H5FD_mpio_chunk_opt_t chunk_opt;
+
+ /* Run tests with both linked-chunk and multi-chunk I/O */
+ for (chunk_opt = H5FD_MPIO_CHUNK_ONE_IO; chunk_opt <= H5FD_MPIO_CHUNK_MULTI_IO; chunk_opt++) {
+ H5D_alloc_time_t space_alloc_time;
+
+ /* Run tests with all available space allocation times */
+ for (space_alloc_time = H5D_ALLOC_TIME_EARLY; space_alloc_time <= H5D_ALLOC_TIME_INCR;
+ space_alloc_time++) {
+ test_mode_t test_mode;
+
+ /* Run with each of the test modes (single dataset, multiple datasets, etc.) */
+ for (test_mode = USE_SINGLE_DATASET; test_mode < TEST_MODE_SENTINEL; test_mode++) {
+ H5Z_filter_t cur_filter = filterIDs[cur_filter_idx];
+ const char *sel_io_str;
+ const char *alloc_time;
+ const char *mode;
+ unsigned filter_config;
+ htri_t filter_avail;
+ char group_name[512];
+
+ switch (sel_io_mode) {
+ case H5D_SELECTION_IO_MODE_DEFAULT:
+ sel_io_str = "default";
+ break;
+ case H5D_SELECTION_IO_MODE_OFF:
+ sel_io_str = "off";
+ break;
+ case H5D_SELECTION_IO_MODE_ON:
+ sel_io_str = "on";
+ break;
+ default:
+ sel_io_str = "unknown";
+ }
+
+ switch (space_alloc_time) {
+ case H5D_ALLOC_TIME_EARLY:
+ alloc_time = "Early";
+ break;
+ case H5D_ALLOC_TIME_LATE:
+ alloc_time = "Late";
+ break;
+ case H5D_ALLOC_TIME_INCR:
+ alloc_time = "Incremental";
+ break;
+ case H5D_ALLOC_TIME_DEFAULT:
+ case H5D_ALLOC_TIME_ERROR:
+ default:
+ alloc_time = "Unknown";
+ }
+
+ switch (test_mode) {
+ case USE_SINGLE_DATASET:
+ mode = "single";
+ break;
+ case USE_MULTIPLE_DATASETS:
+ mode = "multi";
+ break;
+ case USE_MULTIPLE_DATASETS_MIXED_FILTERED:
+ mode = "multi-mixed-filtered";
+ break;
+ case TEST_MODE_SENTINEL:
+ default:
+ mode = "unknown";
+ }
- /* Get the current filter's info */
- VRFY((H5Zget_filter_info(cur_filter, &filter_config) >= 0), "H5Zget_filter_info succeeded");
-
- /* Determine if filter is encode-enabled */
- if (0 == (filter_config & H5Z_FILTER_CONFIG_ENCODE_ENABLED)) {
- if (MAINPROCESS)
- printf(" ** SKIPPED tests with filter '%s' - filter not encode-enabled **\n\n",
- filterNames[cur_filter_idx]);
- continue;
- }
-
- /* Set space allocation time */
- VRFY((H5Pset_alloc_time(dcpl_id, space_alloc_time) >= 0), "H5Pset_alloc_time succeeded");
-
- /* Set chunk I/O optimization method */
- VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, chunk_opt) >= 0),
- "H5Pset_dxpl_mpio_chunk_opt succeeded");
-
- /* Create a group to hold all the datasets for this combination
- * of filter and chunk optimization mode. Then, close the file
- * again since some tests may need to open the file in a special
- * way, like on rank 0 only */
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
- VRFY((file_id >= 0), "H5Fopen succeeded");
-
- HDsnprintf(group_name, sizeof(group_name), "%s_%s_%s", filterNames[cur_filter_idx],
- H5FD_MPIO_CHUNK_ONE_IO == chunk_opt ? "linked-chunk-io" : "multi-chunk-io",
- alloc_time);
-
- group_id = H5Gcreate2(file_id, group_name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((group_id >= 0), "H5Gcreate2 succeeded");
-
- VRFY((H5Gclose(group_id) >= 0), "H5Gclose failed");
- group_id = H5I_INVALID_HID;
-
- VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded");
- file_id = H5I_INVALID_HID;
-
- for (i = 0; i < ARRAY_SIZE(tests); i++) {
- test_func func = tests[i];
+ if (MAINPROCESS)
+ printf("== Running tests in mode '%s' with filter '%s' using selection I/O mode "
+ "'%s', '%s' and '%s' allocation time ==\n\n",
+ test_mode_to_string(test_mode), filterNames[cur_filter_idx], sel_io_str,
+ H5FD_MPIO_CHUNK_ONE_IO == chunk_opt ? "Linked-Chunk I/O"
+ : "Multi-Chunk I/O",
+ alloc_time);
+
+ /* Make sure current filter is available before testing with it */
+ filter_avail = H5Zfilter_avail(cur_filter);
+ VRFY((filter_avail >= 0), "H5Zfilter_avail succeeded");
+
+ if (!filter_avail) {
+ if (MAINPROCESS)
+ printf(" ** SKIPPED tests with filter '%s' - filter unavailable **\n\n",
+ filterNames[cur_filter_idx]);
+ continue;
+ }
+
+ /* Get the current filter's info */
+ VRFY((H5Zget_filter_info(cur_filter, &filter_config) >= 0),
+ "H5Zget_filter_info succeeded");
+
+ /* Determine if filter is encode-enabled */
+ if (0 == (filter_config & H5Z_FILTER_CONFIG_ENCODE_ENABLED)) {
+ if (MAINPROCESS)
+ printf(
+ " ** SKIPPED tests with filter '%s' - filter not encode-enabled **\n\n",
+ filterNames[cur_filter_idx]);
+ continue;
+ }
+
+ /* Set space allocation time */
+ VRFY((H5Pset_alloc_time(dcpl_id, space_alloc_time) >= 0),
+ "H5Pset_alloc_time succeeded");
+
+ /* Set selection I/O mode */
+ VRFY((H5Pset_selection_io(dxpl_id, sel_io_mode) >= 0),
+ "H5Pset_selection_io succeeded");
+
+ /* Set chunk I/O optimization method */
+ VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, chunk_opt) >= 0),
+ "H5Pset_dxpl_mpio_chunk_opt succeeded");
+
+ /* Create a group to hold all the datasets for this combination
+ * of filter and chunk optimization mode. Then, close the file
+ * again since some tests may need to open the file in a special
+ * way, like on rank 0 only */
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "H5Fopen succeeded");
+
+ snprintf(group_name, sizeof(group_name), "%s_sel-io-%s_%s_%s_%s",
+ filterNames[cur_filter_idx], sel_io_str,
+ H5FD_MPIO_CHUNK_ONE_IO == chunk_opt ? "linked-chunk-io" : "multi-chunk-io",
+ alloc_time, mode);
+
+ group_id = H5Gcreate2(file_id, group_name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gcreate2 succeeded");
+
+ VRFY((H5Gclose(group_id) >= 0), "H5Gclose failed");
+ group_id = H5I_INVALID_HID;
+
+ VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded");
+ file_id = H5I_INVALID_HID;
+
+ /* Run all tests */
+ for (size_t i = 0; i < ARRAY_SIZE(tests); i++) {
+ test_func func = tests[i];
+
+ if (MPI_SUCCESS == (mpi_code = MPI_Barrier(comm))) {
+ func(group_name, cur_filter, fapl_id, dcpl_id, dxpl_id, test_mode);
+ }
+ else {
+ if (MAINPROCESS)
+ MESG("MPI_Barrier failed");
+ nerrors++;
+ }
+ }
- if (MPI_SUCCESS == (mpi_code = MPI_Barrier(comm))) {
- func(group_name, cur_filter, fapl_id, dcpl_id, dxpl_id);
- }
- else {
if (MAINPROCESS)
- MESG("MPI_Barrier failed");
- nerrors++;
+ puts("");
}
}
-
- if (MAINPROCESS)
- HDputs("");
}
}
}
@@ -8793,7 +9999,7 @@ main(int argc, char **argv)
goto exit;
if (MAINPROCESS)
- HDputs("All Parallel Filters tests passed\n");
+ puts("All Parallel Filters tests passed\n");
exit:
if (nerrors)
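
The reworked test matrix in main() above now cycles over selection I/O mode, chunk optimization mode, space allocation time, and test mode. At its core, each combination is just a few property-list calls; a minimal sketch of one such combination, assuming dcpl_id and dxpl_id were created with H5Pcreate(H5P_DATASET_CREATE) and H5Pcreate(H5P_DATASET_XFER) (the helper name is illustrative):

    #include "hdf5.h"

    /* One point in the test matrix: collective MPI-IO with selection I/O
     * enabled, linked-chunk optimization, and early space allocation. */
    static herr_t
    configure_plists(hid_t dcpl_id, hid_t dxpl_id)
    {
        /* Collective transfers, as used throughout these tests */
        if (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0)
            return -1;

        /* The new outer loop dimension: selection I/O mode */
        if (H5Pset_selection_io(dxpl_id, H5D_SELECTION_IO_MODE_ON) < 0)
            return -1;

        /* Linked-chunk vs. multi-chunk I/O */
        if (H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_ONE_IO) < 0)
            return -1;

        /* Space allocation time on the DCPL */
        if (H5Pset_alloc_time(dcpl_id, H5D_ALLOC_TIME_EARLY) < 0)
            return -1;

        return 0;
    }
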
diff --git a/testpar/t_filters_parallel.h b/testpar/t_filters_parallel.h
index e110d0e..c0b1db8 100644
--- a/testpar/t_filters_parallel.h
+++ b/testpar/t_filters_parallel.h
@@ -58,6 +58,11 @@ size_t cd_nelmts = FILTER_NUM_CDVALUES;
#define DIM0_SCALE_FACTOR 4
#define DIM1_SCALE_FACTOR 2
+/* The maximum number of datasets to work on simultaneously
+ * when using H5Dwrite_multi/H5Dread_multi
+ */
+#define MAX_NUM_DSETS_MULTI 5
+
/* Struct type for the compound datatype filtered dataset tests */
typedef struct {
short field1;
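
MAX_NUM_DSETS_MULTI bounds the parallel arrays of dataset handles, dataspaces, and buffers handed to H5Dwrite_multi()/H5Dread_multi(). A minimal sketch of such a call is shown below; the function name, datatype, and the single shared memory dataspace are illustrative assumptions, and the test's write_datasets() helper wraps this kind of call:

    #include "hdf5.h"

    #define MAX_NUM_DSETS_MULTI 5 /* mirrors the limit defined above */

    /* Minimal sketch: write 'count' datasets in a single call. The arrays are
     * parallel: entry i holds the handle, memory type, memory dataspace,
     * file dataspace, and buffer for dataset i. */
    static herr_t
    write_multi_example(size_t count, hid_t dset_ids[], hid_t mem_space, hid_t fspace_ids[], hid_t dxpl_id,
                        const void *bufs[])
    {
        hid_t mem_types[MAX_NUM_DSETS_MULTI];
        hid_t mem_spaces[MAX_NUM_DSETS_MULTI];

        if (count == 0 || count > MAX_NUM_DSETS_MULTI)
            return -1;

        for (size_t i = 0; i < count; i++) {
            mem_types[i]  = H5T_NATIVE_INT; /* one memory datatype per dataset */
            mem_spaces[i] = mem_space;      /* same in-memory layout for each */
        }

        /* With count == 1 this performs the same I/O a plain H5Dwrite() would */
        return H5Dwrite_multi(count, dset_ids, mem_types, mem_spaces, fspace_ids, dxpl_id, bufs);
    }
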
diff --git a/testpar/t_init_term.c b/testpar/t_init_term.c
index 929bb59..0268e3d 100644
--- a/testpar/t_init_term.c
+++ b/testpar/t_init_term.c
@@ -37,11 +37,9 @@ main(int argc, char **argv)
MPI_Finalize();
- nerrors += GetTestNumErrs();
-
/* test if we can initialize the library with MPI being finalized
and create a file serially */
- H5open();
+ VRFY((H5open() >= 0), "H5open succeeded");
if (mpi_rank == 0) {
char filename[1024];
@@ -54,7 +52,7 @@ main(int argc, char **argv)
file_id = -1;
}
- H5close();
+ VRFY((H5close() >= 0), "H5close succeeded");
if (MAINPROCESS) {
if (0 == nerrors)
diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c
index bc6305e..582e441 100644
--- a/testpar/t_mdset.c
+++ b/testpar/t_mdset.c
@@ -182,7 +182,7 @@ multiple_dset_write(void)
VRFY((ret >= 0), "set fill-value succeeded");
for (n = 0; n < ndatasets; n++) {
- HDsnprintf(dname, sizeof(dname), "dataset %d", n);
+ snprintf(dname, sizeof(dname), "dataset %d", n);
dataset = H5Dcreate2(iof, dname, H5T_NATIVE_DOUBLE, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
VRFY((dataset > 0), dname);
@@ -224,7 +224,7 @@ compact_dataset(void)
herr_t ret;
const char *filename;
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- hbool_t prop_value;
+ bool prop_value;
#endif
size = get_size();
@@ -314,10 +314,10 @@ compact_dataset(void)
VRFY((ret >= 0), "H5Dread succeeded");
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- prop_value = FALSE;
+ prop_value = false;
ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
VRFY((ret >= 0), "H5Pget succeeded");
- VRFY((prop_value == FALSE && dxfer_coll_type == DXFER_COLLECTIVE_IO),
+ VRFY((prop_value == false && dxfer_coll_type == DXFER_COLLECTIVE_IO),
"rank 0 Bcast optimization was performed for a compact dataset");
#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
@@ -588,7 +588,7 @@ dataset_fillvalue(void)
herr_t ret; /* Generic return value */
const char *filename;
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- hbool_t prop_value;
+ bool prop_value;
#endif
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
@@ -656,13 +656,13 @@ dataset_fillvalue(void)
VRFY((ret >= 0), "H5Dread succeeded");
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- prop_value = FALSE;
+ prop_value = false;
ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
VRFY((ret >= 0), "testing property list get succeeded");
if (ii == 0)
- VRFY((prop_value == FALSE), "correctly handled rank 0 Bcast");
+ VRFY((prop_value == false), "correctly handled rank 0 Bcast");
else
- VRFY((prop_value == TRUE), "correctly handled rank 0 Bcast");
+ VRFY((prop_value == true), "correctly handled rank 0 Bcast");
#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
/* Verify all data read are the fill value 0 */
@@ -746,13 +746,13 @@ dataset_fillvalue(void)
VRFY((ret >= 0), "H5Dread succeeded");
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- prop_value = FALSE;
+ prop_value = false;
ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
VRFY((ret >= 0), "testing property list get succeeded");
if (ii == 0)
- VRFY((prop_value == FALSE), "correctly handled rank 0 Bcast");
+ VRFY((prop_value == false), "correctly handled rank 0 Bcast");
else
- VRFY((prop_value == TRUE), "correctly handled rank 0 Bcast");
+ VRFY((prop_value == true), "correctly handled rank 0 Bcast");
#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
/* Verify correct data read */
@@ -877,11 +877,11 @@ collective_group_write(void)
/* creates ngroups groups under the root group, writes chunked
* datasets in parallel. */
for (m = 0; m < ngroups; m++) {
- HDsnprintf(gname, sizeof(gname), "group%d", m);
+ snprintf(gname, sizeof(gname), "group%d", m);
gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((gid > 0), gname);
- HDsnprintf(dname, sizeof(dname), "dataset%d", m);
+ snprintf(dname, sizeof(dname), "dataset%d", m);
did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
VRFY((did > 0), dname);
@@ -936,7 +936,7 @@ independent_group_read(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
- H5Pset_all_coll_metadata_ops(plist, FALSE);
+ H5Pset_all_coll_metadata_ops(plist, false);
fid = H5Fopen(filename, H5F_ACC_RDONLY, plist);
VRFY((fid > 0), "H5Fopen");
@@ -978,12 +978,12 @@ group_dataset_read(hid_t fid, int mpi_rank, int m)
VRFY((outdata != NULL), "malloc succeeded for outdata");
/* open every group under root group. */
- HDsnprintf(gname, sizeof(gname), "group%d", m);
+ snprintf(gname, sizeof(gname), "group%d", m);
gid = H5Gopen2(fid, gname, H5P_DEFAULT);
VRFY((gid > 0), gname);
/* check the data. */
- HDsnprintf(dname, sizeof(dname), "dataset%d", m);
+ snprintf(dname, sizeof(dname), "dataset%d", m);
did = H5Dopen2(gid, dname, H5P_DEFAULT);
VRFY((did > 0), dname);
@@ -1079,7 +1079,7 @@ multiple_group_write(void)
/* creates ngroups groups under the root group, writes datasets in
* parallel. */
for (m = 0; m < ngroups; m++) {
- HDsnprintf(gname, sizeof(gname), "group%d", m);
+ snprintf(gname, sizeof(gname), "group%d", m);
gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((gid > 0), gname);
@@ -1135,7 +1135,7 @@ write_dataset(hid_t memspace, hid_t filespace, hid_t gid)
VRFY((outme != NULL), "malloc succeeded for outme");
for (n = 0; n < NDATASET; n++) {
- HDsnprintf(dname, sizeof(dname), "dataset%d", n);
+ snprintf(dname, sizeof(dname), "dataset%d", n);
did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((did > 0), dname);
@@ -1173,7 +1173,7 @@ create_group_recursive(hid_t memspace, hid_t filespace, hid_t gid, int counter)
}
#endif /* BARRIER_CHECKS */
- HDsnprintf(gname, sizeof(gname), "%dth_child_group", counter + 1);
+ snprintf(gname, sizeof(gname), "%dth_child_group", counter + 1);
child_gid = H5Gcreate2(gid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((child_gid > 0), gname);
@@ -1227,7 +1227,7 @@ multiple_group_read(void)
/* open every group under root group. */
for (m = 0; m < ngroups; m++) {
- HDsnprintf(gname, sizeof(gname), "group%d", m);
+ snprintf(gname, sizeof(gname), "group%d", m);
gid = H5Gopen2(fid, gname, H5P_DEFAULT);
VRFY((gid > 0), gname);
@@ -1284,7 +1284,7 @@ read_dataset(hid_t memspace, hid_t filespace, hid_t gid)
VRFY((outdata != NULL), "malloc succeeded for outdata");
for (n = 0; n < NDATASET; n++) {
- HDsnprintf(dname, sizeof(dname), "dataset%d", n);
+ snprintf(dname, sizeof(dname), "dataset%d", n);
did = H5Dopen2(gid, dname, H5P_DEFAULT);
VRFY((did > 0), dname);
@@ -1335,7 +1335,7 @@ recursive_read_group(hid_t memspace, hid_t filespace, hid_t gid, int counter)
nerrors += err_num;
if (counter < GROUP_DEPTH) {
- HDsnprintf(gname, sizeof(gname), "%dth_child_group", counter + 1);
+ snprintf(gname, sizeof(gname), "%dth_child_group", counter + 1);
child_gid = H5Gopen2(gid, gname, H5P_DEFAULT);
VRFY((child_gid > 0), gname);
recursive_read_group(memspace, filespace, child_gid, counter + 1);
@@ -1357,7 +1357,7 @@ write_attribute(hid_t obj_id, int this_type, int num)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
if (this_type == is_group) {
- HDsnprintf(attr_name, sizeof(attr_name), "Group Attribute %d", num);
+ snprintf(attr_name, sizeof(attr_name), "Group Attribute %d", num);
sid = H5Screate(H5S_SCALAR);
aid = H5Acreate2(obj_id, attr_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
H5Awrite(aid, H5T_NATIVE_INT, &num);
@@ -1365,7 +1365,7 @@ write_attribute(hid_t obj_id, int this_type, int num)
H5Sclose(sid);
} /* end if */
else if (this_type == is_dset) {
- HDsnprintf(attr_name, sizeof(attr_name), "Dataset Attribute %d", num);
+ snprintf(attr_name, sizeof(attr_name), "Dataset Attribute %d", num);
for (i = 0; i < 8; i++)
attr_data[i] = i;
sid = H5Screate_simple(dspace_rank, dspace_dims, NULL);
@@ -1388,14 +1388,14 @@ read_attribute(hid_t obj_id, int this_type, int num)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
if (this_type == is_group) {
- HDsnprintf(attr_name, sizeof(attr_name), "Group Attribute %d", num);
+ snprintf(attr_name, sizeof(attr_name), "Group Attribute %d", num);
aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT);
H5Aread(aid, H5T_NATIVE_INT, &in_num);
vrfy_errors = dataset_vrfy(NULL, NULL, NULL, group_block, &in_num, &num);
H5Aclose(aid);
}
else if (this_type == is_dset) {
- HDsnprintf(attr_name, sizeof(attr_name), "Dataset Attribute %d", num);
+ snprintf(attr_name, sizeof(attr_name), "Dataset Attribute %d", num);
for (i = 0; i < 8; i++)
out_data[i] = i;
aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT);
@@ -1516,7 +1516,7 @@ io_mode_confusion(void)
*/
const char *fcn_name = "io_mode_confusion";
- const hbool_t verbose = FALSE;
+ const bool verbose = false;
const H5Ptest_param_t *pt;
char *filename;
@@ -1765,8 +1765,8 @@ rr_obj_hdr_flush_confusion(void)
MPI_Comm comm;
/* test bed related variables */
- const char *fcn_name = "rr_obj_hdr_flush_confusion";
- const hbool_t verbose = FALSE;
+ const char *fcn_name = "rr_obj_hdr_flush_confusion";
+ const bool verbose = false;
/* Create two new private communicators from MPI_COMM_WORLD.
* Even and odd ranked processes go to comm_writers and comm_readers
@@ -1806,9 +1806,9 @@ rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
{
int i;
int j;
- hid_t file_id = -1;
- hid_t fapl_id = -1;
- hid_t dxpl_id = -1;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
hid_t att_id[NUM_DATA_SETS];
hid_t att_space[NUM_DATA_SETS];
hid_t lg_att_id[NUM_DATA_SETS];
@@ -1843,7 +1843,7 @@ rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
/* test bed related variables */
const char *fcn_name = "rr_obj_hdr_flush_confusion_writer";
- const hbool_t verbose = FALSE;
+ const bool verbose = false;
const H5Ptest_param_t *pt;
char *filename;
@@ -2187,9 +2187,9 @@ rr_obj_hdr_flush_confusion_reader(MPI_Comm comm)
{
int i;
int j;
- hid_t file_id = -1;
- hid_t fapl_id = -1;
- hid_t dxpl_id = -1;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
hid_t lg_att_id[NUM_DATA_SETS];
hid_t lg_att_type[NUM_DATA_SETS];
hid_t disk_space[NUM_DATA_SETS];
@@ -2222,7 +2222,7 @@ rr_obj_hdr_flush_confusion_reader(MPI_Comm comm)
/* test bed related variables */
const char *fcn_name = "rr_obj_hdr_flush_confusion_reader";
- const hbool_t verbose = FALSE;
+ const bool verbose = false;
const H5Ptest_param_t *pt;
char *filename;
diff --git a/testpar/t_pflush1.c b/testpar/t_pflush1.c
index 7d8d7fa..a61e674 100644
--- a/testpar/t_pflush1.c
+++ b/testpar/t_pflush1.c
@@ -78,7 +78,7 @@ create_test_file(char *name, size_t name_length, hid_t fapl_id)
if ((top_level_gid = H5Gcreate2(fid, "some_groups", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
goto error;
for (i = 0; i < N_GROUPS; i++) {
- HDsnprintf(name, name_length, "grp%02u", (unsigned)i);
+ snprintf(name, name_length, "grp%02u", (unsigned)i);
if ((gid = H5Gcreate2(top_level_gid, name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
goto error;
if (H5Gclose(gid) < 0)
@@ -121,14 +121,14 @@ main(int argc, char *argv[])
TESTING("H5Fflush (part1)");
/* Don't run using the split VFD */
- envval = HDgetenv(HDF5_DRIVER);
+ envval = getenv(HDF5_DRIVER);
if (envval == NULL)
envval = "nomatch";
- if (!HDstrcmp(envval, "split")) {
+ if (!strcmp(envval, "split")) {
if (mpi_rank == 0) {
SKIPPED();
- HDputs(" Test not compatible with current Virtual File Driver");
+ puts(" Test not compatible with current Virtual File Driver");
}
MPI_Finalize();
exit(EXIT_FAILURE);
diff --git a/testpar/t_pflush2.c b/testpar/t_pflush2.c
index 81ed2bd..95ad125 100644
--- a/testpar/t_pflush2.c
+++ b/testpar/t_pflush2.c
@@ -82,7 +82,7 @@ check_test_file(char *name, size_t name_length, hid_t fapl_id)
if ((top_level_gid = H5Gopen2(fid, "some_groups", H5P_DEFAULT)) < 0)
goto error;
for (i = 0; i < N_GROUPS; i++) {
- HDsnprintf(name, name_length, "grp%02u", (unsigned)i);
+ snprintf(name, name_length, "grp%02u", (unsigned)i);
if ((gid = H5Gopen2(top_level_gid, name, H5P_DEFAULT)) < 0)
goto error;
if (H5Gclose(gid) < 0)
@@ -146,14 +146,14 @@ main(int argc, char *argv[])
TESTING("H5Fflush (part2 with flush)");
/* Don't run using the split VFD */
- envval = HDgetenv(HDF5_DRIVER);
+ envval = getenv(HDF5_DRIVER);
if (envval == NULL)
envval = "nomatch";
- if (!HDstrcmp(envval, "split")) {
+ if (!strcmp(envval, "split")) {
if (mpi_rank == 0) {
SKIPPED();
- HDputs(" Test not compatible with current Virtual File Driver");
+ puts(" Test not compatible with current Virtual File Driver");
}
MPI_Finalize();
exit(EXIT_SUCCESS);
diff --git a/testpar/t_pmulti_dset.c b/testpar/t_pmulti_dset.c
index 8d7fdef..4d8bc77 100644
--- a/testpar/t_pmulti_dset.c
+++ b/testpar/t_pmulti_dset.c
@@ -68,8 +68,8 @@ unsigned seed;
int nerrors = 0;
/* Whether these filters are available */
-htri_t deflate_avail = FALSE;
-htri_t fletcher32_avail = FALSE;
+htri_t deflate_avail = false;
+htri_t fletcher32_avail = false;
/*-------------------------------------------------------------------------
* Function: test_pmdset
@@ -102,10 +102,10 @@ test_pmdset(size_t niter, unsigned flags)
size_t max_dsets;
size_t buf_size;
size_t ndsets;
- hid_t file_id = -1;
- hid_t fapl_id = -1;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
hid_t dcpl_id[MAX_DSETS];
- hid_t dxpl_id = -1;
+ hid_t dxpl_id = H5I_INVALID_HID;
hsize_t dset_dims[MAX_DSETS][3];
hsize_t chunk_dims[2];
hsize_t max_dims[2] = {H5S_UNLIMITED, H5S_UNLIMITED};
@@ -119,9 +119,9 @@ test_pmdset(size_t niter, unsigned flags)
unsigned *efbufi[MAX_DSETS][MAX_DSET_X];
unsigned char *dset_usage;
unsigned char *dset_usagei[MAX_DSETS][MAX_DSET_X];
- hbool_t do_read;
- hbool_t last_read;
- hbool_t overlap;
+ bool do_read;
+ bool last_read;
+ bool overlap;
hsize_t start[MAX_HS][3];
hsize_t count[MAX_HS][3];
hsize_t points[3 * MAX_POINTS];
@@ -276,7 +276,7 @@ test_pmdset(size_t niter, unsigned flags)
/* Create datasets */
for (j = 0; j < ndsets; j++) {
- hbool_t use_chunk =
+ bool use_chunk =
(flags & MDSET_FLAG_CHUNK) || ((flags & MDSET_FLAG_MLAYOUT) && (j == 1 || j == 2));
/* Generate file dataspace */
@@ -318,14 +318,14 @@ test_pmdset(size_t niter, unsigned flags)
/* Initialize expected file buffer */
(void)memset(efbuf, 0, buf_size);
- /* Set last_read to TRUE so we don't reopen the file on the first
+ /* Set last_read to true so we don't reopen the file on the first
* iteration */
- last_read = TRUE;
+ last_read = true;
/* Perform read/write operations */
for (j = 0; j < OPS_PER_FILE; j++) {
/* Decide whether to read or write */
- do_read = (hbool_t)(HDrandom() % 2);
+ do_read = (bool)(HDrandom() % 2);
/* Barrier to ensure processes have finished the previous operation
*/
@@ -398,7 +398,7 @@ test_pmdset(size_t niter, unsigned flags)
: dset_dims[k][1]; /* Determine maximum hyperslab size in Y */
for (m = 0; m < nhs; m++) {
- overlap = TRUE;
+ overlap = true;
for (n = 0; overlap && (n < MAX_SEL_RETRIES); n++) {
/* Generate hyperslab */
count[m][0] = (hsize_t)(((hsize_t)HDrandom() % max_hs_x) + 1);
@@ -411,13 +411,13 @@ test_pmdset(size_t niter, unsigned flags)
: (hsize_t)HDrandom() % (dset_dims[k][1] - count[m][1] + 1);
/* If writing, check for overlap with other processes */
- overlap = FALSE;
+ overlap = false;
if (!do_read)
for (o = start[m][0]; (o < (start[m][0] + count[m][0])) && !overlap; o++)
for (p = start[m][1]; (p < (start[m][1] + count[m][1])) && !overlap;
p++)
if (dset_usagei[k][o][p])
- overlap = TRUE;
+ overlap = true;
} /* end for */
/* If we did not find a non-overlapping hyperslab
@@ -470,7 +470,7 @@ test_pmdset(size_t niter, unsigned flags)
/* Generate points */
for (m = 0; m < npoints; m++) {
- overlap = TRUE;
+ overlap = true;
for (n = 0; overlap && (n < MAX_SEL_RETRIES); n++) {
/* Generate point */
points[2 * m] = (unsigned)((hsize_t)HDrandom() % dset_dims[k][0]);
@@ -478,9 +478,9 @@ test_pmdset(size_t niter, unsigned flags)
/* Check for overlap with other processes (write) or this process
* (always) */
- overlap = FALSE;
+ overlap = false;
if (dset_usagei[k][points[2 * m]][points[(2 * m) + 1]])
- overlap = TRUE;
+ overlap = true;
} /* end for */
/* If we did not find a non-overlapping point quit
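
The selection loops above all follow the same pattern: propose a random hyperslab or point, test it against a per-element usage map, and give up after a bounded number of retries. A stripped-down sketch of that pattern; usage_map, DIM_X/DIM_Y, MAX_RETRIES, and find_free_point() are made-up names standing in for the test's own variables:

#include <stdbool.h>
#include <stdlib.h>

#define MAX_RETRIES 10
#define DIM_X       32
#define DIM_Y       32

/* usage_map[x][y] is nonzero where another rank has already written. */
static bool
find_free_point(const unsigned char usage_map[DIM_X][DIM_Y], unsigned *out_x, unsigned *out_y)
{
    bool overlap = true;

    for (int attempt = 0; overlap && attempt < MAX_RETRIES; attempt++) {
        unsigned x = (unsigned)(rand() % DIM_X); /* propose a random point */
        unsigned y = (unsigned)(rand() % DIM_Y);

        overlap = (usage_map[x][y] != 0); /* reject if it collides */
        if (!overlap) {
            *out_x = x;
            *out_y = y;
        }
    }

    return !overlap; /* false: the caller skips this operation, as the test does */
}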
diff --git a/testpar/t_pread.c b/testpar/t_pread.c
index 1aefcec..b16675b 100644
--- a/testpar/t_pread.c
+++ b/testpar/t_pread.c
@@ -34,9 +34,9 @@ const char *FILENAMES[NFILENAME + 1] = {"reloc_t_pread_data_file", "reloc_t_prea
#define LIMIT_NPROC 6
-hbool_t pass = TRUE;
+bool pass = true;
static const char *random_hdf5_text = "Now is the time for all first-time-users of HDF5 to read their \
-manual or go thru the tutorials!\n\
+manual or go through the tutorials!\n\
While you\'re at it, now is also the time to read up on MPI-IO.";
static const char *hitchhiker_quote = "A common mistake that people make when trying to design something\n\
@@ -92,30 +92,30 @@ generate_test_file(MPI_Comm comm, int mpi_rank, int group_id)
hsize_t i;
hsize_t offset;
hsize_t dims[1] = {0};
- hid_t file_id = -1;
- hid_t memspace = -1;
- hid_t filespace = -1;
- hid_t fctmpl = -1;
- hid_t fapl_id = -1;
- hid_t dxpl_id = -1;
- hid_t dset_id = -1;
- hid_t dset_id_ch = -1;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t memspace = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
+ hid_t fctmpl = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dset_id_ch = H5I_INVALID_HID;
hid_t dcpl_id = H5P_DEFAULT;
hsize_t chunk[1];
float nextValue;
float *data_slice = NULL;
- pass = TRUE;
+ pass = true;
assert(comm != MPI_COMM_NULL);
if ((MPI_Comm_rank(comm, &group_rank)) != MPI_SUCCESS) {
- pass = FALSE;
+ pass = false;
failure_mssg = "generate_test_file: MPI_Comm_rank failed.\n";
}
if ((MPI_Comm_size(comm, &group_size)) != MPI_SUCCESS) {
- pass = FALSE;
+ pass = false;
failure_mssg = "generate_test_file: MPI_Comm_size failed.\n";
}
@@ -151,7 +151,7 @@ generate_test_file(MPI_Comm comm, int mpi_rank, int group_id)
/* Assign the 'data_filename' */
if (h5_fixname(group_filename, H5P_DEFAULT, data_filename, sizeof(data_filename)) == NULL) {
- pass = FALSE;
+ pass = false;
failure_mssg = "h5_fixname(0) failed.\n";
}
}
@@ -159,7 +159,7 @@ generate_test_file(MPI_Comm comm, int mpi_rank, int group_id)
/* setup data to write */
if (pass) {
if ((data_slice = (float *)malloc(COUNT * sizeof(float))) == NULL) {
- pass = FALSE;
+ pass = false;
failure_mssg = "malloc of data_slice failed.\n";
}
}
@@ -176,25 +176,25 @@ generate_test_file(MPI_Comm comm, int mpi_rank, int group_id)
/* Initialize a file creation template */
if (pass) {
if ((fctmpl = H5Pcreate(H5P_FILE_CREATE)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pcreate(H5P_FILE_CREATE) failed.\n";
}
else if (H5Pset_userblock(fctmpl, 512) != SUCCEED) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pset_userblock(,size) failed.\n";
}
}
/* setup FAPL */
if (pass) {
if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pcreate(H5P_FILE_ACCESS) failed.\n";
}
}
if (pass) {
if ((H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pset_fapl_mpio() failed\n";
}
}
@@ -202,7 +202,7 @@ generate_test_file(MPI_Comm comm, int mpi_rank, int group_id)
/* create the data file */
if (pass) {
if ((file_id = H5Fcreate(data_filename, H5F_ACC_TRUNC, fctmpl, fapl_id)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Fcreate() failed.\n";
}
}
@@ -210,14 +210,14 @@ generate_test_file(MPI_Comm comm, int mpi_rank, int group_id)
/* create and write the dataset */
if (pass) {
if ((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pcreate(H5P_DATASET_XFER) failed.\n";
}
}
if (pass) {
if ((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pset_dxpl_mpio() failed.\n";
}
}
@@ -225,7 +225,7 @@ generate_test_file(MPI_Comm comm, int mpi_rank, int group_id)
if (pass) {
dims[0] = COUNT;
if ((memspace = H5Screate_simple(1, dims, NULL)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Screate_simple(1, dims, NULL) failed (1).\n";
}
}
@@ -233,7 +233,7 @@ generate_test_file(MPI_Comm comm, int mpi_rank, int group_id)
if (pass) {
dims[0] *= (hsize_t)group_size;
if ((filespace = H5Screate_simple(1, dims, NULL)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Screate_simple(1, dims, NULL) failed (2).\n";
}
}
@@ -241,7 +241,7 @@ generate_test_file(MPI_Comm comm, int mpi_rank, int group_id)
if (pass) {
offset = (hsize_t)group_rank * (hsize_t)COUNT;
if ((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, &offset, NULL, &count, NULL)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Sselect_hyperslab() failed.\n";
}
}
@@ -249,14 +249,14 @@ generate_test_file(MPI_Comm comm, int mpi_rank, int group_id)
if (pass) {
if ((dset_id = H5Dcreate2(file_id, "dataset0", H5T_NATIVE_FLOAT, filespace, H5P_DEFAULT, H5P_DEFAULT,
H5P_DEFAULT)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dcreate2() failed.\n";
}
}
if (pass) {
if ((H5Dwrite(dset_id, H5T_NATIVE_FLOAT, memspace, filespace, dxpl_id, data_slice)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dwrite() failed.\n";
}
}
@@ -266,14 +266,14 @@ generate_test_file(MPI_Comm comm, int mpi_rank, int group_id)
if (pass) {
if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pcreate() failed.\n";
}
}
if (pass) {
if ((H5Pset_chunk(dcpl_id, 1, chunk)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pset_chunk() failed.\n";
}
}
@@ -282,27 +282,27 @@ generate_test_file(MPI_Comm comm, int mpi_rank, int group_id)
if ((dset_id_ch = H5Dcreate2(file_id, "dataset0_chunked", H5T_NATIVE_FLOAT, filespace, H5P_DEFAULT,
dcpl_id, H5P_DEFAULT)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dcreate2() failed.\n";
}
}
if (pass) {
if ((H5Dwrite(dset_id_ch, H5T_NATIVE_FLOAT, memspace, filespace, dxpl_id, data_slice)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dwrite() failed.\n";
}
}
if (pass || (dcpl_id != -1)) {
if (H5Pclose(dcpl_id) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pclose(dcpl_id) failed.\n";
}
}
if (pass || (dset_id_ch != -1)) {
if (H5Dclose(dset_id_ch) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dclose(dset_id_ch) failed.\n";
}
}
@@ -310,56 +310,56 @@ generate_test_file(MPI_Comm comm, int mpi_rank, int group_id)
/* close file, etc. */
if (pass || (dset_id != -1)) {
if (H5Dclose(dset_id) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dclose(dset_id) failed.\n";
}
}
if (pass || (memspace != -1)) {
if (H5Sclose(memspace) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Sclose(memspace) failed.\n";
}
}
if (pass || (filespace != -1)) {
if (H5Sclose(filespace) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Sclose(filespace) failed.\n";
}
}
if (pass || (file_id != -1)) {
if (H5Fclose(file_id) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Fclose(file_id) failed.\n";
}
}
if (pass || (dxpl_id != -1)) {
if (H5Pclose(dxpl_id) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pclose(dxpl_id) failed.\n";
}
}
if (pass || (fapl_id != -1)) {
if (H5Pclose(fapl_id) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pclose(fapl_id) failed.\n";
}
}
if (pass || (fctmpl != -1)) {
if (H5Pclose(fctmpl) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pclose(fctmpl) failed.\n";
}
}
/* Add a userblock to the head of the datafile.
* We will use this for a functional test of the
- * file open optimization. This is superblock
+ * file open optimization. This superblock
* relocation is done by the rank 0 process associated
* with the communicator being used. For test 1, we
* utilize MPI_COMM_WORLD, so group_rank 0 is the
@@ -379,11 +379,11 @@ generate_test_file(MPI_Comm comm, int mpi_rank, int group_id)
else
text_to_write = hitchhiker_quote;
- bytes_to_write = HDstrlen(text_to_write);
+ bytes_to_write = strlen(text_to_write);
if (pass) {
if ((header = HDopen(data_filename, O_WRONLY)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "HDopen(data_filename, O_WRONLY) failed.\n";
}
}
@@ -391,14 +391,14 @@ generate_test_file(MPI_Comm comm, int mpi_rank, int group_id)
if (pass) {
HDlseek(header, 0, SEEK_SET);
if (HDwrite(header, text_to_write, bytes_to_write) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Unable to write user text into file.\n";
}
}
if (pass || (header > 0)) {
if (HDclose(header) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "HDclose() failed.\n";
}
}
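
The comment and the HDopen/HDlseek/HDwrite/HDclose hunks above prepend text to the data file so that the HDF5 superblock ends up at a nonzero offset, exercising the file-open optimization. A minimal POSIX sketch of that step (the test uses the library's HD wrappers; the text must stay shorter than the 512-byte userblock reserved earlier with H5Pset_userblock):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Fill the already-reserved userblock at the front of an HDF5 file with text,
 * leaving the superblock (at offset 512) untouched. */
static int
fill_userblock(const char *filename, const char *text)
{
    int fd = open(filename, O_WRONLY);

    if (fd < 0)
        return -1;
    if (lseek(fd, 0, SEEK_SET) < 0 || write(fd, text, strlen(text)) < 0) {
        close(fd);
        return -1;
    }

    return close(fd);
}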
@@ -413,12 +413,12 @@ generate_test_file(MPI_Comm comm, int mpi_rank, int group_id)
/* This is a global all reduce (NOT group specific) */
if (MPI_Allreduce(&local_failure, &global_failures, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD) != MPI_SUCCESS) {
if (pass) {
- pass = FALSE;
+ pass = false;
failure_mssg = "MPI_Allreduce() failed.\n";
}
}
else if ((pass) && (global_failures > 0)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "One or more processes report failure.\n";
}
@@ -464,7 +464,7 @@ generate_test_file(MPI_Comm comm, int mpi_rank, int group_id)
*
* The global MPI rank is used for reading and
* writing data for process specific data in the
- * dataset. We do this rather simplisticly, i.e.
+ * dataset. We do this rather simplistically, i.e.
* rank 0: writes/reads 0-9999
* rank 1: writes/reads 1000-1999
* rank 2: writes/reads 2000-2999
@@ -486,14 +486,14 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
int global_failures = 0;
int group_size;
int group_rank;
- hid_t fapl_id = -1;
- hid_t file_id = -1;
- hid_t dset_id = -1;
- hid_t dset_id_ch = -1;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dset_id_ch = H5I_INVALID_HID;
hid_t dxpl_id = H5P_DEFAULT;
- hid_t memspace = -1;
- hid_t filespace = -1;
- hid_t filetype = -1;
+ hid_t memspace = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
+ hid_t filetype = H5I_INVALID_HID;
size_t filetype_size;
hssize_t dset_size;
hsize_t i;
@@ -503,17 +503,17 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
float nextValue;
float *data_slice = NULL;
- pass = TRUE;
+ pass = true;
assert(comm != MPI_COMM_NULL);
if ((MPI_Comm_rank(comm, &group_rank)) != MPI_SUCCESS) {
- pass = FALSE;
+ pass = false;
failure_mssg = "test_parallel_read: MPI_Comm_rank failed.\n";
}
if ((MPI_Comm_size(comm, &group_size)) != MPI_SUCCESS) {
- pass = FALSE;
+ pass = false;
failure_mssg = "test_parallel_read: MPI_Comm_size failed.\n";
}
@@ -529,7 +529,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
/* allocate space for the data_slice array */
if (pass) {
if ((data_slice = (float *)malloc(COUNT * sizeof(float))) == NULL) {
- pass = FALSE;
+ pass = false;
failure_mssg = "malloc of data_slice failed.\n";
}
}
@@ -551,7 +551,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
if (h5_fixname(group_filename, H5P_DEFAULT, reloc_data_filename, sizeof(reloc_data_filename)) ==
NULL) {
- pass = FALSE;
+ pass = false;
failure_mssg = "h5_fixname(1) failed.\n";
}
}
@@ -559,14 +559,14 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
/* setup FAPL */
if (pass) {
if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pcreate(H5P_FILE_ACCESS) failed.\n";
}
}
if (pass) {
if ((H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pset_fapl_mpio() failed\n";
}
}
@@ -574,7 +574,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
/* open the file -- should have user block, exercising the optimization */
if (pass) {
if ((file_id = H5Fopen(reloc_data_filename, H5F_ACC_RDONLY, fapl_id)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Fopen() failed\n";
}
}
@@ -582,7 +582,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
/* open the data set */
if (pass) {
if ((dset_id = H5Dopen2(file_id, "dataset0", H5P_DEFAULT)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dopen2() failed\n";
}
}
@@ -590,7 +590,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
/* open the chunked data set */
if (pass) {
if ((dset_id_ch = H5Dopen2(file_id, "dataset0_chunked", H5P_DEFAULT)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dopen2() failed\n";
}
}
@@ -599,7 +599,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
if (pass) {
dims[0] = count;
if ((memspace = H5Screate_simple(1, dims, NULL)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Screate_simple(1, dims, NULL) failed\n";
}
}
@@ -607,7 +607,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
/* setup filespace */
if (pass) {
if ((filespace = H5Dget_space(dset_id)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dget_space(dataset) failed\n";
}
}
@@ -615,7 +615,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
if (pass) {
offset = (hsize_t)group_rank * count;
if ((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, &offset, NULL, &count, NULL)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Sselect_hyperslab() failed\n";
}
}
@@ -623,7 +623,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
/* read this process's section of the data */
if (pass) {
if ((H5Dread(dset_id, H5T_NATIVE_FLOAT, memspace, filespace, H5P_DEFAULT, data_slice)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dread() failed\n";
}
}
@@ -638,7 +638,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
* the compiler.
*/
if ((data_slice[i] > nextValue) || (data_slice[i] < nextValue)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Unexpected dset contents.\n";
}
nextValue += 1;
@@ -648,14 +648,14 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
if (pass || (memspace != -1)) {
if (H5Sclose(memspace) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Sclose(memspace) failed.\n";
}
}
if (pass || (filespace != -1)) {
if (H5Sclose(filespace) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Sclose(filespace) failed.\n";
}
}
@@ -674,49 +674,49 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
if (group_size <= LIMIT_NPROC) {
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- hbool_t prop_value;
+ bool prop_value;
#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
if ((filespace = H5Dget_space(dset_id)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dget_space failed.\n";
}
if ((dset_size = H5Sget_simple_extent_npoints(filespace)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Sget_simple_extent_npoints failed.\n";
}
if ((filetype = H5Dget_type(dset_id)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dget_type failed.\n";
}
if ((filetype_size = H5Tget_size(filetype)) == 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Tget_size failed.\n";
}
if (H5Tclose(filetype) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Tclose failed.\n";
};
if ((data_slice = (float *)malloc((size_t)dset_size * filetype_size)) == NULL) {
- pass = FALSE;
+ pass = false;
failure_mssg = "malloc of data_slice failed.\n";
}
if (pass) {
if ((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pcreate(H5P_DATASET_XFER) failed.\n";
}
}
if (pass) {
if ((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pset_dxpl_mpio() failed.\n";
}
}
@@ -726,7 +726,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
if (H5Pinsert2(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE,
&prop_value, NULL, NULL, NULL, NULL, NULL, NULL) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pinsert2() failed\n";
}
}
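
In instrumented builds the test plants a temporary boolean property in the DXPL before the read and inspects it afterwards to learn whether the rank-0 Bcast path was taken. A minimal sketch of that plant-and-inspect pattern; "example_flag" is a placeholder name, whereas the real test uses the library's internal H5D_XFER_COLL_RANK0_BCAST_* identifiers:

#include <stdbool.h>
#include "hdf5.h"

/* Insert a bool-valued property into a transfer plist and read it back later.
 * "example_flag" is a placeholder; only an instrumented library build updates
 * its internal counterpart of such a flag during H5Dread(). */
static bool
plant_and_read_flag(hid_t dxpl_id)
{
    bool flag = false;

    if (H5Pinsert2(dxpl_id, "example_flag", sizeof(bool), &flag,
                   NULL, NULL, NULL, NULL, NULL, NULL) < 0)
        return false;

    /* ... the I/O call under test would go here ... */

    if (H5Pget(dxpl_id, "example_flag", &flag) < 0)
        return false;

    return flag;
}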
@@ -735,21 +735,21 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
/* read H5S_ALL section */
if (pass) {
if ((H5Dread(dset_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, dxpl_id, data_slice)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dread() failed\n";
}
}
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
if (pass) {
- prop_value = FALSE;
+ prop_value = false;
if (H5Pget(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pget() failed\n";
}
if (pass) {
- if (prop_value != TRUE) {
- pass = FALSE;
+ if (prop_value != true) {
+ pass = false;
failure_mssg = "rank 0 Bcast optimization was mistakenly not performed\n";
}
}
@@ -773,7 +773,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
* the compiler.
*/
if ((data_slice[i] > nextValue) || (data_slice[i] < nextValue)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Unexpected dset contents.\n";
}
nextValue += 1;
@@ -787,7 +787,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
if (pass) {
prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
if (H5Pset(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pset() failed\n";
}
}
@@ -798,21 +798,21 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
}
if (pass) {
if ((H5Dread(dset_id_ch, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, dxpl_id, data_slice)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dread() failed\n";
}
}
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
if (pass) {
- prop_value = FALSE;
+ prop_value = false;
if (H5Pget(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pget() failed\n";
}
if (pass) {
- if (prop_value == TRUE) {
- pass = FALSE;
+ if (prop_value == true) {
+ pass = false;
failure_mssg = "rank 0 Bcast optimization was mistakenly performed for chunked dataset\n";
}
}
@@ -836,7 +836,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
* the compiler.
*/
if ((data_slice[i] > nextValue) || (data_slice[i] < nextValue)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Unexpected chunked dset contents.\n";
}
nextValue += 1;
@@ -846,7 +846,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
if (pass || (filespace != -1)) {
if (H5Sclose(filespace) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Sclose(filespace) failed.\n";
}
}
@@ -862,7 +862,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
*/
if ((data_slice = (float *)malloc((size_t)(dset_size * 2) * filetype_size)) == NULL) {
- pass = FALSE;
+ pass = false;
failure_mssg = "malloc of data_slice failed.\n";
}
@@ -870,14 +870,14 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
if (pass) {
dims[0] = (hsize_t)dset_size * 2;
if ((memspace = H5Screate_simple(1, dims, NULL)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Screate_simple(1, dims, NULL) failed\n";
}
}
if (pass) {
offset = (hsize_t)dset_size;
if ((H5Sselect_hyperslab(memspace, H5S_SELECT_SET, &offset, NULL, &offset, NULL)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Sselect_hyperslab() failed\n";
}
}
@@ -886,7 +886,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
if (pass) {
prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
if (H5Pset(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pset() failed\n";
}
}
@@ -895,21 +895,21 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
/* read this process's section of the data */
if (pass) {
if ((H5Dread(dset_id, H5T_NATIVE_FLOAT, memspace, H5S_ALL, dxpl_id, data_slice)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dread() failed\n";
}
}
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
if (pass) {
- prop_value = FALSE;
+ prop_value = false;
if (H5Pget(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pget() failed\n";
}
if (pass) {
- if (prop_value != TRUE) {
- pass = FALSE;
+ if (prop_value != true) {
+ pass = false;
failure_mssg = "rank 0 Bcast optimization was mistakenly not performed\n";
}
}
@@ -933,7 +933,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
* the compiler.
*/
if ((data_slice[i] > nextValue) || (data_slice[i] < nextValue)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Unexpected dset contents.\n";
}
nextValue += 1;
@@ -943,7 +943,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
if (pass || (memspace != -1)) {
if (H5Sclose(memspace) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Sclose(memspace) failed.\n";
}
}
@@ -956,7 +956,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
if (pass || (dxpl_id != -1)) {
if (H5Pclose(dxpl_id) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pclose(dxpl_id) failed.\n";
}
}
@@ -965,28 +965,28 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
/* close file, etc. */
if (pass || (dset_id != -1)) {
if (H5Dclose(dset_id) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dclose(dset_id) failed.\n";
}
}
if (pass || (dset_id_ch != -1)) {
if (H5Dclose(dset_id_ch) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Dclose(dset_id_ch) failed.\n";
}
}
if (pass || (file_id != -1)) {
if (H5Fclose(file_id) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Fclose(file_id) failed.\n";
}
}
if (pass || (fapl_id != -1)) {
if (H5Pclose(fapl_id) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pclose(fapl_id) failed.\n";
}
}
@@ -999,12 +999,12 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
if (MPI_Allreduce(&local_failure, &global_failures, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD) != MPI_SUCCESS) {
if (pass) {
- pass = FALSE;
+ pass = false;
failure_mssg = "MPI_Allreduce() failed.\n";
}
}
else if ((pass) && (global_failures > 0)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "One or more processes report failure.\n";
}
@@ -1032,7 +1032,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
*
* The test consists of creating two separate HDF datasets
* in which random text is inserted at the start of each
- * file using the 'j5jam' application. This forces the
+ * file using the 'h5jam' application. This forces the
* HDF5 file superblock to a non-zero offset.
* Having created the two independent files, we create two
* non-overlapping MPI groups, each of which is then tasked
@@ -1055,15 +1055,15 @@ main(int argc, char **argv)
MPI_Comm group_comm = MPI_COMM_NULL;
/* I don't believe that argv[0] can ever be NULL.
- * It should thus be safe to dup and save as a check
- * for cmake testing. Note that in our Cmake builds,
+ * It should thus be safe to duplicate and save as a check
+ * for CMake testing. Note that in our CMake builds,
* all executables are located in the same directory.
* We assume (but we'll check) that the h5jam utility
* is in the same directory as this executable. If that
* isn't true, then we can use a relative path that
- * should be valid for the autotools environment.
+ * should be valid for the Autotools environment.
*/
- test_argv0 = HDstrdup(argv[0]);
+ test_argv0 = strdup(argv[0]);
if ((MPI_Init(&argc, &argv)) != MPI_SUCCESS) {
fprintf(stderr, "FATAL: Unable to initialize MPI\n");
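
The non-overlapping MPI groups described in the test comment are typically obtained by splitting MPI_COMM_WORLD; a minimal sketch of one way to do it (the half-and-half color rule is illustrative, not necessarily the split the test performs):

#include <mpi.h>

/* Split MPI_COMM_WORLD into two halves: lower ranks get color 0, upper ranks color 1. */
static MPI_Comm
make_group_comm(int mpi_rank, int mpi_size)
{
    MPI_Comm group_comm = MPI_COMM_NULL;
    int      color      = (mpi_rank < mpi_size / 2) ? 0 : 1;

    if (MPI_Comm_split(MPI_COMM_WORLD, color, mpi_rank, &group_comm) != MPI_SUCCESS)
        return MPI_COMM_NULL;

    return group_comm;
}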
diff --git a/testpar/t_prop.c b/testpar/t_prop.c
index c6eb99c..de36abf 100644
--- a/testpar/t_prop.c
+++ b/testpar/t_prop.c
@@ -114,12 +114,12 @@ test_plist_ed(void)
hsize_t max_size[1]; /*data space maximum size */
const char *c_to_f = "x+32";
H5AC_cache_config_t my_cache_config = {H5AC__CURR_CACHE_CONFIG_VERSION,
- TRUE,
- FALSE,
- FALSE,
+ true,
+ false,
+ false,
"temp",
- TRUE,
- FALSE,
+ true,
+ false,
(2 * 2048 * 1024),
0.3,
(64 * 1024 * 1024),
@@ -128,7 +128,7 @@ test_plist_ed(void)
H5C_incr__threshold,
0.8,
3.0,
- TRUE,
+ true,
(8 * 1024 * 1024),
H5C_flash_incr__add_space,
2.0,
@@ -136,10 +136,10 @@ test_plist_ed(void)
H5C_decr__age_out_with_threshold,
0.997,
0.8,
- TRUE,
+ true,
(3 * 1024 * 1024),
3,
- FALSE,
+ false,
0.2,
(256 * 2048),
H5AC__DEFAULT_METADATA_WRITE_STRATEGY};
@@ -285,7 +285,7 @@ test_plist_ed(void)
lcpl = H5Pcreate(H5P_LINK_CREATE);
VRFY((lcpl >= 0), "H5Pcreate succeeded");
- ret = H5Pset_create_intermediate_group(lcpl, TRUE);
+ ret = H5Pset_create_intermediate_group(lcpl, true);
VRFY((ret >= 0), "H5Pset_create_intermediate_group succeeded");
ret = test_encode_decode(lcpl, mpi_rank, recv_proc);
@@ -563,7 +563,7 @@ external_links(void)
/* test opening a group that is to an external link, the external linked
file should inherit the source file's access properties */
- HDsnprintf(link_path, sizeof(link_path), "%s%s%s", group_path, "/", link_name);
+ snprintf(link_path, sizeof(link_path), "%s%s%s", group_path, "/", link_name);
group = H5Gopen2(fid, link_path, H5P_DEFAULT);
VRFY((group >= 0), "H5Gopen succeeded");
ret = H5Gclose(group);
@@ -594,10 +594,10 @@ external_links(void)
VRFY((ret >= 0), "H5Pset_elink_fapl succeeded");
tri_status = H5Lexists(fid, link_path, H5P_DEFAULT);
- VRFY((tri_status == TRUE), "H5Lexists succeeded");
+ VRFY((tri_status == true), "H5Lexists succeeded");
tri_status = H5Lexists(fid, link_path, lapl);
- VRFY((tri_status == TRUE), "H5Lexists succeeded");
+ VRFY((tri_status == true), "H5Lexists succeeded");
group = H5Oopen(fid, link_path, H5P_DEFAULT);
VRFY((group >= 0), "H5Oopen succeeded");
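
The test_encode_decode() calls above round-trip each property list through its encoded form (and, given the recv_proc argument, presumably across ranks). A minimal local sketch of the encode/decode half using the public H5Pencode2/H5Pdecode API; the MPI transfer is omitted and error handling is reduced to early returns:

#include <stdlib.h>
#include "hdf5.h"

/* Round-trip a property list through its encoded form.
 * Returns the decoded plist ID, or H5I_INVALID_HID on failure. */
static hid_t
roundtrip_plist(hid_t plist)
{
    size_t nalloc  = 0;
    void  *buf     = NULL;
    hid_t  decoded = H5I_INVALID_HID;

    if (H5Pencode2(plist, NULL, &nalloc, H5P_DEFAULT) < 0) /* query required size */
        return H5I_INVALID_HID;
    if ((buf = malloc(nalloc)) == NULL)
        return H5I_INVALID_HID;
    if (H5Pencode2(plist, buf, &nalloc, H5P_DEFAULT) >= 0)
        decoded = H5Pdecode(buf);

    free(buf);
    return decoded;
}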
diff --git a/testpar/t_select_io_dset.c b/testpar/t_select_io_dset.c
index daeacf0..2e6839e 100644
--- a/testpar/t_select_io_dset.c
+++ b/testpar/t_select_io_dset.c
@@ -45,7 +45,7 @@ int curr_nerrors = 0;
if (err_result == 0) \
PASSED(); \
else \
- HDputs(" ***TEST FAILED***"); \
+ puts(" ***TEST FAILED***"); \
} \
} while (0)
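
The reporting macro above ends in do { ... } while (0) so that its multi-statement body behaves like a single statement after an unbraced if/else. A self-contained sketch of the idiom with a made-up REPORT() macro:

#include <stdio.h>

/* do/while(0) keeps the macro safe when used without braces. */
#define REPORT(err)                         \
    do {                                    \
        if ((err) == 0)                     \
            printf("PASSED\n");             \
        else                                \
            puts(" ***TEST FAILED***");     \
    } while (0)

int
main(void)
{
    if (1)
        REPORT(0); /* expands correctly even in this unbraced branch */
    else
        REPORT(1);
    return 0;
}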
@@ -153,7 +153,7 @@ set_dxpl(hid_t dxpl, H5D_selection_io_mode_t select_io_mode, H5FD_mpio_xfer_t mp
P_TEST_ERROR;
if (mwbuf)
- if (H5Pset_modify_write_buf(dxpl, TRUE) < 0)
+ if (H5Pset_modify_write_buf(dxpl, true) < 0)
P_TEST_ERROR;
} /* set_dxpl() */
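
set_dxpl() layers three independent settings onto one transfer property list: the selection I/O mode, the MPI transfer mode, and optionally permission to modify the write buffer in place. A minimal sketch of the same setup outside the test harness, with error handling reduced to early returns:

#include <stdbool.h>
#include "hdf5.h"

/* Build a dataset-transfer plist for parallel selection I/O. */
static hid_t
make_dxpl(H5D_selection_io_mode_t sel_mode, H5FD_mpio_xfer_t xfer_mode, bool modify_write_buf)
{
    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

    if (dxpl < 0)
        return H5I_INVALID_HID;

    if (H5Pset_selection_io(dxpl, sel_mode) < 0 || H5Pset_dxpl_mpio(dxpl, xfer_mode) < 0 ||
        (modify_write_buf && H5Pset_modify_write_buf(dxpl, true) < 0)) {
        H5Pclose(dxpl);
        return H5I_INVALID_HID;
    }

    return dxpl;
}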
@@ -227,8 +227,8 @@ test_no_type_conv(hid_t fid, unsigned chunked, unsigned dtrans, unsigned mwbuf)
}
/* Generate dataset name */
- HDsnprintf(dset_name, sizeof(dset_name), "no_tconv_%s_%s_%s", chunked ? "chunked" : "contig",
- dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf");
+ snprintf(dset_name, sizeof(dset_name), "no_tconv_%s_%s_%s", chunked ? "chunked" : "contig",
+ dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf");
/* Create dataset */
if ((did = H5Dcreate2(fid, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
@@ -379,8 +379,8 @@ test_no_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf)
}
/* Generate dataset name */
- HDsnprintf(dset_name, sizeof(dset_name), "no_size_change_%s_%s", chunked ? "chunked" : "contig",
- mwbuf ? "mwbuf" : "nomwbuf");
+ snprintf(dset_name, sizeof(dset_name), "no_size_change_%s_%s", chunked ? "chunked" : "contig",
+ mwbuf ? "mwbuf" : "nomwbuf");
/* Create 1d dataset */
if ((did = H5Dcreate2(fid, dset_name, H5T_STD_I32BE, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
@@ -525,8 +525,8 @@ test_larger_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsign
}
/* Generate dataset name */
- HDsnprintf(dset_name, sizeof(dset_name), "larger_no_bkg_%s_%s_%s", chunked ? "chunked" : "contig",
- dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf");
+ snprintf(dset_name, sizeof(dset_name), "larger_no_bkg_%s_%s_%s", chunked ? "chunked" : "contig",
+ dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf");
/* Create 1d chunked dataset with/without data transform */
if ((did = H5Dcreate2(fid, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
@@ -673,8 +673,8 @@ test_smaller_mem_type_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsig
}
/* Generate dataset name */
- HDsnprintf(dset_name, sizeof(dset_name), "smaller_no_bkg_%s_%s_%s", chunked ? "chunked" : "contig",
- dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf");
+ snprintf(dset_name, sizeof(dset_name), "smaller_no_bkg_%s_%s_%s", chunked ? "chunked" : "contig",
+ dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf");
/* Create 1d chunked dataset with/without data transform */
if ((did = H5Dcreate2(fid, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
@@ -870,8 +870,8 @@ test_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf)
/* Case 5(a) */
/* Generate dataset name */
- HDsnprintf(dset_name, sizeof(dset_name), "cmpd_with_bkg_%s_%s", chunked ? "chunked" : "contig",
- mwbuf ? "mwbuf" : "nomwbuf");
+ snprintf(dset_name, sizeof(dset_name), "cmpd_with_bkg_%s_%s", chunked ? "chunked" : "contig",
+ mwbuf ? "mwbuf" : "nomwbuf");
/* Create 1d dataset */
if ((did = H5Dcreate2(fid, dset_name, s1_tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
@@ -1158,8 +1158,8 @@ test_type_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, unsigned
}
/* Generate dataset name */
- HDsnprintf(dset_name, sizeof(dset_name), "tconv_sel_empty_%s_%s_%s", chunked ? "chunked" : "contig",
- dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf");
+ snprintf(dset_name, sizeof(dset_name), "tconv_sel_empty_%s_%s_%s", chunked ? "chunked" : "contig",
+ dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf");
/* Create dataset */
if ((did = H5Dcreate2(fid, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
@@ -1473,8 +1473,8 @@ test_multi_dsets_no_bkg(hid_t fid, unsigned chunked, unsigned dtrans, unsigned m
P_TEST_ERROR;
/* Generate dataset name */
- HDsnprintf(dset_names[i], sizeof(dset_names[i]), "multi_dset%d_%s_%s_%s", i,
- chunked ? "chunked" : "contig", dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf");
+ snprintf(dset_names[i], sizeof(dset_names[i]), "multi_dset%d_%s_%s_%s", i,
+ chunked ? "chunked" : "contig", dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf");
/* Create ith dataset */
if ((dset_dids[i] =
@@ -1794,8 +1794,8 @@ test_multi_dsets_cmpd_with_bkg(hid_t fid, unsigned chunked, unsigned mwbuf)
P_TEST_ERROR;
/* Generate dataset name */
- HDsnprintf(dset_names[i], sizeof(dset_names[i]), "multi_cmpd_dset%d_%s_%s", i,
- chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf");
+ snprintf(dset_names[i], sizeof(dset_names[i]), "multi_cmpd_dset%d_%s_%s", i,
+ chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf");
/* Create ith dataset */
if ((dset_dids[i] =
@@ -2230,8 +2230,8 @@ test_multi_dsets_size_change_no_bkg(hid_t fid, unsigned chunked, unsigned mwbuf)
P_TEST_ERROR;
/* Generate dataset name */
- HDsnprintf(dset_names[i], sizeof(dset_names[i]), "multi_size_dset%d_%s_%s", i,
- chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf");
+ snprintf(dset_names[i], sizeof(dset_names[i]), "multi_size_dset%d_%s_%s", i,
+ chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf");
/* Create ith dataset */
if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], H5T_STD_I32BE, file_sids[i], H5P_DEFAULT, dcpl,
@@ -2584,8 +2584,8 @@ test_multi_dsets_conv_sel_empty(hid_t fid, unsigned chunked, unsigned dtrans, un
P_TEST_ERROR;
/* Generate dataset name */
- HDsnprintf(dset_names[i], sizeof(dset_names[i]), "multi_sel_dset%d_%s_%s_%s", i,
- chunked ? "chunked" : "contig", dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf");
+ snprintf(dset_names[i], sizeof(dset_names[i]), "multi_sel_dset%d_%s_%s_%s", i,
+ chunked ? "chunked" : "contig", dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf");
if (i == 0) {
if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], H5T_NATIVE_INT, file_sids[i], H5P_DEFAULT,
@@ -3039,24 +3039,24 @@ test_multi_dsets_all(int niter, hid_t fid, unsigned chunked, unsigned mwbuf)
mm = HDrandom() % (int)ndsets;
if (mm == 0) {
dset_types[i] = DSET_WITH_NO_CONV;
- HDsnprintf(dset_names[i], sizeof(dset_names[i]), "multi_all_nconv_dset%d_%s_%s", i,
- chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf");
+ snprintf(dset_names[i], sizeof(dset_names[i]), "multi_all_nconv_dset%d_%s_%s", i,
+ chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf");
if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], H5T_NATIVE_INT, file_sids[i], H5P_DEFAULT,
dcpl, H5P_DEFAULT)) < 0)
P_TEST_ERROR;
}
else if (mm == 1) {
dset_types[i] = DSET_WITH_CONV_AND_NO_BKG;
- HDsnprintf(dset_names[i], sizeof(dset_names[i]), "multi_all_conv_nbkg_dset%d_%s_%s", i,
- chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf");
+ snprintf(dset_names[i], sizeof(dset_names[i]), "multi_all_conv_nbkg_dset%d_%s_%s", i,
+ chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf");
if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], H5T_NATIVE_LONG, file_sids[i], H5P_DEFAULT,
dcpl, H5P_DEFAULT)) < 0)
P_TEST_ERROR;
}
else {
dset_types[i] = DSET_WITH_CONV_AND_BKG;
- HDsnprintf(dset_names[i], sizeof(dset_names[i]), "multi_all_conv_bkg_dset%d_%s_%s", i,
- chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf");
+ snprintf(dset_names[i], sizeof(dset_names[i]), "multi_all_conv_bkg_dset%d_%s_%s", i,
+ chunked ? "chunked" : "contig", mwbuf ? "mwbuf" : "nomwbuf");
if ((dset_dids[i] = H5Dcreate2(fid, dset_names[i], s1_tid, file_sids[i], H5P_DEFAULT, dcpl,
H5P_DEFAULT)) < 0)
P_TEST_ERROR;
@@ -3407,7 +3407,7 @@ test_no_selection_io_cause_mode(const char *filename, hid_t fapl, uint32_t test_
hid_t sid = H5I_INVALID_HID;
hsize_t dims[1];
hsize_t cdims[1];
- hbool_t is_chunked = FALSE;
+ bool is_chunked = false;
hid_t tid = H5T_NATIVE_INT;
uint32_t no_selection_io_cause_write = 0;
uint32_t no_selection_io_cause_read = 0;
@@ -3464,7 +3464,7 @@ test_no_selection_io_cause_mode(const char *filename, hid_t fapl, uint32_t test_
P_TEST_ERROR;
if (test_mode & TEST_IN_PLACE_TCONV) {
- if (H5Pset_modify_write_buf(dxpl, TRUE) < 0)
+ if (H5Pset_modify_write_buf(dxpl, true) < 0)
P_TEST_ERROR;
}
/* In-place type conversion for read doesn't require modify_write_buf */
@@ -3972,14 +3972,14 @@ main(int argc, char *argv[])
P_TEST_ERROR;
/* Test with contiguous or chunked dataset */
- for (chunked = FALSE; chunked <= TRUE; chunked++) {
+ for (chunked = false; chunked <= true; chunked++) {
/* Data transforms only apply to integer or floating-point datasets */
/* therefore, not all tests are run with data transform */
- for (dtrans = FALSE; dtrans <= TRUE; dtrans++) {
+ for (dtrans = false; dtrans <= true; dtrans++) {
/* Test with and without modify_write_buf turned on */
- for (mwbuf = FALSE; mwbuf <= TRUE; mwbuf++) {
+ for (mwbuf = false; mwbuf <= true; mwbuf++) {
if (MAINPROCESS) {
/* Print configuration message */
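
The driver above walks every combination of the chunked, dtrans, and mwbuf flags now that they count from false to true. A self-contained sketch that just enumerates the eight configurations and prints the same labels the tests embed in their dataset names:

#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
    /* false == 0 and true == 1, so the flags double as loop counters. */
    for (unsigned chunked = false; chunked <= true; chunked++)
        for (unsigned dtrans = false; dtrans <= true; dtrans++)
            for (unsigned mwbuf = false; mwbuf <= true; mwbuf++)
                printf("config: %s %s %s\n", chunked ? "chunked" : "contig",
                       dtrans ? "xform" : "noxform", mwbuf ? "mwbuf" : "nomwbuf");
    return 0;
}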
diff --git a/testpar/t_shapesame.c b/testpar/t_shapesame.c
index 55e7328..98e3077 100644
--- a/testpar/t_shapesame.c
+++ b/testpar/t_shapesame.c
@@ -112,14 +112,14 @@ struct hs_dr_pio_test_vars_t {
static void
hs_dr_pio_test__setup(const int test_num, const int edge_size, const int checker_edge_size,
const int chunk_edge_size, const int small_rank, const int large_rank,
- const hbool_t use_collective_io, const hid_t dset_type, const int express_test,
+ const bool use_collective_io, const hid_t dset_type, const int express_test,
struct hs_dr_pio_test_vars_t *tv_ptr)
{
#if CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG
const char *fcnName = "hs_dr_pio_test__setup()";
#endif /* CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG */
const char *filename;
- hbool_t mis_match = FALSE;
+ bool mis_match = false;
int i;
int mrc;
int mpi_rank; /* needed by the VRFY macro */
@@ -452,7 +452,7 @@ hs_dr_pio_test__setup(const int test_num, const int edge_size, const int checker
/* verify that the correct data was written to the small data set */
expected_value = 0;
- mis_match = FALSE;
+ mis_match = false;
ptr_1 = tv_ptr->small_ds_buf_1;
i = 0;
@@ -460,12 +460,12 @@ hs_dr_pio_test__setup(const int test_num, const int edge_size, const int checker
if (*ptr_1 != expected_value) {
- mis_match = TRUE;
+ mis_match = true;
}
ptr_1++;
expected_value++;
}
- VRFY((mis_match == FALSE), "small ds init data good.");
+ VRFY((mis_match == false), "small ds init data good.");
/* setup selections for writing initial data to the large data set */
@@ -523,7 +523,7 @@ hs_dr_pio_test__setup(const int test_num, const int edge_size, const int checker
/* verify that the correct data was written to the large data set */
expected_value = 0;
- mis_match = FALSE;
+ mis_match = false;
ptr_1 = tv_ptr->large_ds_buf_1;
i = 0;
@@ -531,12 +531,12 @@ hs_dr_pio_test__setup(const int test_num, const int edge_size, const int checker
if (*ptr_1 != expected_value) {
- mis_match = TRUE;
+ mis_match = true;
}
ptr_1++;
expected_value++;
}
- VRFY((mis_match == FALSE), "large ds init data good.");
+ VRFY((mis_match == false), "large ds init data good.");
/* sync with the other processes before changing data */
mrc = MPI_Barrier(MPI_COMM_WORLD);
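
The mis_match checks above all scan a buffer against a running expected value before asserting with VRFY. A stripped-down sketch of that verification loop; check_sequential() is a made-up name, and the test applies the same idea to its small and large dataset buffers:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Return true when buf holds first_val, first_val + 1, ... for nelem elements. */
static bool
check_sequential(const uint32_t *buf, size_t nelem, uint32_t first_val)
{
    bool mis_match = false;

    for (size_t i = 0; i < nelem; i++) {
        if (buf[i] != first_val + (uint32_t)i)
            mis_match = true;
    }

    return !mis_match;
}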
@@ -684,7 +684,7 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG
const char *fcnName = "contig_hs_dr_pio_test__run_test()";
#endif /* CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
- hbool_t mis_match = FALSE;
+ bool mis_match = false;
int i, j, k, l;
size_t n;
int mpi_rank; /* needed by the VRFY macro */
@@ -806,7 +806,7 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
* selections as having the same shape.
*/
check = H5Sselect_shape_same(tv_ptr->small_ds_slice_sid, tv_ptr->file_large_ds_sid_0);
- VRFY((check == TRUE), "H5Sselect_shape_same passed");
+ VRFY((check == true), "H5Sselect_shape_same passed");
/* Read selection from disk */
#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG
@@ -824,7 +824,7 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
/* verify that expected data is retrieved */
- mis_match = FALSE;
+ mis_match = false;
ptr_1 = tv_ptr->small_ds_slice_buf;
expected_value =
(uint32_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size *
@@ -836,7 +836,7 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
if (*ptr_1 != expected_value) {
- mis_match = TRUE;
+ mis_match = true;
}
*ptr_1 = 0; /* zero data for next use */
@@ -845,7 +845,7 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
expected_value++;
}
- VRFY((mis_match == FALSE), "small slice read from large ds data good.");
+ VRFY((mis_match == false), "small slice read from large ds data good.");
(tv_ptr->tests_run)++;
}
@@ -892,7 +892,7 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG
const char *fcnName = "contig_hs_dr_pio_test__d2m_s2l()";
#endif /* CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
- hbool_t mis_match = FALSE;
+ bool mis_match = false;
int i, j, k, l;
size_t n;
int mpi_rank; /* needed by the VRFY macro */
@@ -1035,7 +1035,7 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
* selections as having the same shape.
*/
check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid);
- VRFY((check == TRUE), "H5Sselect_shape_same passed");
+ VRFY((check == true), "H5Sselect_shape_same passed");
/* Read selection from disk */
#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG
@@ -1071,7 +1071,7 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
if (*ptr_1 != expected_value) {
- mis_match = TRUE;
+ mis_match = true;
}
expected_value++;
}
@@ -1079,7 +1079,7 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
if (*ptr_1 != 0) {
- mis_match = TRUE;
+ mis_match = true;
}
}
/* zero out the value for the next pass */
@@ -1088,7 +1088,7 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
ptr_1++;
}
- VRFY((mis_match == FALSE), "small slice read from large ds data good.");
+ VRFY((mis_match == false), "small slice read from large ds data good.");
(tv_ptr->tests_run)++;
}
@@ -1137,7 +1137,7 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG
const char *fcnName = "contig_hs_dr_pio_test__m2d_l2s()";
#endif /* CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
- hbool_t mis_match = FALSE;
+ bool mis_match = false;
int i, j, k, l;
size_t n;
int mpi_rank; /* needed by the VRFY macro */
@@ -1301,7 +1301,7 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
* on disk full square selections as having the same shape.
*/
check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid);
- VRFY((check == TRUE), "H5Sselect_shape_same passed.");
+ VRFY((check == true), "H5Sselect_shape_same passed.");
/* write the slice from the in memory large data set to the
* slice of the on disk small dataset. */
@@ -1324,7 +1324,7 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
/* verify that expected data is retrieved */
- mis_match = FALSE;
+ mis_match = false;
ptr_1 = tv_ptr->small_ds_buf_1;
expected_value =
@@ -1345,7 +1345,7 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
if (*ptr_1 != expected_value) {
- mis_match = TRUE;
+ mis_match = true;
}
expected_value++;
}
@@ -1353,7 +1353,7 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
if (*ptr_1 != 0) {
- mis_match = TRUE;
+ mis_match = true;
}
}
/* zero out the value for the next pass */
@@ -1362,7 +1362,7 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
ptr_1++;
}
- VRFY((mis_match == FALSE), "small slice write from large ds data good.");
+ VRFY((mis_match == false), "small slice write from large ds data good.");
(tv_ptr->tests_run)++;
}
@@ -1413,7 +1413,7 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
const char *fcnName = "contig_hs_dr_pio_test__m2d_s2l()";
#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
- hbool_t mis_match = FALSE;
+ bool mis_match = false;
int i, j, k, l;
size_t n;
int mpi_rank; /* needed by the VRFY macro */
@@ -1584,7 +1584,7 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
* as having the same shape.
*/
check = H5Sselect_shape_same(tv_ptr->mem_small_ds_sid, tv_ptr->file_large_ds_sid_0);
- VRFY((check == TRUE), "H5Sselect_shape_same passed");
+ VRFY((check == true), "H5Sselect_shape_same passed");
/* write the small data set slice from memory to the
* target slice of the disk data set
@@ -1632,7 +1632,7 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
if (*ptr_1 != expected_value) {
- mis_match = TRUE;
+ mis_match = true;
}
expected_value++;
@@ -1641,7 +1641,7 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
if (*ptr_1 != 0) {
- mis_match = TRUE;
+ mis_match = true;
}
}
/* zero out buffer for next test */
@@ -1649,7 +1649,7 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
ptr_1++;
}
- VRFY((mis_match == FALSE), "small ds slice write to large ds slice data good.");
+ VRFY((mis_match == false), "small ds slice write to large ds slice data good.");
(tv_ptr->tests_run)++;
}
@@ -1683,7 +1683,7 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
static void
contig_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const int chunk_edge_size,
- const int small_rank, const int large_rank, const hbool_t use_collective_io,
+ const int small_rank, const int large_rank, const bool use_collective_io,
const hid_t dset_type, int express_test, int *skips_ptr, int max_skips,
int64_t *total_tests_ptr, int64_t *tests_run_ptr, int64_t *tests_skipped_ptr)
{
@@ -1701,7 +1701,7 @@ contig_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const i
/* int chunk_edge_size = */ -1,
/* int small_rank = */ -1,
/* int large_rank = */ -1,
- /* hid_t dset_type = */ -1,
+ /* hid_t dset_type = */ H5I_INVALID_HID,
/* uint32_t * small_ds_buf_0 = */ NULL,
/* uint32_t * small_ds_buf_1 = */ NULL,
/* uint32_t * small_ds_buf_2 = */ NULL,
@@ -1712,24 +1712,24 @@ contig_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const i
/* uint32_t * large_ds_slice_buf = */ NULL,
/* int small_ds_offset = */ -1,
/* int large_ds_offset = */ -1,
- /* hid_t fid = */ -1, /* HDF5 file ID */
+ /* hid_t fid = */ H5I_INVALID_HID, /* HDF5 file ID */
/* hid_t xfer_plist = */ H5P_DEFAULT,
- /* hid_t full_mem_small_ds_sid = */ -1,
- /* hid_t full_file_small_ds_sid = */ -1,
- /* hid_t mem_small_ds_sid = */ -1,
- /* hid_t file_small_ds_sid_0 = */ -1,
- /* hid_t file_small_ds_sid_1 = */ -1,
- /* hid_t small_ds_slice_sid = */ -1,
- /* hid_t full_mem_large_ds_sid = */ -1,
- /* hid_t full_file_large_ds_sid = */ -1,
- /* hid_t mem_large_ds_sid = */ -1,
- /* hid_t file_large_ds_sid_0 = */ -1,
- /* hid_t file_large_ds_sid_1 = */ -1,
- /* hid_t file_large_ds_process_slice_sid = */ -1,
- /* hid_t mem_large_ds_process_slice_sid = */ -1,
- /* hid_t large_ds_slice_sid = */ -1,
- /* hid_t small_dataset = */ -1, /* Dataset ID */
- /* hid_t large_dataset = */ -1, /* Dataset ID */
+ /* hid_t full_mem_small_ds_sid = */ H5I_INVALID_HID,
+ /* hid_t full_file_small_ds_sid = */ H5I_INVALID_HID,
+ /* hid_t mem_small_ds_sid = */ H5I_INVALID_HID,
+ /* hid_t file_small_ds_sid_0 = */ H5I_INVALID_HID,
+ /* hid_t file_small_ds_sid_1 = */ H5I_INVALID_HID,
+ /* hid_t small_ds_slice_sid = */ H5I_INVALID_HID,
+ /* hid_t full_mem_large_ds_sid = */ H5I_INVALID_HID,
+ /* hid_t full_file_large_ds_sid = */ H5I_INVALID_HID,
+ /* hid_t mem_large_ds_sid = */ H5I_INVALID_HID,
+ /* hid_t file_large_ds_sid_0 = */ H5I_INVALID_HID,
+ /* hid_t file_large_ds_sid_1 = */ H5I_INVALID_HID,
+ /* hid_t file_large_ds_process_slice_sid = */ H5I_INVALID_HID,
+ /* hid_t mem_large_ds_process_slice_sid = */ H5I_INVALID_HID,
+ /* hid_t large_ds_slice_sid = */ H5I_INVALID_HID,
+ /* hid_t small_dataset = */ H5I_INVALID_HID, /* Dataset ID */
+ /* hid_t large_dataset = */ H5I_INVALID_HID, /* Dataset ID */
/* size_t small_ds_size = */ 1,
/* size_t small_ds_slice_size = */ 1,
/* size_t large_ds_size = */ 1,
@@ -1924,7 +1924,7 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
chunk_edge_size = 0;
contig_hs_dr_pio_test__run_test(test_num, edge_size, chunk_edge_size, small_rank,
- large_rank, FALSE, dset_type, express_test, &skips,
+ large_rank, false, dset_type, express_test, &skips,
max_skips, &total_tests, &tests_run, &tests_skipped);
test_num++;
break;
@@ -1935,7 +1935,7 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
chunk_edge_size = 0;
contig_hs_dr_pio_test__run_test(test_num, edge_size, chunk_edge_size, small_rank,
- large_rank, TRUE, dset_type, express_test, &skips,
+ large_rank, true, dset_type, express_test, &skips,
max_skips, &total_tests, &tests_run, &tests_skipped);
test_num++;
break;
@@ -1946,7 +1946,7 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
chunk_edge_size = 5;
contig_hs_dr_pio_test__run_test(test_num, edge_size, chunk_edge_size, small_rank,
- large_rank, FALSE, dset_type, express_test, &skips,
+ large_rank, false, dset_type, express_test, &skips,
max_skips, &total_tests, &tests_run, &tests_skipped);
test_num++;
break;
@@ -1957,14 +1957,14 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
chunk_edge_size = 5;
contig_hs_dr_pio_test__run_test(test_num, edge_size, chunk_edge_size, small_rank,
- large_rank, TRUE, dset_type, express_test, &skips,
+ large_rank, true, dset_type, express_test, &skips,
max_skips, &total_tests, &tests_run, &tests_skipped);
test_num++;
break;
/* end of case COL_CHUNKED */
default:
- VRFY((FALSE), "unknown test type");
+ VRFY((false), "unknown test type");
break;
} /* end of switch(sstest_type) */
@@ -2019,7 +2019,7 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank, const hid_t tgt_sid, cons
#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
const char *fcnName = "ckrbrd_hs_dr_pio_test__slct_ckrbrd():";
#endif
- hbool_t first_selection = TRUE;
+ bool first_selection = true;
int i, j, k, l, m;
int n_cube_offset;
int sel_offset;
@@ -2222,7 +2222,7 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank, const hid_t tgt_sid, cons
if (first_selection) {
- first_selection = FALSE;
+ first_selection = false;
ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_SET, &(start[n_cube_offset]),
&(stride[n_cube_offset]), &(count[n_cube_offset]),
@@ -2294,7 +2294,7 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank, const hid_t tgt_sid, cons
** ckrbrd_hs_dr_pio_test__verify_data():
**
** Examine the supplied buffer to see if it contains the
-** expected data. Return TRUE if it does, and FALSE
+** expected data. Return true if it does, and false
** otherwise.
**
** The supplied buffer is presumed to this process's slice
@@ -2347,17 +2347,17 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank, const hid_t tgt_sid, cons
#define CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG 0
-static hbool_t
+static bool
ckrbrd_hs_dr_pio_test__verify_data(uint32_t *buf_ptr, const int rank, const int edge_size,
const int checker_edge_size, uint32_t first_expected_val,
- hbool_t buf_starts_in_checker)
+ bool buf_starts_in_checker)
{
#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
const char *fcnName = "ckrbrd_hs_dr_pio_test__verify_data():";
#endif
- hbool_t good_data = TRUE;
- hbool_t in_checker;
- hbool_t start_in_checker[5];
+ bool good_data = true;
+ bool in_checker;
+ bool start_in_checker[5];
uint32_t expected_value;
uint32_t *val_ptr;
int i, j, k, l, m; /* to track position in n-cube */
@@ -2449,7 +2449,7 @@ do {
if (*val_ptr != expected_value) {
- good_data = FALSE;
+ good_data = false;
}
/* zero out buffer for reuse */
@@ -2457,7 +2457,7 @@ do {
}
else if (*val_ptr != 0) {
- good_data = FALSE;
+ good_data = false;
/* zero out buffer for reuse */
*val_ptr = 0;
@@ -2519,7 +2519,7 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
const char *fcnName = "ckrbrd_hs_dr_pio_test__d2m_l2s()";
uint32_t *ptr_0;
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
- hbool_t data_ok = FALSE;
+ bool data_ok = false;
int i, j, k, l;
uint32_t expected_value;
int mpi_rank; /* needed by VRFY */
@@ -2671,7 +2671,7 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
* selections as having the same shape.
*/
check = H5Sselect_shape_same(tv_ptr->small_ds_slice_sid, tv_ptr->file_large_ds_sid_0);
- VRFY((check == TRUE), "H5Sselect_shape_same passed");
+ VRFY((check == true), "H5Sselect_shape_same passed");
/* Read selection from disk */
#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
@@ -2702,9 +2702,9 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
data_ok = ckrbrd_hs_dr_pio_test__verify_data(
tv_ptr->small_ds_slice_buf, tv_ptr->small_rank - 1, tv_ptr->edge_size,
- tv_ptr->checker_edge_size, expected_value, (hbool_t)TRUE);
+ tv_ptr->checker_edge_size, expected_value, (bool)true);
- VRFY((data_ok == TRUE), "small slice read from large ds data good.");
+ VRFY((data_ok == true), "small slice read from large ds data good.");
(tv_ptr->tests_run)++;
}
@@ -2751,7 +2751,7 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
const char *fcnName = "ckrbrd_hs_dr_pio_test__d2m_s2l()";
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
- hbool_t data_ok = FALSE;
+ bool data_ok = false;
int i, j, k, l;
size_t u;
size_t start_index;
@@ -2893,7 +2893,7 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
* selections as having the same shape.
*/
check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid);
- VRFY((check == TRUE), "H5Sselect_shape_same passed");
+ VRFY((check == true), "H5Sselect_shape_same passed");
/* Read selection from disk */
#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
@@ -2911,7 +2911,7 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
/* verify that the expected data and only the
* expected data was read.
*/
- data_ok = TRUE;
+ data_ok = true;
ptr_1 = tv_ptr->large_ds_buf_1;
expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
start_index =
@@ -2951,7 +2951,7 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
if (*ptr_1 != 0) {
- data_ok = FALSE;
+ data_ok = false;
}
/* zero out the value for the next pass */
@@ -2960,13 +2960,13 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
ptr_1++;
}
- VRFY((data_ok == TRUE), "slice read from small to large ds data good(1).");
+ VRFY((data_ok == true), "slice read from small to large ds data good(1).");
data_ok = ckrbrd_hs_dr_pio_test__verify_data(ptr_1, tv_ptr->small_rank - 1,
tv_ptr->edge_size, tv_ptr->checker_edge_size,
- expected_value, (hbool_t)TRUE);
+ expected_value, (bool)true);
- VRFY((data_ok == TRUE), "slice read from small to large ds data good(2).");
+ VRFY((data_ok == true), "slice read from small to large ds data good(2).");
ptr_1 = tv_ptr->large_ds_buf_1 + stop_index + 1;
@@ -2974,7 +2974,7 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
if (*ptr_1 != 0) {
- data_ok = FALSE;
+ data_ok = false;
}
/* zero out the value for the next pass */
@@ -2983,7 +2983,7 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
ptr_1++;
}
- VRFY((data_ok == TRUE), "slice read from small to large ds data good(3).");
+ VRFY((data_ok == true), "slice read from small to large ds data good(3).");
(tv_ptr->tests_run)++;
}
@@ -3034,7 +3034,7 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG
const char *fcnName = "ckrbrd_hs_dr_pio_test__m2d_l2s()";
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
- hbool_t data_ok = FALSE;
+ bool data_ok = false;
int i, j, k, l;
size_t u;
size_t start_index;
@@ -3215,7 +3215,7 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
* slice of the small data set as having the same shape.
*/
check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_1, tv_ptr->mem_large_ds_sid);
- VRFY((check == TRUE), "H5Sselect_shape_same passed.");
+ VRFY((check == true), "H5Sselect_shape_same passed.");
/* write the checker board selection of the slice from the in
* memory large data set to the slice of the on disk small
@@ -3252,33 +3252,33 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
assert(start_index < stop_index);
assert(stop_index <= tv_ptr->small_ds_size);
- data_ok = TRUE;
+ data_ok = true;
ptr_1 = tv_ptr->small_ds_buf_1;
for (u = 0; u < start_index; u++, ptr_1++) {
if (*ptr_1 != 0) {
- data_ok = FALSE;
+ data_ok = false;
*ptr_1 = 0;
}
}
data_ok &= ckrbrd_hs_dr_pio_test__verify_data(
tv_ptr->small_ds_buf_1 + start_index, tv_ptr->small_rank - 1, tv_ptr->edge_size,
- tv_ptr->checker_edge_size, expected_value, (hbool_t)TRUE);
+ tv_ptr->checker_edge_size, expected_value, (bool)true);
ptr_1 = tv_ptr->small_ds_buf_1;
for (u = stop_index; u < tv_ptr->small_ds_size; u++, ptr_1++) {
if (*ptr_1 != 0) {
- data_ok = FALSE;
+ data_ok = false;
*ptr_1 = 0;
}
}
- VRFY((data_ok == TRUE), "large slice write slice to small slice data good.");
+ VRFY((data_ok == true), "large slice write slice to small slice data good.");
(tv_ptr->tests_run)++;
}
@@ -3329,7 +3329,7 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG
const char *fcnName = "ckrbrd_hs_dr_pio_test__m2d_s2l()";
#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
- hbool_t data_ok = FALSE;
+ bool data_ok = false;
int i, j, k, l;
size_t u;
size_t start_index;
@@ -3506,7 +3506,7 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
* as having the same shape.
*/
check = H5Sselect_shape_same(tv_ptr->mem_small_ds_sid, tv_ptr->file_large_ds_sid_1);
- VRFY((check == TRUE), "H5Sselect_shape_same passed");
+ VRFY((check == true), "H5Sselect_shape_same passed");
/* write the small data set slice from memory to the
* target slice of the disk data set
@@ -3546,33 +3546,33 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
assert(start_index < stop_index);
assert(stop_index < tv_ptr->large_ds_size);
- data_ok = TRUE;
+ data_ok = true;
ptr_1 = tv_ptr->large_ds_buf_1;
for (u = 0; u < start_index; u++, ptr_1++) {
if (*ptr_1 != 0) {
- data_ok = FALSE;
+ data_ok = false;
*ptr_1 = 0;
}
}
data_ok &= ckrbrd_hs_dr_pio_test__verify_data(
tv_ptr->large_ds_buf_1 + start_index, tv_ptr->small_rank - 1, tv_ptr->edge_size,
- tv_ptr->checker_edge_size, expected_value, (hbool_t)TRUE);
+ tv_ptr->checker_edge_size, expected_value, (bool)true);
ptr_1 = tv_ptr->large_ds_buf_1;
for (u = stop_index; u < tv_ptr->small_ds_size; u++, ptr_1++) {
if (*ptr_1 != 0) {
- data_ok = FALSE;
+ data_ok = false;
*ptr_1 = 0;
}
}
- VRFY((data_ok == TRUE), "small ds cb slice write to large ds slice data good.");
+ VRFY((data_ok == true), "small ds cb slice write to large ds slice data good.");
(tv_ptr->tests_run)++;
}
@@ -3607,9 +3607,9 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
static void
ckrbrd_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const int checker_edge_size,
const int chunk_edge_size, const int small_rank, const int large_rank,
- const hbool_t use_collective_io, const hid_t dset_type,
- const int express_test, int *skips_ptr, int max_skips,
- int64_t *total_tests_ptr, int64_t *tests_run_ptr, int64_t *tests_skipped_ptr)
+ const bool use_collective_io, const hid_t dset_type, const int express_test,
+ int *skips_ptr, int max_skips, int64_t *total_tests_ptr,
+ int64_t *tests_run_ptr, int64_t *tests_skipped_ptr)
{
#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG
@@ -3626,7 +3626,7 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const i
/* int chunk_edge_size = */ -1,
/* int small_rank = */ -1,
/* int large_rank = */ -1,
- /* hid_t dset_type = */ -1,
+ /* hid_t dset_type = */ H5I_INVALID_HID,
/* uint32_t * small_ds_buf_0 = */ NULL,
/* uint32_t * small_ds_buf_1 = */ NULL,
/* uint32_t * small_ds_buf_2 = */ NULL,
@@ -3637,24 +3637,24 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const i
/* uint32_t * large_ds_slice_buf = */ NULL,
/* int small_ds_offset = */ -1,
/* int large_ds_offset = */ -1,
- /* hid_t fid = */ -1, /* HDF5 file ID */
+ /* hid_t fid = */ H5I_INVALID_HID, /* HDF5 file ID */
/* hid_t xfer_plist = */ H5P_DEFAULT,
- /* hid_t full_mem_small_ds_sid = */ -1,
- /* hid_t full_file_small_ds_sid = */ -1,
- /* hid_t mem_small_ds_sid = */ -1,
- /* hid_t file_small_ds_sid_0 = */ -1,
- /* hid_t file_small_ds_sid_1 = */ -1,
- /* hid_t small_ds_slice_sid = */ -1,
- /* hid_t full_mem_large_ds_sid = */ -1,
- /* hid_t full_file_large_ds_sid = */ -1,
- /* hid_t mem_large_ds_sid = */ -1,
- /* hid_t file_large_ds_sid_0 = */ -1,
- /* hid_t file_large_ds_sid_1 = */ -1,
- /* hid_t file_large_ds_process_slice_sid = */ -1,
- /* hid_t mem_large_ds_process_slice_sid = */ -1,
- /* hid_t large_ds_slice_sid = */ -1,
- /* hid_t small_dataset = */ -1, /* Dataset ID */
- /* hid_t large_dataset = */ -1, /* Dataset ID */
+ /* hid_t full_mem_small_ds_sid = */ H5I_INVALID_HID,
+ /* hid_t full_file_small_ds_sid = */ H5I_INVALID_HID,
+ /* hid_t mem_small_ds_sid = */ H5I_INVALID_HID,
+ /* hid_t file_small_ds_sid_0 = */ H5I_INVALID_HID,
+ /* hid_t file_small_ds_sid_1 = */ H5I_INVALID_HID,
+ /* hid_t small_ds_slice_sid = */ H5I_INVALID_HID,
+ /* hid_t full_mem_large_ds_sid = */ H5I_INVALID_HID,
+ /* hid_t full_file_large_ds_sid = */ H5I_INVALID_HID,
+ /* hid_t mem_large_ds_sid = */ H5I_INVALID_HID,
+ /* hid_t file_large_ds_sid_0 = */ H5I_INVALID_HID,
+ /* hid_t file_large_ds_sid_1 = */ H5I_INVALID_HID,
+ /* hid_t file_large_ds_process_slice_sid = */ H5I_INVALID_HID,
+ /* hid_t mem_large_ds_process_slice_sid = */ H5I_INVALID_HID,
+ /* hid_t large_ds_slice_sid = */ H5I_INVALID_HID,
+ /* hid_t small_dataset = */ H5I_INVALID_HID, /* Dataset ID */
+ /* hid_t large_dataset = */ H5I_INVALID_HID, /* Dataset ID */
/* size_t small_ds_size = */ 1,
/* size_t small_ds_slice_size = */ 1,
/* size_t large_ds_size = */ 1,
@@ -3838,7 +3838,7 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
/* contiguous data set, independent I/O */
chunk_edge_size = 0;
ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size,
- small_rank, large_rank, FALSE, dset_type, express_test,
+ small_rank, large_rank, false, dset_type, express_test,
&skips, max_skips, &total_tests, &tests_run,
&tests_skipped);
test_num++;
@@ -3849,7 +3849,7 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
/* contiguous data set, collective I/O */
chunk_edge_size = 0;
ckrbrd_hs_dr_pio_test__run_test(
- test_num, edge_size, checker_edge_size, chunk_edge_size, small_rank, large_rank, TRUE,
+ test_num, edge_size, checker_edge_size, chunk_edge_size, small_rank, large_rank, true,
dset_type, express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped);
test_num++;
break;
@@ -3859,7 +3859,7 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
/* chunked data set, independent I/O */
chunk_edge_size = 5;
ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size,
- small_rank, large_rank, FALSE, dset_type, express_test,
+ small_rank, large_rank, false, dset_type, express_test,
&skips, max_skips, &total_tests, &tests_run,
&tests_skipped);
test_num++;
@@ -3870,14 +3870,14 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
/* chunked data set, collective I/O */
chunk_edge_size = 5;
ckrbrd_hs_dr_pio_test__run_test(
- test_num, edge_size, checker_edge_size, chunk_edge_size, small_rank, large_rank, TRUE,
+ test_num, edge_size, checker_edge_size, chunk_edge_size, small_rank, large_rank, true,
dset_type, express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped);
test_num++;
break;
/* end of case COL_CHUNKED */
default:
- VRFY((FALSE), "unknown test type");
+ VRFY((false), "unknown test type");
break;
} /* end of switch(sstest_type) */
@@ -4142,7 +4142,7 @@ parse_options(int argc, char **argv)
hid_t
create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
{
- hid_t ret_pl = -1;
+ hid_t ret_pl = H5I_INVALID_HID;
herr_t ret; /* generic return value */
int mpi_rank; /* mpi variables */
@@ -4159,9 +4159,9 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
/* set Parallel access with communicator */
ret = H5Pset_fapl_mpio(ret_pl, comm, info);
VRFY((ret >= 0), "");
- ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE);
+ ret = H5Pset_all_coll_metadata_ops(ret_pl, true);
VRFY((ret >= 0), "");
- ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
+ ret = H5Pset_coll_metadata_write(ret_pl, true);
VRFY((ret >= 0), "");
return (ret_pl);
}
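
For readers unfamiliar with the property-list calls exercised here, this is the pattern create_faccess_plist() wraps: a parallel FAPL with collective metadata reads and writes enabled. A minimal sketch, not part of the patch (error checks omitted, file name hypothetical, MPI assumed initialized):

hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
H5Pset_all_coll_metadata_ops(fapl, true);   /* collective metadata reads  */
H5Pset_coll_metadata_write(fapl, true);     /* collective metadata writes */
hid_t fid = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
H5Fclose(fid);
H5Pclose(fapl);
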
@@ -4260,6 +4260,8 @@ main(int argc, char **argv)
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ mpi_rank_framework_g = mpi_rank;
+
dim0 = ROW_FACTOR * mpi_size;
dim1 = COL_FACTOR * mpi_size;
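
The same mechanical substitutions recur in every file below: hbool_t/TRUE/FALSE become the C99 <stdbool.h> equivalents, and raw -1 sentinels for HDF5 identifiers become H5I_INVALID_HID. A before/after sketch of the pattern (illustrative only):

#include <stdbool.h>
bool  data_ok = false;            /* was: hbool_t data_ok = FALSE; */
hid_t fid     = H5I_INVALID_HID;  /* was: hid_t   fid     = -1;    */
data_ok = true;                   /* was: data_ok = TRUE;          */
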
diff --git a/testpar/t_span_tree.c b/testpar/t_span_tree.c
index 7614165..e4ff258 100644
--- a/testpar/t_span_tree.c
+++ b/testpar/t_span_tree.c
@@ -399,7 +399,7 @@ coll_write_test(int chunk_factor)
For testing collective hyperslab selection write
In this test, we are using independent read to check
- the correctedness of collective write compared with
+ the correctness of collective write compared with
independent write,
In order to thoroughly test this feature, we choose
@@ -494,7 +494,7 @@ coll_write_test(int chunk_factor)
mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL);
/*
- * Select two hyperslabs in memory. Hyperslabs has the same
+ * Select two hyperslabs in memory. Hyperslabs have the same
* size and shape as the selected hyperslabs for the file dataspace
* Only the starting point is different.
* The first selection
@@ -734,7 +734,7 @@ coll_read_test(void)
mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL);
/*
- * Select two hyperslabs in memory. Hyperslabs has the same
+ * Select two hyperslabs in memory. Hyperslabs have the same
* size and shape as the selected hyperslabs for the file dataspace.
* Only the starting point is different.
* The first selection
@@ -868,7 +868,7 @@ coll_read_test(void)
** sel_rank fastest changing indices, with origin (in the
** higher indices) as indicated by the start array.
**
-** Note that this function, is hard coded to presume a
+** Note that this function is hard-coded to presume a
** maximum dataspace rank of 5.
**
** While this maximum is declared as a constant, increasing
@@ -894,7 +894,7 @@ lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t t
#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
const char *fcnName = "lower_dim_size_comp_test__select_checker_board():";
#endif
- hbool_t first_selection = TRUE;
+ bool first_selection = true;
int i, j, k, l, m;
int ds_offset;
int sel_offset;
@@ -946,7 +946,7 @@ lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t t
* Note that the following computation depends on the C99
* requirement that integer division discard any fraction
* (truncation towards zero) to function correctly. As we
- * now require C99, this shouldn't be a problem, but noting
+ * now require C99, this shouldn't be a problem, but note
* it may save us some pain if we are ever obliged to support
* pre-C99 compilers again.
*/
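
A quick illustration of the C99 guarantee the comment leans on, with made-up operands: integer division truncates toward zero, so a negative intermediate result rounds toward zero rather than toward minus infinity.

int q_pos =  7 / 3;   /* ==  2                    */
int q_neg = -7 / 3;   /* == -2 under C99, not -3  */
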
@@ -975,7 +975,7 @@ lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t t
/* Now set up the stride and block arrays, and portions of the start
* and count arrays that will not be altered during the selection of
- * the checker board.
+ * the checkerboard.
*/
i = 0;
while (i < ds_offset) {
@@ -1116,7 +1116,7 @@ lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t t
if (first_selection) {
- first_selection = FALSE;
+ first_selection = false;
ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_SET, &(start[ds_offset]),
&(stride[ds_offset]), &(count[ds_offset]),
@@ -1192,16 +1192,16 @@ lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t t
** lower_dim_size_comp_test__verify_data():
**
** Examine the supplied buffer to see if it contains the
-** expected data. Return TRUE if it does, and FALSE
+** expected data. Return true if it does, and false
** otherwise.
**
-** The supplied buffer is presumed to this process's slice
+** The supplied buffer is presumed to be this process's slice
** of the target data set. Each such slice will be an
** n-cube of rank (rank -1) and the supplied edge_size with
** origin (mpi_rank, 0, ... , 0) in the target data set.
**
** Further, the buffer is presumed to be the result of reading
-** or writing a checker board selection of an m (1 <= m <
+** or writing a checkerboard selection of an m (1 <= m <
** rank) dimensional slice through this process's slice
** of the target data set. Also, this slice must be parallel
** to the fastest changing indices.
@@ -1220,7 +1220,7 @@ lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t t
**
** Further, supposing that this is process 10, this process's
** slice of the dataset would be a 10 x 10 2-cube with origin
-** (10, 0, 0) in the data set, and would be initialize (prior
+** (10, 0, 0) in the data set, and would be initialized (prior
** to the checkerboard selection) as follows:
**
** 1000, 1001, 1002, ... 1008, 1009
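
For a fully selected slice, the check the comment describes amounts to a linear scan. A minimal sketch (not part of the patch; buf and mpi_rank stand in for this process's slice buffer and rank) for the 10 x 10 example above, where values start at mpi_rank * 100:

bool     slice_ok = true;
uint32_t expected = (uint32_t)mpi_rank * 100;   /* 1000 for process 10 */
for (size_t u = 0; u < 100; u++)
    if (buf[u] != expected + (uint32_t)u)
        slice_ok = false;

The real lower_dim_size_comp_test__verify_data() additionally has to skip the squares left unselected by the checkerboard, which is what the start_in_checker/in_checker bookkeeping below implements.
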
@@ -1245,20 +1245,20 @@ lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t t
#define LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG 0
-static hbool_t
+static bool
lower_dim_size_comp_test__verify_data(uint32_t *buf_ptr,
#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
const int mpi_rank,
#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */
const int rank, const int edge_size, const int checker_edge_size,
- uint32_t first_expected_val, hbool_t buf_starts_in_checker)
+ uint32_t first_expected_val, bool buf_starts_in_checker)
{
#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
const char *fcnName = "lower_dim_size_comp_test__verify_data():";
#endif
- hbool_t good_data = TRUE;
- hbool_t in_checker;
- hbool_t start_in_checker[5];
+ bool good_data = true;
+ bool in_checker;
+ bool start_in_checker[5];
uint32_t expected_value;
uint32_t *val_ptr;
int i, j, k, l, m; /* to track position in n-cube */
@@ -1351,7 +1351,7 @@ lower_dim_size_comp_test__verify_data(uint32_t *buf_ptr,
if (*val_ptr != expected_value) {
- good_data = FALSE;
+ good_data = false;
}
/* zero out buffer for reuse */
@@ -1359,7 +1359,7 @@ lower_dim_size_comp_test__verify_data(uint32_t *buf_ptr,
}
else if (*val_ptr != 0) {
- good_data = FALSE;
+ good_data = false;
/* zero out buffer for reuse */
*val_ptr = 0;
@@ -1408,7 +1408,7 @@ lower_dim_size_comp_test__verify_data(uint32_t *buf_ptr,
#define LDSCT_DS_RANK 5
static void
-lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_collective_io,
+lower_dim_size_comp_test__run_test(const int chunk_edge_size, const bool use_collective_io,
const hid_t dset_type)
{
#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
@@ -1418,8 +1418,8 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
hsize_t max_dims[32];
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
const char *filename;
- hbool_t data_ok = FALSE;
- hbool_t mis_match = FALSE;
+ bool data_ok = false;
+ bool mis_match = false;
int i;
int start_index;
int stop_index;
@@ -1761,10 +1761,10 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
}
check = H5Sselect_valid(mem_small_ds_sid);
- VRFY((check == TRUE), "H5Sselect_valid(mem_small_ds_sid) returns TRUE");
+ VRFY((check == true), "H5Sselect_valid(mem_small_ds_sid) returns true");
check = H5Sselect_valid(file_small_ds_sid);
- VRFY((check == TRUE), "H5Sselect_valid(file_small_ds_sid) returns TRUE");
+ VRFY((check == true), "H5Sselect_valid(file_small_ds_sid) returns true");
/* write the initial value of the small data set to file */
#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
@@ -1791,7 +1791,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
* and reset the buffer to zero in passing.
*/
expected_value = 0;
- mis_match = FALSE;
+ mis_match = false;
ptr_1 = small_ds_buf_1;
i = 0;
@@ -1799,7 +1799,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
if (*ptr_1 != expected_value) {
- mis_match = TRUE;
+ mis_match = true;
}
*ptr_1 = (uint32_t)0;
@@ -1807,7 +1807,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
ptr_1++;
expected_value++;
}
- VRFY((mis_match == FALSE), "small ds init data good.");
+ VRFY((mis_match == false), "small ds init data good.");
/* setup selections for writing initial data to the large data set */
start[0] = (hsize_t)(mpi_rank + 1);
@@ -1915,10 +1915,10 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
check = H5Sselect_valid(mem_large_ds_sid);
- VRFY((check == TRUE), "H5Sselect_valid(mem_large_ds_sid) returns TRUE");
+ VRFY((check == true), "H5Sselect_valid(mem_large_ds_sid) returns true");
check = H5Sselect_valid(file_large_ds_sid);
- VRFY((check == TRUE), "H5Sselect_valid(file_large_ds_sid) returns TRUE");
+ VRFY((check == true), "H5Sselect_valid(file_large_ds_sid) returns true");
/* write the initial value of the large data set to file */
#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
@@ -1952,7 +1952,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
* in passing, reset the buffer to zeros
*/
expected_value = 0;
- mis_match = FALSE;
+ mis_match = false;
ptr_1 = large_ds_buf_1;
i = 0;
@@ -1960,7 +1960,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
if (*ptr_1 != expected_value) {
- mis_match = TRUE;
+ mis_match = true;
}
*ptr_1 = (uint32_t)0;
@@ -1968,7 +1968,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
ptr_1++;
expected_value++;
}
- VRFY((mis_match == FALSE), "large ds init data good.");
+ VRFY((mis_match == false), "large ds init data good.");
/***********************************/
/***** INITIALIZATION COMPLETE *****/
@@ -2006,7 +2006,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
* selections as having the same shape.
*/
check = H5Sselect_shape_same(mem_large_ds_sid, file_small_ds_sid);
- VRFY((check == TRUE), "H5Sselect_shape_same passed (1)");
+ VRFY((check == true), "H5Sselect_shape_same passed (1)");
ret = H5Dread(small_dataset, H5T_NATIVE_UINT32, mem_large_ds_sid, file_small_ds_sid, xfer_plist,
large_ds_buf_1);
@@ -2021,7 +2021,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
/* verify that expected data is retrieved */
- data_ok = TRUE;
+ data_ok = true;
start_index = (int)((large_sel_start[0] * large_dims[1] * large_dims[2] * large_dims[3] * large_dims[4]) +
(large_sel_start[1] * large_dims[2] * large_dims[3] * large_dims[4]) +
@@ -2040,14 +2040,14 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
if (*ptr_1 != (uint32_t)0) {
- data_ok = FALSE;
+ data_ok = false;
*ptr_1 = (uint32_t)0;
}
ptr_1++;
}
- VRFY((data_ok == TRUE), "slice read from small ds data good(1).");
+ VRFY((data_ok == true), "slice read from small ds data good(1).");
data_ok = lower_dim_size_comp_test__verify_data(ptr_1,
#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
@@ -2056,11 +2056,11 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
/* rank */ 2,
/* edge_size */ 10,
/* checker_edge_size */ 3, expected_value,
- /* buf_starts_in_checker */ TRUE);
+ /* buf_starts_in_checker */ true);
- VRFY((data_ok == TRUE), "slice read from small ds data good(2).");
+ VRFY((data_ok == true), "slice read from small ds data good(2).");
- data_ok = TRUE;
+ data_ok = true;
ptr_1 += small_ds_slice_size;
@@ -2068,14 +2068,14 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
if (*ptr_1 != (uint32_t)0) {
- data_ok = FALSE;
+ data_ok = false;
*ptr_1 = (uint32_t)0;
}
ptr_1++;
}
- VRFY((data_ok == TRUE), "slice read from small ds data good(3).");
+ VRFY((data_ok == true), "slice read from small ds data good(3).");
/* read a checkerboard selection of a slice of the process slice of
* the large on disk data set into the process slice of the small
@@ -2103,7 +2103,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
* selections as having the same shape.
*/
check = H5Sselect_shape_same(mem_small_ds_sid, file_large_ds_sid);
- VRFY((check == TRUE), "H5Sselect_shape_same passed (2)");
+ VRFY((check == true), "H5Sselect_shape_same passed (2)");
ret = H5Dread(large_dataset, H5T_NATIVE_UINT32, mem_small_ds_sid, file_large_ds_sid, xfer_plist,
small_ds_buf_1);
@@ -2118,7 +2118,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
/* verify that expected data is retrieved */
- data_ok = TRUE;
+ data_ok = true;
expected_value =
(uint32_t)((large_sel_start[0] * large_dims[1] * large_dims[2] * large_dims[3] * large_dims[4]) +
@@ -2140,14 +2140,14 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
if (*ptr_1 != (uint32_t)0) {
- data_ok = FALSE;
+ data_ok = false;
*ptr_1 = (uint32_t)0;
}
ptr_1++;
}
- VRFY((data_ok == TRUE), "slice read from large ds data good(1).");
+ VRFY((data_ok == true), "slice read from large ds data good(1).");
data_ok = lower_dim_size_comp_test__verify_data(ptr_1,
#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
@@ -2156,11 +2156,11 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
/* rank */ 2,
/* edge_size */ 10,
/* checker_edge_size */ 3, expected_value,
- /* buf_starts_in_checker */ TRUE);
+ /* buf_starts_in_checker */ true);
- VRFY((data_ok == TRUE), "slice read from large ds data good(2).");
+ VRFY((data_ok == true), "slice read from large ds data good(2).");
- data_ok = TRUE;
+ data_ok = true;
ptr_1 += small_ds_slice_size;
@@ -2175,14 +2175,14 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */
- data_ok = FALSE;
+ data_ok = false;
*ptr_1 = (uint32_t)0;
}
ptr_1++;
}
- VRFY((data_ok == TRUE), "slice read from large ds data good(3).");
+ VRFY((data_ok == true), "slice read from large ds data good(3).");
/* Close dataspaces */
ret = H5Sclose(full_mem_small_ds_sid);
@@ -2258,10 +2258,10 @@ lower_dim_size_comp_test(void)
HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned));
for (use_collective_io = 0; use_collective_io <= 1; use_collective_io++) {
chunk_edge_size = 0;
- lower_dim_size_comp_test__run_test(chunk_edge_size, (hbool_t)use_collective_io, H5T_NATIVE_UINT);
+ lower_dim_size_comp_test__run_test(chunk_edge_size, (bool)use_collective_io, H5T_NATIVE_UINT);
chunk_edge_size = 5;
- lower_dim_size_comp_test__run_test(chunk_edge_size, (hbool_t)use_collective_io, H5T_NATIVE_UINT);
+ lower_dim_size_comp_test__run_test(chunk_edge_size, (bool)use_collective_io, H5T_NATIVE_UINT);
} /* end for */
return;
@@ -2285,9 +2285,9 @@ lower_dim_size_comp_test(void)
*
* 2) Has no in memory buffer for any other chunk.
*
- * The test differers from Rob Latham's bug report in
- * that is runs with an arbitrary number of processes,
- * and uses a 1 dimensional dataset.
+ * The test differs from Rob Latham's bug report in
+ * that it runs with an arbitrary number of processes,
+ * and uses a 1-dimensional dataset.
*
* Return: void
*
@@ -2301,7 +2301,7 @@ link_chunk_collective_io_test(void)
{
/* const char *fcnName = "link_chunk_collective_io_test()"; */
const char *filename;
- hbool_t mis_match = FALSE;
+ bool mis_match = false;
int i;
int mrc;
int mpi_rank;
@@ -2427,7 +2427,7 @@ link_chunk_collective_io_test(void)
VRFY((ret >= 0), "H5Pclose(xfer_plist) succeeded");
/* verify the data */
- mis_match = FALSE;
+ mis_match = false;
for (i = 0; i < LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE; i++) {
diff = local_data_written[i] - local_data_read[i];
@@ -2435,10 +2435,10 @@ link_chunk_collective_io_test(void)
if (diff >= 0.001) {
- mis_match = TRUE;
+ mis_match = true;
}
}
- VRFY((mis_match == FALSE), "dataset data good.");
+ VRFY((mis_match == false), "dataset data good.");
/* Close dataspaces */
ret = H5Sclose(write_mem_ds_sid);
diff --git a/testpar/t_subfiling_vfd.c b/testpar/t_subfiling_vfd.c
index 8a3c10e..0c64bf2 100644
--- a/testpar/t_subfiling_vfd.c
+++ b/testpar/t_subfiling_vfd.c
@@ -86,7 +86,7 @@ int curr_nerrors = 0;
typedef void (*test_func)(void);
/* Utility functions */
-static hid_t create_subfiling_ioc_fapl(MPI_Comm comm, MPI_Info info, hbool_t custom_config,
+static hid_t create_subfiling_ioc_fapl(MPI_Comm comm, MPI_Info info, bool custom_config,
H5FD_subfiling_params_t *custom_cfg, int32_t thread_pool_size);
/* Test functions */
@@ -125,7 +125,7 @@ static test_func tests[] = {
* ---------------------------------------------------------------------------
*/
static hid_t
-create_subfiling_ioc_fapl(MPI_Comm comm, MPI_Info info, hbool_t custom_config,
+create_subfiling_ioc_fapl(MPI_Comm comm, MPI_Info info, bool custom_config,
H5FD_subfiling_params_t *custom_cfg, int32_t thread_pool_size)
{
H5FD_subfiling_config_t subfiling_conf;
@@ -200,7 +200,7 @@ test_create_and_close(void)
TESTING_2("file creation and immediate close");
/* Get a default Subfiling FAPL */
- fapl_id = create_subfiling_ioc_fapl(comm_g, info_g, FALSE, NULL, 0);
+ fapl_id = create_subfiling_ioc_fapl(comm_g, info_g, false, NULL, 0);
VRFY((fapl_id >= 0), "FAPL creation succeeded");
file_id = H5Fcreate(SUBF_FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
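
create_subfiling_ioc_fapl() hides boilerplate along these lines: start from the Subfiling VFD defaults, optionally override the stripe parameters, and install the configuration on the FAPL. A rough sketch under those assumptions (not the helper's exact body; error checks omitted, override values are examples):

hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
H5FD_subfiling_config_t cfg;
H5Pset_mpi_params(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
H5Pget_fapl_subfiling(fapl, &cfg);        /* fill in defaults   */
cfg.shared_cfg.stripe_size  = 1048576;    /* example override   */
cfg.shared_cfg.stripe_count = 2;          /* example override   */
H5Pset_fapl_subfiling(fapl, &cfg);
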
@@ -302,7 +302,7 @@ test_config_file(void)
cfg.stripe_size = (stripe_size_g > 0) ? stripe_size_g : stripe_size;
cfg.stripe_count = num_iocs_g > 1 ? (num_iocs_g / 2) : 1;
- fapl_id = create_subfiling_ioc_fapl(comm_g, info_g, TRUE, &cfg, H5FD_IOC_DEFAULT_THREAD_POOL_SIZE);
+ fapl_id = create_subfiling_ioc_fapl(comm_g, info_g, true, &cfg, H5FD_IOC_DEFAULT_THREAD_POOL_SIZE);
VRFY((fapl_id >= 0), "FAPL creation succeeded");
file_id = H5Fcreate(SUBF_FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
@@ -325,8 +325,8 @@ test_config_file(void)
config_filename = malloc(PATH_MAX);
VRFY(config_filename, "malloc succeeded");
- HDsnprintf(config_filename, PATH_MAX, "%s/" H5FD_SUBFILING_CONFIG_FILENAME_TEMPLATE, config_dir,
- SUBF_FILENAME, (uint64_t)file_info.st_ino);
+ snprintf(config_filename, PATH_MAX, "%s/" H5FD_SUBFILING_CONFIG_FILENAME_TEMPLATE, config_dir,
+ SUBF_FILENAME, (uint64_t)file_info.st_ino);
config_file = fopen(config_filename, "r");
VRFY(config_file, "fopen succeeded");
@@ -347,27 +347,27 @@ test_config_file(void)
config_buf[config_file_len] = '\0';
/* Check the stripe_size field in the configuration file */
- substr = HDstrstr(config_buf, "stripe_size");
- VRFY(substr, "HDstrstr succeeded");
+ substr = strstr(config_buf, "stripe_size");
+ VRFY(substr, "strstr succeeded");
- VRFY((HDsscanf(substr, "stripe_size=%" PRId64, &read_stripe_size) == 1), "HDsscanf succeeded");
+ VRFY((sscanf(substr, "stripe_size=%" PRId64, &read_stripe_size) == 1), "sscanf succeeded");
VRFY((read_stripe_size == cfg.stripe_size), "Stripe size comparison succeeded");
/* Check the aggregator_count field in the configuration file */
- substr = HDstrstr(config_buf, "aggregator_count");
- VRFY(substr, "HDstrstr succeeded");
+ substr = strstr(config_buf, "aggregator_count");
+ VRFY(substr, "strstr succeeded");
- VRFY((HDsscanf(substr, "aggregator_count=%d", &read_aggr_count) == 1), "HDsscanf succeeded");
+ VRFY((sscanf(substr, "aggregator_count=%d", &read_aggr_count) == 1), "sscanf succeeded");
if (cfg.stripe_count < num_iocs_g)
VRFY((read_aggr_count == cfg.stripe_count), "Aggregator count comparison succeeded");
else
VRFY((read_aggr_count == num_iocs_g), "Aggregator count comparison succeeded");
/* Check the subfile_count field in the configuration file */
- substr = HDstrstr(config_buf, "subfile_count");
- VRFY(substr, "HDstrstr succeeded");
+ substr = strstr(config_buf, "subfile_count");
+ VRFY(substr, "strstr succeeded");
- VRFY((HDsscanf(substr, "subfile_count=%d", &read_stripe_count) == 1), "HDsscanf succeeded");
+ VRFY((sscanf(substr, "subfile_count=%d", &read_stripe_count) == 1), "sscanf succeeded");
VRFY((read_stripe_count == cfg.stripe_count), "Stripe count comparison succeeded");
/* Check the hdf5_file and subfile_dir fields in the configuration file */
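
The strstr()/sscanf() pairing above pulls key=value fields out of the textual subfiling configuration. A self-contained sketch of the same pattern on a made-up buffer:

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
    const char *cbuf  = "stripe_size=1048576\nsubfile_count=4\n";
    int64_t     ssize = 0;
    const char *p     = strstr(cbuf, "stripe_size");

    if (p != NULL && sscanf(p, "stripe_size=%" PRId64, &ssize) == 1)
        printf("stripe_size = %" PRId64 "\n", ssize);
    return 0;
}
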
@@ -379,21 +379,21 @@ test_config_file(void)
tmp_buf = malloc(PATH_MAX);
VRFY(tmp_buf, "malloc succeeded");
- substr = HDstrstr(config_buf, "hdf5_file");
- VRFY(substr, "HDstrstr succeeded");
+ substr = strstr(config_buf, "hdf5_file");
+ VRFY(substr, "strstr succeeded");
- HDsnprintf(scan_format, sizeof(scan_format), "hdf5_file=%%%zus", (size_t)(PATH_MAX - 1));
- VRFY((HDsscanf(substr, scan_format, tmp_buf) == 1), "HDsscanf succeeded");
+ snprintf(scan_format, sizeof(scan_format), "hdf5_file=%%%zus", (size_t)(PATH_MAX - 1));
+ VRFY((sscanf(substr, scan_format, tmp_buf) == 1), "sscanf succeeded");
- VRFY((HDstrcmp(tmp_buf, resolved_path) == 0), "HDstrcmp succeeded");
+ VRFY((strcmp(tmp_buf, resolved_path) == 0), "strcmp succeeded");
- substr = HDstrstr(config_buf, "subfile_dir");
- VRFY(substr, "HDstrstr succeeded");
+ substr = strstr(config_buf, "subfile_dir");
+ VRFY(substr, "strstr succeeded");
- HDsnprintf(scan_format, sizeof(scan_format), "subfile_dir=%%%zus", (size_t)(PATH_MAX - 1));
- VRFY((HDsscanf(substr, scan_format, tmp_buf) == 1), "HDsscanf succeeded");
+ snprintf(scan_format, sizeof(scan_format), "subfile_dir=%%%zus", (size_t)(PATH_MAX - 1));
+ VRFY((sscanf(substr, scan_format, tmp_buf) == 1), "sscanf succeeded");
- VRFY((HDstrcmp(tmp_buf, subfile_dir) == 0), "HDstrcmp succeeded");
+ VRFY((strcmp(tmp_buf, subfile_dir) == 0), "strcmp succeeded");
free(tmp_buf);
H5MM_free(subfile_dir);
@@ -403,20 +403,20 @@ test_config_file(void)
VRFY(subfile_name, "malloc succeeded");
/* Verify the name of each subfile is in the configuration file */
- num_digits = (int)(HDlog10(cfg.stripe_count) + 1);
+ num_digits = (int)(log10(cfg.stripe_count) + 1);
for (size_t i = 0; i < (size_t)cfg.stripe_count; i++) {
- HDsnprintf(subfile_name, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
- (uint64_t)file_info.st_ino, num_digits, (int)i + 1, cfg.stripe_count);
+ snprintf(subfile_name, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
+ (uint64_t)file_info.st_ino, num_digits, (int)i + 1, cfg.stripe_count);
- substr = HDstrstr(config_buf, subfile_name);
- VRFY(substr, "HDstrstr succeeded");
+ substr = strstr(config_buf, subfile_name);
+ VRFY(substr, "strstr succeeded");
}
/* Verify that there aren't too many subfiles */
- HDsnprintf(subfile_name, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
- (uint64_t)file_info.st_ino, num_digits, (int)cfg.stripe_count + 1, cfg.stripe_count);
- substr = HDstrstr(config_buf, subfile_name);
- VRFY(substr == NULL, "HDstrstr correctly failed");
+ snprintf(subfile_name, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
+ (uint64_t)file_info.st_ino, num_digits, (int)cfg.stripe_count + 1, cfg.stripe_count);
+ substr = strstr(config_buf, subfile_name);
+ VRFY(substr == NULL, "strstr correctly failed");
free(subfile_name);
free(config_buf);
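
The digit count that sizes the zero-padded index in each subfile name comes from the log10 computation above; for example (made-up count, <math.h> assumed):

int num_subfiles = 12;
int num_digits   = (int)(log10(num_subfiles) + 1);   /* log10(12) + 1 == 2.07..., so 2 digits */

Names then run from "...01" up to "...12", and the "too many subfiles" probe simply formats index num_subfiles + 1 and expects the lookup to fail.
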
@@ -507,7 +507,7 @@ test_stripe_sizes(void)
FILE *subfile_ptr;
num_subfiles = 1;
- num_digits = (int)(HDlog10(num_subfiles) + 1);
+ num_digits = (int)(log10(num_subfiles) + 1);
nbytes = (size_t)(cfg.stripe_size * num_subfiles);
@@ -518,7 +518,7 @@ test_stripe_sizes(void)
c_write_buf = write_buf;
- fapl_id = create_subfiling_ioc_fapl(MPI_COMM_SELF, MPI_INFO_NULL, TRUE, &cfg,
+ fapl_id = create_subfiling_ioc_fapl(MPI_COMM_SELF, MPI_INFO_NULL, true, &cfg,
H5FD_IOC_DEFAULT_THREAD_POOL_SIZE);
VRFY((fapl_id >= 0), "FAPL creation succeeded");
@@ -566,8 +566,8 @@ test_stripe_sizes(void)
h5_stat_size_t subfile_size;
h5_stat_t subfile_info;
- HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
- (uint64_t)file_info.st_ino, num_digits, j + 1, num_subfiles);
+ snprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
+ (uint64_t)file_info.st_ino, num_digits, j + 1, num_subfiles);
/* Ensure file exists */
subfile_ptr = fopen(tmp_filename, "r");
@@ -582,8 +582,8 @@ test_stripe_sizes(void)
}
/* Verify that there aren't too many subfiles */
- HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
- (uint64_t)file_info.st_ino, num_digits, num_subfiles + 1, num_subfiles);
+ snprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
+ (uint64_t)file_info.st_ino, num_digits, num_subfiles + 1, num_subfiles);
/* Ensure file doesn't exist */
subfile_ptr = fopen(tmp_filename, "r");
@@ -612,8 +612,8 @@ test_stripe_sizes(void)
h5_stat_size_t subfile_size;
h5_stat_t subfile_info;
- HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
- (uint64_t)file_info.st_ino, num_digits, j + 1, num_subfiles);
+ snprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
+ (uint64_t)file_info.st_ino, num_digits, j + 1, num_subfiles);
/* Ensure file exists */
subfile_ptr = fopen(tmp_filename, "r");
@@ -628,8 +628,8 @@ test_stripe_sizes(void)
}
/* Verify that there aren't too many subfiles */
- HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
- (uint64_t)file_info.st_ino, num_digits, num_subfiles + 1, num_subfiles);
+ snprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
+ (uint64_t)file_info.st_ino, num_digits, num_subfiles + 1, num_subfiles);
/* Ensure file doesn't exist */
subfile_ptr = fopen(tmp_filename, "r");
@@ -656,7 +656,7 @@ test_stripe_sizes(void)
cfg.stripe_count = num_iocs_g;
- fapl_id = create_subfiling_ioc_fapl(comm_g, info_g, TRUE, &cfg, H5FD_IOC_DEFAULT_THREAD_POOL_SIZE);
+ fapl_id = create_subfiling_ioc_fapl(comm_g, info_g, true, &cfg, H5FD_IOC_DEFAULT_THREAD_POOL_SIZE);
VRFY((fapl_id >= 0), "FAPL creation succeeded");
/* Create and close file with H5Fcreate to setup superblock */
@@ -669,7 +669,7 @@ test_stripe_sizes(void)
VRFY(file_ptr, "H5FDopen succeeded");
num_subfiles = num_iocs_g;
- num_digits = (int)(HDlog10(num_subfiles) + 1);
+ num_digits = (int)(log10(num_subfiles) + 1);
nbytes = (size_t)(cfg.stripe_size * num_subfiles);
@@ -732,8 +732,8 @@ test_stripe_sizes(void)
h5_stat_size_t subfile_size;
h5_stat_t subfile_info;
- HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
- (uint64_t)file_info.st_ino, num_digits, j + 1, num_subfiles);
+ snprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
+ (uint64_t)file_info.st_ino, num_digits, j + 1, num_subfiles);
/* Ensure file exists */
subfile_ptr = fopen(tmp_filename, "r");
@@ -748,8 +748,8 @@ test_stripe_sizes(void)
}
/* Verify that there aren't too many subfiles */
- HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
- (uint64_t)file_info.st_ino, num_digits, num_subfiles + 1, num_subfiles);
+ snprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
+ (uint64_t)file_info.st_ino, num_digits, num_subfiles + 1, num_subfiles);
/* Ensure file doesn't exist */
subfile_ptr = fopen(tmp_filename, "r");
@@ -789,8 +789,8 @@ test_stripe_sizes(void)
h5_stat_size_t subfile_size;
h5_stat_t subfile_info;
- HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
- (uint64_t)file_info.st_ino, num_digits, j + 1, num_subfiles);
+ snprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
+ (uint64_t)file_info.st_ino, num_digits, j + 1, num_subfiles);
/* Ensure file exists */
subfile_ptr = fopen(tmp_filename, "r");
@@ -805,8 +805,8 @@ test_stripe_sizes(void)
}
/* Verify that there aren't too many subfiles */
- HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
- (uint64_t)file_info.st_ino, num_digits, num_subfiles + 1, num_subfiles);
+ snprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
+ (uint64_t)file_info.st_ino, num_digits, num_subfiles + 1, num_subfiles);
/* Ensure file doesn't exist */
subfile_ptr = fopen(tmp_filename, "r");
@@ -908,7 +908,7 @@ test_selection_strategies(void)
else
stride = 16;
- HDsnprintf(sel_criteria, 128, "%d", stride);
+ snprintf(sel_criteria, 128, "%d", stride);
expected_num_subfiles = ((num_active_ranks - 1) / stride) + 1;
@@ -930,7 +930,7 @@ test_selection_strategies(void)
else
n_iocs = 16;
- HDsnprintf(sel_criteria, 128, "%d", n_iocs);
+ snprintf(sel_criteria, 128, "%d", n_iocs);
expected_num_subfiles = n_iocs;
@@ -946,10 +946,10 @@ test_selection_strategies(void)
}
if (criteria_format_choice == 0) {
- HDsnprintf(criteria_buf, 256, "%d:%s", strategy, sel_criteria);
+ snprintf(criteria_buf, 256, "%d:%s", strategy, sel_criteria);
}
else if (criteria_format_choice == 1) {
- HDsnprintf(criteria_buf, 256, "%s", sel_criteria);
+ snprintf(criteria_buf, 256, "%s", sel_criteria);
}
VRFY(HDsetenv(H5FD_SUBFILING_IOC_SELECTION_CRITERIA, criteria_buf, 1) >= 0,
@@ -965,7 +965,7 @@ test_selection_strategies(void)
if (num_active_ranks < mpi_size)
file_comm = MPI_COMM_SELF;
- fapl_id = create_subfiling_ioc_fapl(file_comm, info_g, TRUE, &cfg,
+ fapl_id = create_subfiling_ioc_fapl(file_comm, info_g, true, &cfg,
H5FD_IOC_DEFAULT_THREAD_POOL_SIZE);
VRFY((fapl_id >= 0), "FAPL creation succeeded");
@@ -977,12 +977,12 @@ test_selection_strategies(void)
*/
VRFY((HDstat(SUBF_FILENAME, &file_info) >= 0), "HDstat succeeded");
- num_digits = (int)(HDlog10(expected_num_subfiles) + 1);
+ num_digits = (int)(log10(expected_num_subfiles) + 1);
/* Ensure all the subfiles are present */
for (int i = 0; i < expected_num_subfiles; i++) {
- HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
- (uint64_t)file_info.st_ino, num_digits, i + 1, expected_num_subfiles);
+ snprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
+ (uint64_t)file_info.st_ino, num_digits, i + 1, expected_num_subfiles);
/* Ensure file exists */
subfile_ptr = fopen(tmp_filename, "r");
@@ -991,9 +991,9 @@ test_selection_strategies(void)
}
/* Ensure no extra subfiles are present */
- HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
- (uint64_t)file_info.st_ino, num_digits, expected_num_subfiles + 1,
- expected_num_subfiles);
+ snprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
+ (uint64_t)file_info.st_ino, num_digits, expected_num_subfiles + 1,
+ expected_num_subfiles);
/* Ensure file doesn't exist */
subfile_ptr = fopen(tmp_filename, "r");
@@ -1079,7 +1079,7 @@ test_read_different_stripe_size(void)
cfg.stripe_size = (stripe_size_g > 0) ? stripe_size_g : 1048576;
cfg.stripe_count = num_iocs_g;
- fapl_id = create_subfiling_ioc_fapl(comm_g, info_g, TRUE, &cfg, H5FD_IOC_DEFAULT_THREAD_POOL_SIZE);
+ fapl_id = create_subfiling_ioc_fapl(comm_g, info_g, true, &cfg, H5FD_IOC_DEFAULT_THREAD_POOL_SIZE);
VRFY((fapl_id >= 0), "FAPL creation succeeded");
file_id = H5Fcreate(SUBF_FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
@@ -1132,7 +1132,7 @@ test_read_different_stripe_size(void)
h5_stat_t file_info;
FILE *subfile_ptr;
int num_subfiles = cfg.stripe_count;
- int num_digits = (int)(HDlog10(num_subfiles) + 1);
+ int num_digits = (int)(log10(num_subfiles) + 1);
VRFY((HDstat(SUBF_FILENAME, &file_info) >= 0), "HDstat succeeded");
@@ -1140,8 +1140,8 @@ test_read_different_stripe_size(void)
h5_stat_size_t subfile_size;
h5_stat_t subfile_info;
- HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
- (uint64_t)file_info.st_ino, num_digits, j + 1, num_subfiles);
+ snprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
+ (uint64_t)file_info.st_ino, num_digits, j + 1, num_subfiles);
/* Ensure file exists */
subfile_ptr = fopen(tmp_filename, "r");
@@ -1163,7 +1163,7 @@ test_read_different_stripe_size(void)
cfg.stripe_size += (cfg.stripe_size / 2);
cfg.stripe_count *= 2;
- fapl_id = create_subfiling_ioc_fapl(comm_g, info_g, TRUE, &cfg, H5FD_IOC_DEFAULT_THREAD_POOL_SIZE);
+ fapl_id = create_subfiling_ioc_fapl(comm_g, info_g, true, &cfg, H5FD_IOC_DEFAULT_THREAD_POOL_SIZE);
VRFY((fapl_id >= 0), "FAPL creation succeeded");
file_id = H5Fopen(SUBF_FILENAME, H5F_ACC_RDONLY, fapl_id);
@@ -1205,13 +1205,13 @@ test_read_different_stripe_size(void)
h5_stat_t file_info;
FILE *subfile_ptr;
int num_subfiles = cfg.stripe_count;
- int num_digits = (int)(HDlog10(num_subfiles / 2) + 1);
+ int num_digits = (int)(log10(num_subfiles / 2) + 1);
VRFY((HDstat(SUBF_FILENAME, &file_info) >= 0), "HDstat succeeded");
for (int j = 0; j < num_subfiles; j++) {
- HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
- (uint64_t)file_info.st_ino, num_digits, j + 1, num_subfiles / 2);
+ snprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
+ (uint64_t)file_info.st_ino, num_digits, j + 1, num_subfiles / 2);
if (j < (num_subfiles / 2)) {
/* Ensure file exists */
@@ -1317,7 +1317,7 @@ test_subfiling_precreate_rank_0(void)
cfg.stripe_size = (stripe_size_g > 0) ? stripe_size_g : H5FD_SUBFILING_DEFAULT_STRIPE_SIZE;
cfg.stripe_count = num_iocs_g;
- fapl_id = create_subfiling_ioc_fapl(MPI_COMM_SELF, MPI_INFO_NULL, TRUE, &cfg,
+ fapl_id = create_subfiling_ioc_fapl(MPI_COMM_SELF, MPI_INFO_NULL, true, &cfg,
H5FD_IOC_DEFAULT_THREAD_POOL_SIZE);
VRFY((fapl_id >= 0), "FAPL creation succeeded");
@@ -1353,7 +1353,7 @@ test_subfiling_precreate_rank_0(void)
*/
num_subfiles = cfg.stripe_count;
- num_digits = (int)(HDlog10(num_subfiles) + 1);
+ num_digits = (int)(log10(num_subfiles) + 1);
VRFY((HDstat(SUBF_FILENAME, &file_info) >= 0), "HDstat succeeded");
@@ -1363,8 +1363,8 @@ test_subfiling_precreate_rank_0(void)
for (int i = 0; i < num_subfiles; i++) {
h5_stat_t subfile_info;
- HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
- (uint64_t)file_info.st_ino, num_digits, i + 1, num_subfiles);
+ snprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
+ (uint64_t)file_info.st_ino, num_digits, i + 1, num_subfiles);
/* Ensure file exists */
subfile_ptr = fopen(tmp_filename, "r");
@@ -1379,8 +1379,8 @@ test_subfiling_precreate_rank_0(void)
}
/* Verify that there aren't too many subfiles */
- HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
- (uint64_t)file_info.st_ino, num_digits, num_subfiles + 1, num_subfiles);
+ snprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME,
+ (uint64_t)file_info.st_ino, num_digits, num_subfiles + 1, num_subfiles);
/* Ensure file doesn't exist */
subfile_ptr = fopen(tmp_filename, "r");
@@ -1395,7 +1395,7 @@ test_subfiling_precreate_rank_0(void)
/* Open the file on all ranks */
- fapl_id = create_subfiling_ioc_fapl(comm_g, info_g, FALSE, NULL, 0);
+ fapl_id = create_subfiling_ioc_fapl(comm_g, info_g, false, NULL, 0);
VRFY((fapl_id >= 0), "FAPL creation succeeded");
file_id = H5Fopen(SUBF_FILENAME, H5F_ACC_RDONLY, fapl_id);
@@ -1481,7 +1481,7 @@ test_subfiling_write_many_read_one(void)
TESTING_2("reading back file with single MPI rank");
/* Get a default Subfiling FAPL */
- fapl_id = create_subfiling_ioc_fapl(comm_g, info_g, FALSE, NULL, 0);
+ fapl_id = create_subfiling_ioc_fapl(comm_g, info_g, false, NULL, 0);
VRFY((fapl_id >= 0), "FAPL creation succeeded");
/* Create file on all ranks */
@@ -1541,7 +1541,7 @@ test_subfiling_write_many_read_one(void)
VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Barrier succeeded");
if (MAINPROCESS) {
- fapl_id = create_subfiling_ioc_fapl(MPI_COMM_SELF, MPI_INFO_NULL, FALSE, NULL, 0);
+ fapl_id = create_subfiling_ioc_fapl(MPI_COMM_SELF, MPI_INFO_NULL, false, NULL, 0);
VRFY((fapl_id >= 0), "FAPL creation succeeded");
file_id = H5Fopen(SUBF_FILENAME, H5F_ACC_RDONLY, fapl_id);
@@ -1606,7 +1606,7 @@ test_subfiling_write_many_read_few(void)
hsize_t start[1];
hsize_t count[1];
hsize_t dset_dims[1];
- hbool_t reading_file = FALSE;
+ bool reading_file = false;
size_t target_size;
hid_t file_id = H5I_INVALID_HID;
hid_t fapl_id = H5I_INVALID_HID;
@@ -1637,7 +1637,7 @@ test_subfiling_write_many_read_few(void)
}
/* Get a default Subfiling FAPL */
- fapl_id = create_subfiling_ioc_fapl(comm_g, info_g, FALSE, NULL, 0);
+ fapl_id = create_subfiling_ioc_fapl(comm_g, info_g, false, NULL, 0);
VRFY((fapl_id >= 0), "FAPL creation succeeded");
/* Create file on all ranks */
@@ -1717,17 +1717,17 @@ test_subfiling_write_many_read_few(void)
}
if (color)
- reading_file = TRUE;
+ reading_file = true;
}
else {
if (node_local_rank == 0) {
sub_comm = ioc_comm;
- reading_file = TRUE;
+ reading_file = true;
}
}
if (reading_file) {
- fapl_id = create_subfiling_ioc_fapl(sub_comm, MPI_INFO_NULL, FALSE, NULL, 0);
+ fapl_id = create_subfiling_ioc_fapl(sub_comm, MPI_INFO_NULL, false, NULL, 0);
VRFY((fapl_id >= 0), "FAPL creation succeeded");
file_id = H5Fopen(SUBF_FILENAME, H5F_ACC_RDONLY, fapl_id);
@@ -1848,7 +1848,7 @@ test_subfiling_h5fuse(void)
}
/* Get a default Subfiling FAPL */
- fapl_id = create_subfiling_ioc_fapl(comm_g, info_g, FALSE, NULL, 0);
+ fapl_id = create_subfiling_ioc_fapl(comm_g, info_g, false, NULL, 0);
VRFY((fapl_id >= 0), "FAPL creation succeeded");
/* Create file on all ranks */
@@ -1935,16 +1935,15 @@ test_subfiling_h5fuse(void)
VRFY(tmp_filename, "malloc succeeded");
/* Generate name for configuration file */
- HDsnprintf(tmp_filename, PATH_MAX, "%s/" H5FD_SUBFILING_CONFIG_FILENAME_TEMPLATE, config_dir,
- SUBF_FILENAME, file_inode);
+ snprintf(tmp_filename, PATH_MAX, "%s/" H5FD_SUBFILING_CONFIG_FILENAME_TEMPLATE, config_dir,
+ SUBF_FILENAME, file_inode);
- args[0] = HDstrdup("env");
- args[1] = HDstrdup("sh");
- args[2] = HDstrdup("h5fuse.sh");
- args[3] = HDstrdup("-q");
- args[4] = HDstrdup("-f");
- args[5] = tmp_filename;
- args[6] = NULL;
+ args[0] = strdup("env");
+ args[1] = strdup("./h5fuse.sh");
+ args[2] = strdup("-q");
+ args[3] = strdup("-f");
+ args[4] = tmp_filename;
+ args[5] = NULL;
/* Call h5fuse script from MPI rank 0 */
execvp("env", args);
@@ -2013,7 +2012,7 @@ test_subfiling_h5fuse(void)
if (MAINPROCESS) {
char *filename_buf;
int num_subfiles = num_iocs_g;
- int num_digits = (int)(HDlog10(num_subfiles) + 1);
+ int num_digits = (int)(log10(num_subfiles) + 1);
/* Delete the regular HDF5 file */
H5Pset_fapl_sec2(fapl_id);
@@ -2028,8 +2027,8 @@ test_subfiling_h5fuse(void)
VRFY(filename_buf, "malloc succeeded");
/* Generate name for configuration file */
- HDsnprintf(filename_buf, PATH_MAX, "%s/" H5FD_SUBFILING_CONFIG_FILENAME_TEMPLATE, config_dir,
- SUBF_FILENAME, file_inode);
+ snprintf(filename_buf, PATH_MAX, "%s/" H5FD_SUBFILING_CONFIG_FILENAME_TEMPLATE, config_dir,
+ SUBF_FILENAME, file_inode);
/* Delete the configuration file */
if (HDremove(filename_buf) < 0) {
@@ -2039,8 +2038,8 @@ test_subfiling_h5fuse(void)
for (int i = 0; i < num_subfiles; i++) {
/* Generate name for each subfile */
- HDsnprintf(filename_buf, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME, file_inode,
- num_digits, i + 1, num_subfiles);
+ snprintf(filename_buf, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME, file_inode,
+ num_digits, i + 1, num_subfiles);
/* Delete the subfile */
if (HDremove(filename_buf) < 0) {
@@ -2072,13 +2071,13 @@ parse_subfiling_env_vars(void)
{
char *env_value;
- if (NULL != (env_value = HDgetenv(H5FD_SUBFILING_STRIPE_SIZE))) {
+ if (NULL != (env_value = getenv(H5FD_SUBFILING_STRIPE_SIZE))) {
stripe_size_g = strtoll(env_value, NULL, 0);
if ((ERANGE == errno) || (stripe_size_g <= 0))
stripe_size_g = -1;
}
- if (NULL != (env_value = HDgetenv(H5FD_SUBFILING_IOC_PER_NODE))) {
+ if (NULL != (env_value = getenv(H5FD_SUBFILING_IOC_PER_NODE))) {
ioc_per_node_g = strtol(env_value, NULL, 0);
if ((ERANGE == errno) || (ioc_per_node_g <= 0))
ioc_per_node_g = -1;
@@ -2092,16 +2091,16 @@ parse_subfiling_env_vars(void)
ioc_per_node_g = node_local_size;
}
- if (NULL != (env_value = HDgetenv(H5FD_IOC_THREAD_POOL_SIZE))) {
+ if (NULL != (env_value = getenv(H5FD_IOC_THREAD_POOL_SIZE))) {
ioc_thread_pool_size_g = atoi(env_value);
if (ioc_thread_pool_size_g <= 0)
ioc_thread_pool_size_g = -1;
}
- if (NULL != (env_value = HDgetenv(H5FD_SUBFILING_CONFIG_FILE_PREFIX))) {
+ if (NULL != (env_value = getenv(H5FD_SUBFILING_CONFIG_FILE_PREFIX))) {
assert(config_dir);
- HDstrncpy(config_dir, env_value, PATH_MAX);
+ strncpy(config_dir, env_value, PATH_MAX);
/* Just in case.. */
config_dir[PATH_MAX - 1] = '\0';
@@ -2117,11 +2116,10 @@ int
main(int argc, char **argv)
{
unsigned seed;
- hbool_t must_unset_stripe_size_env = FALSE;
- hbool_t must_unset_ioc_per_node_env = FALSE;
- hbool_t must_unset_ioc_thread_count_env = FALSE;
- hbool_t must_unset_config_dir_env = FALSE;
- char *env_value = NULL;
+ bool must_unset_stripe_size_env = false;
+ bool must_unset_ioc_per_node_env = false;
+ bool must_unset_ioc_thread_count_env = false;
+ bool must_unset_config_dir_env = false;
int required = MPI_THREAD_MULTIPLE;
int provided = 0;
@@ -2257,7 +2255,7 @@ main(int argc, char **argv)
}
/* Initialize to current working directory for now */
- HDsnprintf(config_dir, PATH_MAX, ".");
+ snprintf(config_dir, PATH_MAX, ".");
/* Grab values from environment variables if set */
parse_subfiling_env_vars();
@@ -2283,7 +2281,7 @@ main(int argc, char **argv)
}
if (MAINPROCESS)
- HDputs("");
+ puts("");
/*
* Set any unset Subfiling environment variables and re-run
@@ -2308,7 +2306,7 @@ main(int argc, char **argv)
VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Bcast succeeded");
}
- HDsnprintf(tmp, sizeof(tmp), "%" PRId64, stripe_size);
+ snprintf(tmp, sizeof(tmp), "%" PRId64, stripe_size);
if (HDsetenv(H5FD_SUBFILING_STRIPE_SIZE, tmp, 1) < 0) {
if (MAINPROCESS)
@@ -2317,7 +2315,7 @@ main(int argc, char **argv)
goto exit;
}
- must_unset_stripe_size_env = TRUE;
+ must_unset_stripe_size_env = true;
}
if (ioc_per_node_g < 0) {
const char *ioc_per_node_str;
@@ -2334,7 +2332,7 @@ main(int argc, char **argv)
goto exit;
}
- must_unset_ioc_per_node_env = TRUE;
+ must_unset_ioc_per_node_env = true;
}
if (ioc_thread_pool_size_g < 0) {
if (HDsetenv(H5FD_IOC_THREAD_POOL_SIZE, "2", 1) < 0) {
@@ -2344,10 +2342,10 @@ main(int argc, char **argv)
goto exit;
}
- must_unset_ioc_thread_count_env = TRUE;
+ must_unset_ioc_thread_count_env = true;
}
- if (!(env_value = HDgetenv(H5FD_SUBFILING_CONFIG_FILE_PREFIX))) {
+ if (NULL == getenv(H5FD_SUBFILING_CONFIG_FILE_PREFIX)) {
int rand_value = 0;
if (MAINPROCESS)
@@ -2397,7 +2395,7 @@ main(int argc, char **argv)
}
}
- must_unset_config_dir_env = TRUE;
+ must_unset_config_dir_env = true;
}
/* Grab values from environment variables */
@@ -2428,13 +2426,13 @@ main(int argc, char **argv)
}
if (MAINPROCESS)
- HDputs("");
+ puts("");
if (nerrors)
goto exit;
if (MAINPROCESS)
- HDputs("All Subfiling VFD tests passed\n");
+ puts("All Subfiling VFD tests passed\n");
exit:
if (must_unset_stripe_size_env)
diff --git a/testpar/t_vfd.c b/testpar/t_vfd.c
index 5e9070d..ac524ac 100644
--- a/testpar/t_vfd.c
+++ b/testpar/t_vfd.c
@@ -29,8 +29,8 @@
static MPI_Comm comm = MPI_COMM_WORLD;
static MPI_Info info = MPI_INFO_NULL;
-hbool_t pass = TRUE; /* set to FALSE on error */
-hbool_t disp_failure_mssgs = TRUE; /* global force display of failure messages */
+bool pass = true; /* set to false on error */
+bool disp_failure_mssgs = true; /* global force display of failure messages */
const char *failure_mssg = NULL;
const char *FILENAMES[] = {"mpio_vfd_test_file_0", /*0*/
@@ -162,7 +162,7 @@ int *erbufs[2] = {NULL, NULL}; /* Array of expected read buffers */
if (err_result == 0) \
PASSED(); \
else \
- HDputs(" ***TEST FAILED***"); \
+ puts(" ***TEST FAILED***"); \
} \
} while (0)
@@ -170,7 +170,7 @@ int *erbufs[2] = {NULL, NULL}; /* Array of expected read buffers */
static herr_t test_selection_io_read_verify(hid_t dxpl, int mpi_rank, hsize_t start[], hsize_t block[],
H5FD_t *lf, H5FD_mem_t type, uint32_t count, hid_t mem_spaces[],
hid_t file_spaces[], haddr_t offsets[], size_t element_sizes[],
- uint32_t rbufcount, int *erb[], hbool_t shorten_rbufs);
+ uint32_t rbufcount, int *erb[], bool shorten_rbufs);
static herr_t test_selection_io_write(hid_t dxpl, H5FD_t *lf, H5FD_mem_t type, uint32_t count,
hid_t mem_spaces[], hid_t file_spaces[], haddr_t offsets[],
@@ -218,9 +218,9 @@ alloc_and_init_file_images(int mpi_size)
int buf_len;
size_t buf_size;
int i;
- hbool_t show_progress = FALSE;
+ bool show_progress = false;
- pass = TRUE;
+ pass = true;
if (show_progress)
fprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -240,7 +240,7 @@ alloc_and_init_file_images(int mpi_size)
if ((!increasing_fi_buf) || (!decreasing_fi_buf) || (!negative_fi_buf) || (!zero_fi_buf) ||
(!read_fi_buf)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Can't allocate one or more file image buffers.";
}
}
@@ -348,11 +348,11 @@ setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_x
const char *fcn_name = "setup_vfd_test_file()";
char filename[512];
int cp = 0;
- hbool_t show_progress = FALSE;
- hid_t fapl_id = -1; /* file access property list ID */
- hid_t dxpl_id = -1; /* data access property list ID */
- unsigned flags = 0; /* file open flags */
- H5FD_t *lf = NULL; /* VFD struct ptr */
+ bool show_progress = false;
+ hid_t fapl_id = H5I_INVALID_HID; /* file access property list ID */
+ hid_t dxpl_id = H5I_INVALID_HID; /* data access property list ID */
+ unsigned flags = 0; /* file open flags */
+ H5FD_t *lf = NULL; /* VFD struct ptr */
assert(vfd_name);
assert(lf_ptr);
@@ -368,7 +368,7 @@ setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_x
if (h5_fixname(FILENAMES[file_name_id], H5P_DEFAULT, filename, sizeof(filename)) == NULL) {
- pass = FALSE;
+ pass = false;
failure_mssg = "h5_fixname() failed.\n";
}
}
@@ -381,23 +381,23 @@ setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_x
if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Can't create fapl.";
}
}
if (pass) {
- if (HDstrcmp(vfd_name, "mpio") == 0) {
+ if (strcmp(vfd_name, "mpio") == 0) {
if (H5Pset_fapl_mpio(fapl_id, comm, info) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Can't set mpio fapl.";
}
}
#ifdef H5_HAVE_SUBFILING_VFD
- else if (HDstrcmp(vfd_name, H5FD_SUBFILING_NAME) == 0) {
+ else if (strcmp(vfd_name, H5FD_SUBFILING_NAME) == 0) {
H5FD_subfiling_params_t shared_conf = {
/* ioc_selection = */ SELECT_IOC_ONE_PER_NODE,
@@ -408,7 +408,7 @@ setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_x
/* magic = */ H5FD_SUBFILING_FAPL_MAGIC,
/* version = */ H5FD_SUBFILING_CURR_FAPL_VERSION,
/* ioc_fapl_id = */ H5P_DEFAULT, /* will over write? */
- /* require_ioc = */ TRUE,
+ /* require_ioc = */ true,
/* shared_cfg = */ shared_conf,
};
H5FD_ioc_config_t ioc_config = {
@@ -420,21 +420,21 @@ setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_x
if ((pass) && ((ioc_fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Can't create ioc fapl.";
}
/* set the MPI communicator and info in the FAPL */
if (H5Pset_mpi_params(ioc_fapl, comm, info) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Can't set MPI communicator and info in IOC fapl.";
}
/* set the MPI communicator and info in the FAPL */
if (H5Pset_mpi_params(fapl_id, comm, info) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Can't set MPI communicator and info in subfiling fapl.";
}
@@ -444,7 +444,7 @@ setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_x
/* Get subfiling VFD defaults */
if ((pass) && (H5Pget_fapl_subfiling(fapl_id, &subfiling_conf) == FAIL)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Can't get sub-filing VFD defaults.";
}
@@ -453,14 +453,14 @@ setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_x
/* Get IOC VFD defaults */
if ((pass) && ((H5Pget_fapl_ioc(ioc_fapl, &ioc_config) == FAIL))) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Can't get IOC VFD defaults.";
}
/* Now we can set the IOC fapl. */
if ((pass) && ((H5Pset_fapl_ioc(ioc_fapl, &ioc_config) == FAIL))) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Can't set IOC fapl.";
}
}
@@ -468,7 +468,7 @@ setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_x
if ((pass) && ((H5Pset_fapl_sec2(ioc_fapl) == FAIL))) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Can't set sec2 fapl.";
}
}
@@ -479,13 +479,13 @@ setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_x
/* Now we can set the SUBFILING fapl before returning. */
if ((pass) && (H5Pset_fapl_subfiling(fapl_id, &subfiling_conf) == FAIL)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Can't set subfiling fapl.";
}
}
#endif
else {
- pass = FALSE;
+ pass = false;
failure_mssg = "un-supported VFD";
}
}
@@ -498,7 +498,7 @@ setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_x
if (h5_fixname(FILENAMES[file_name_id], H5P_DEFAULT, filename, sizeof(filename)) == NULL) {
- pass = FALSE;
+ pass = false;
failure_mssg = "h5_fixname() failed.\n";
}
}
@@ -514,7 +514,7 @@ setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_x
if (NULL == (lf = H5FDopen(filename, flags, fapl_id, HADDR_UNDEF))) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDopen() failed.\n";
}
}
@@ -530,7 +530,7 @@ setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_x
if (H5FDset_eoa(lf, H5FD_MEM_DEFAULT, eoa) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDset_eoa() failed.\n";
}
}
@@ -544,7 +544,7 @@ setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_x
if (dxpl_id < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pcreate(H5P_DATASET_XFER) failed.";
}
}
@@ -556,7 +556,7 @@ setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_x
if (H5Pset_dxpl_mpio(dxpl_id, xfer_mode) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pset_dxpl_mpio() failed.";
}
}
@@ -568,14 +568,14 @@ setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_x
if (H5Pset_dxpl_mpio_collective_opt(dxpl_id, coll_opt_mode) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5Pset_dxpl_mpio() failed.";
}
}
if (pass) { /* setup pointers with return values */
- HDstrncpy(file_name, filename, 512);
+ strncpy(file_name, filename, 512);
*lf_ptr = lf;
*fapl_id_ptr = fapl_id;
*dxpl_id_ptr = dxpl_id;
@@ -612,7 +612,7 @@ takedown_vfd_test_file(int mpi_rank, char *filename, H5FD_t **lf_ptr, hid_t *fap
{
const char *fcn_name = "takedown_vfd_test_file()";
int cp = 0;
- hbool_t show_progress = FALSE;
+ bool show_progress = false;
assert(lf_ptr);
assert(fapl_id_ptr);
@@ -629,7 +629,7 @@ takedown_vfd_test_file(int mpi_rank, char *filename, H5FD_t **lf_ptr, hid_t *fap
if (H5FDclose(*lf_ptr) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDclose() failed.\n";
}
}
@@ -647,7 +647,7 @@ takedown_vfd_test_file(int mpi_rank, char *filename, H5FD_t **lf_ptr, hid_t *fap
if ((mpi_rank == 0) && (HDremove(filename) < 0)) {
- pass = FALSE;
+ pass = false;
failure_mssg = "HDremove() failed.\n";
}
}
@@ -661,7 +661,7 @@ takedown_vfd_test_file(int mpi_rank, char *filename, H5FD_t **lf_ptr, hid_t *fap
/* Close the fapl */
if (H5Pclose(*fapl_id_ptr) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "can't close fapl.\n";
}
@@ -671,7 +671,7 @@ takedown_vfd_test_file(int mpi_rank, char *filename, H5FD_t **lf_ptr, hid_t *fap
/* Close the dxpl */
if (H5Pclose(*dxpl_id_ptr) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "can't close dxpl.\n";
}
@@ -718,7 +718,7 @@ takedown_vfd_test_file(int mpi_rank, char *filename, H5FD_t **lf_ptr, hid_t *fap
*
* 7) On rank 0, delete the test file.
*
- * Return: FALSE on success, TRUE if any errors are detected.
+ * Return: false on success, true if any errors are detected.
*
*-------------------------------------------------------------------------
*/
@@ -731,10 +731,10 @@ vector_read_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
char test_title[120];
char filename[512];
haddr_t eoa;
- hbool_t show_progress = FALSE;
- hid_t fapl_id = -1; /* file access property list ID */
- hid_t dxpl_id = -1; /* data access property list ID */
- H5FD_t *lf = NULL; /* VFD struct ptr */
+ bool show_progress = false;
+ hid_t fapl_id = H5I_INVALID_HID; /* file access property list ID */
+ hid_t dxpl_id = H5I_INVALID_HID; /* data access property list ID */
+ H5FD_t *lf = NULL; /* VFD struct ptr */
int cp = 0;
int i;
uint32_t count;
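
The handle declarations above also change their sentinel from -1 to H5I_INVALID_HID. H5I_INVALID_HID is currently defined as -1, so the behavior is unchanged, but the named constant states the intent and matches the value the library itself returns on failure. A minimal sketch of the declare / use / conditional-close pattern; the cleanup shape here is an assumption for illustration, not copied from this test.

#include "hdf5.h"

static void
demo_handle_lifetime(void) /* hypothetical */
{
    hid_t fapl_id = H5I_INVALID_HID; /* was: hid_t fapl_id = -1; */
    hid_t dxpl_id = H5I_INVALID_HID; /* was: hid_t dxpl_id = -1; */

    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
    dxpl_id = H5Pcreate(H5P_DATASET_XFER);

    /* ... use the property lists ... */

    /* Close only what was successfully created. */
    if (fapl_id != H5I_INVALID_HID)
        H5Pclose(fapl_id);
    if (dxpl_id != H5I_INVALID_HID)
        H5Pclose(dxpl_id);
}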
@@ -743,26 +743,26 @@ vector_read_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
size_t sizes[1];
void *bufs[1];
- pass = TRUE;
+ pass = true;
if (mpi_rank == 0) {
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 1 -- %s / independent",
- vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector read test 1 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 1 -- %s / col op / ind I/O",
- vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector read test 1 -- %s / col op / ind I/O",
+ vfd_name);
}
else {
assert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 1 -- %s / col op / col I/O",
- vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector read test 1 -- %s / col op / col I/O",
+ vfd_name);
}
TESTING(test_title);
@@ -797,7 +797,7 @@ vector_read_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (H5FDwrite(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)increasing_fi_buf) <
0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDwrite() on rank 0 failed.\n";
}
}
@@ -837,7 +837,7 @@ vector_read_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (H5FDread_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDread_vector() failed.\n";
}
@@ -847,7 +847,7 @@ vector_read_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (read_fi_buf[i] != 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Unexpected value in read_fi_buf (1).\n";
break;
}
@@ -856,7 +856,7 @@ vector_read_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (read_fi_buf[i] != increasing_fi_buf[i]) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Unexpected value in read_fi_buf (2).\n";
break;
}
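
For orientation, H5FDread_vector, the call under test here, takes parallel arrays of memory types, file addresses, sizes, and destination buffers; this hunk drives it with a single-entry vector. A hedged sketch of that single-entry shape follows; the wrapper and its arguments are illustrative, and the explicit H5FDdevelop.h include is an assumption about which header exposes the VFD developer API.

#include <stdbool.h>
#include "hdf5.h"
#include "H5FDdevelop.h" /* H5FD_t, H5FDread_vector (VFD developer API) */

static bool
demo_single_entry_read(H5FD_t *lf, hid_t dxpl_id, haddr_t addr, size_t nbytes, void *out_buf)
{
    uint32_t   count    = 1;
    H5FD_mem_t types[1] = {H5FD_MEM_DRAW};
    haddr_t    addrs[1] = {addr};
    size_t     sizes[1] = {nbytes};
    void      *bufs[1]  = {out_buf};

    /* Independent vs. collective behaviour comes from dxpl_id, as in the tests above. */
    return H5FDread_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) >= 0;
}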
@@ -948,7 +948,7 @@ vector_read_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
*
* 11) On rank 0, delete the test file.
*
- * Return: FALSE on success, TRUE if any errors are detected.
+ * Return: false on success, true if any errors are detected.
*
*-------------------------------------------------------------------------
*/
@@ -961,10 +961,10 @@ vector_read_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
char test_title[120];
char filename[512];
haddr_t eoa;
- hbool_t show_progress = FALSE;
- hid_t fapl_id = -1; /* file access property list ID */
- hid_t dxpl_id = -1; /* data access property list ID */
- H5FD_t *lf = NULL; /* VFD struct ptr */
+ bool show_progress = false;
+ hid_t fapl_id = H5I_INVALID_HID; /* file access property list ID */
+ hid_t dxpl_id = H5I_INVALID_HID; /* data access property list ID */
+ H5FD_t *lf = NULL; /* VFD struct ptr */
int cp = 0;
int i;
uint32_t count;
@@ -973,26 +973,26 @@ vector_read_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
size_t sizes[1];
void *bufs[1];
- pass = TRUE;
+ pass = true;
if (mpi_rank == 0) {
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 2 -- %s / independent",
- vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector read test 2 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 2 -- %s / col op / ind I/O",
- vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector read test 2 -- %s / col op / ind I/O",
+ vfd_name);
}
else {
assert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 2 -- %s / col op / col I/O",
- vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector read test 2 -- %s / col op / col I/O",
+ vfd_name);
}
TESTING(test_title);
@@ -1027,7 +1027,7 @@ vector_read_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (H5FDwrite(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)decreasing_fi_buf) <
0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDwrite() on rank 0 failed.\n";
}
}
@@ -1076,7 +1076,7 @@ vector_read_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (H5FDread_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDread_vector() failed.\n";
}
}
@@ -1115,7 +1115,7 @@ vector_read_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (H5FDread_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDread_vector() failed.\n";
}
}
@@ -1138,7 +1138,7 @@ vector_read_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (read_fi_buf[i] != 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Unexpected value in read_fi_buf (1).\n";
break;
}
@@ -1147,7 +1147,7 @@ vector_read_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (read_fi_buf[i] != decreasing_fi_buf[i]) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Unexpected value in read_fi_buf (2).\n";
break;
}
@@ -1245,7 +1245,7 @@ vector_read_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
*
* 9) On rank 0, delete the test file.
*
- * Return: FALSE on success, TRUE if any errors are detected.
+ * Return: false on success, true if any errors are detected.
*
*-------------------------------------------------------------------------
*/
@@ -1262,10 +1262,10 @@ vector_read_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
int32_t buf_2[(INTS_PER_RANK / 4) + 1];
int32_t buf_3[(INTS_PER_RANK / 4) + 1];
haddr_t eoa;
- hbool_t show_progress = FALSE;
- hid_t fapl_id = -1; /* file access property list ID */
- hid_t dxpl_id = -1; /* data access property list ID */
- H5FD_t *lf = NULL; /* VFD struct ptr */
+ bool show_progress = false;
+ hid_t fapl_id = H5I_INVALID_HID; /* file access property list ID */
+ hid_t dxpl_id = H5I_INVALID_HID; /* data access property list ID */
+ H5FD_t *lf = NULL; /* VFD struct ptr */
int cp = 0;
int i;
uint32_t count;
@@ -1274,26 +1274,26 @@ vector_read_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
size_t sizes[4];
void *bufs[4];
- pass = TRUE;
+ pass = true;
if (mpi_rank == 0) {
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 3 -- %s / independent",
- vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector read test 3 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 3 -- %s / col op / ind I/O",
- vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector read test 3 -- %s / col op / ind I/O",
+ vfd_name);
}
else {
assert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 3 -- %s / col op / col I/O",
- vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector read test 3 -- %s / col op / col I/O",
+ vfd_name);
}
TESTING(test_title);
@@ -1328,7 +1328,7 @@ vector_read_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (H5FDwrite(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)negative_fi_buf) <
0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDwrite() on rank 0 failed.\n";
}
}
@@ -1420,7 +1420,7 @@ vector_read_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (H5FDread_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDread_vector() failed.\n";
}
}
@@ -1441,7 +1441,7 @@ vector_read_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (((mpi_rank % 2 == 0) && (buf_0[i] != negative_fi_buf[base_index + i])) ||
((mpi_rank % 2 == 1) && (buf_3[i] != negative_fi_buf[base_index + i]))) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Unexpected value in buf (1).\n";
}
}
@@ -1453,7 +1453,7 @@ vector_read_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (((mpi_rank % 2 == 0) && (buf_1[i] != negative_fi_buf[base_index + i])) ||
((mpi_rank % 2 == 1) && (buf_2[i] != negative_fi_buf[base_index + i]))) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Unexpected value in buf (2).\n";
}
}
@@ -1465,7 +1465,7 @@ vector_read_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (((mpi_rank % 2 == 0) && (buf_2[i] != negative_fi_buf[base_index + i])) ||
((mpi_rank % 2 == 1) && (buf_1[i] != negative_fi_buf[base_index + i]))) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Unexpected value in buf (3).\n";
}
}
@@ -1477,7 +1477,7 @@ vector_read_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (((mpi_rank % 2 == 0) && (buf_3[i] != negative_fi_buf[base_index + i])) ||
((mpi_rank % 2 == 1) && (buf_0[i] != negative_fi_buf[base_index + i]))) {
- pass = FALSE;
+ pass = false;
failure_mssg = "Unexpected value in buf (4).\n";
}
}
@@ -1598,7 +1598,7 @@ vector_read_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
*
* 9) On rank 0, delete the test file.
*
- * Return: FALSE on success, TRUE if any errors are detected.
+ * Return: false on success, true if any errors are detected.
*
*-------------------------------------------------------------------------
*/
@@ -1612,10 +1612,10 @@ vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
char filename[512];
haddr_t eoa;
haddr_t base_addr;
- hbool_t show_progress = FALSE;
- hid_t fapl_id = -1; /* file access property list ID */
- hid_t dxpl_id = -1; /* data access property list ID */
- H5FD_t *lf = NULL; /* VFD struct ptr */
+ bool show_progress = false;
+ hid_t fapl_id = H5I_INVALID_HID; /* file access property list ID */
+ hid_t dxpl_id = H5I_INVALID_HID; /* data access property list ID */
+ H5FD_t *lf = NULL; /* VFD struct ptr */
int cp = 0;
int i;
int j;
@@ -1627,26 +1627,26 @@ vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
size_t sizes[4];
void *bufs[4];
- pass = TRUE;
+ pass = true;
if (mpi_rank == 0) {
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 4 -- %s / independent",
- vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector read test 4 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 4 -- %s / col op / ind I/O",
- vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector read test 4 -- %s / col op / ind I/O",
+ vfd_name);
}
else {
assert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 4 -- %s / col op / col I/O",
- vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector read test 4 -- %s / col op / col I/O",
+ vfd_name);
}
TESTING(test_title);
@@ -1681,7 +1681,7 @@ vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (H5FDwrite(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)increasing_fi_buf) <
0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDwrite() on rank 0 failed.\n";
}
}
@@ -1808,7 +1808,7 @@ vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (H5FDread_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDread_vector() failed (1).\n";
}
}
@@ -1842,7 +1842,7 @@ vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (read_fi_buf[j] != increasing_fi_buf[j]) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (1.1)";
fprintf(stdout, "\nread_fi_buf[%d] = %d, increasing_fi_buf[%d] = %d\n", j,
read_fi_buf[j], j, increasing_fi_buf[j]);
@@ -1852,7 +1852,7 @@ vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (read_fi_buf[j] != increasing_fi_buf[j]) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (1.2)";
}
}
@@ -1860,7 +1860,7 @@ vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (read_fi_buf[j] != increasing_fi_buf[j]) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (1.3)";
}
}
@@ -1868,7 +1868,7 @@ vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (read_fi_buf[j] != 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (1.4)";
}
}
@@ -1879,7 +1879,7 @@ vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (read_fi_buf[j] != increasing_fi_buf[j]) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (2.1)";
}
}
@@ -1887,7 +1887,7 @@ vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (read_fi_buf[j] != increasing_fi_buf[j]) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (2.2)";
}
}
@@ -1895,7 +1895,7 @@ vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (read_fi_buf[j] != 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (2.3)";
}
}
@@ -1906,7 +1906,7 @@ vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (read_fi_buf[j] != increasing_fi_buf[j]) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (3.1)";
}
}
@@ -1914,7 +1914,7 @@ vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (read_fi_buf[j] != 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (3.2)";
}
}
@@ -1923,19 +1923,19 @@ vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
case 3:
if (read_fi_buf[j] != 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (4)";
}
break;
default:
- assert(FALSE); /* should be un-reachable */
+ assert(false); /* should be un-reachable */
break;
}
}
else if (read_fi_buf[j] != 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (5)";
}
} /* end for loop */
@@ -2026,7 +2026,7 @@ vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
*
* 9) On rank 0, delete the test file.
*
- * Return: FALSE on success, TRUE if any errors are detected.
+ * Return: false on success, true if any errors are detected.
*
*-------------------------------------------------------------------------
*/
@@ -2040,10 +2040,10 @@ vector_read_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
char filename[512];
haddr_t eoa;
haddr_t base_addr;
- hbool_t show_progress = FALSE;
- hid_t fapl_id = -1; /* file access property list ID */
- hid_t dxpl_id = -1; /* data access property list ID */
- H5FD_t *lf = NULL; /* VFD struct ptr */
+ bool show_progress = false;
+ hid_t fapl_id = H5I_INVALID_HID; /* file access property list ID */
+ hid_t dxpl_id = H5I_INVALID_HID; /* data access property list ID */
+ H5FD_t *lf = NULL; /* VFD struct ptr */
int cp = 0;
int i;
int j;
@@ -2054,26 +2054,26 @@ vector_read_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
size_t sizes[2];
void *bufs[(INTS_PER_RANK / 16) + 1];
- pass = TRUE;
+ pass = true;
if (mpi_rank == 0) {
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 5 -- %s / independent",
- vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector read test 5 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 5 -- %s / col op / ind I/O",
- vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector read test 5 -- %s / col op / ind I/O",
+ vfd_name);
}
else {
assert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 5 -- %s / col op / col I/O",
- vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector read test 5 -- %s / col op / col I/O",
+ vfd_name);
}
TESTING(test_title);
@@ -2108,7 +2108,7 @@ vector_read_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (H5FDwrite(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)increasing_fi_buf) <
0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDwrite() on rank 0 failed.\n";
}
}
@@ -2168,7 +2168,7 @@ vector_read_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (H5FDread_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDread_vector() failed (1).\n";
}
}
@@ -2193,13 +2193,13 @@ vector_read_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
if (read_fi_buf[j] != increasing_fi_buf[j]) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (1)";
}
}
else if (read_fi_buf[j] != 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (2)";
}
} /* end for loop */
@@ -2265,7 +2265,7 @@ vector_read_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_
*
* 6) On rank 0, delete the test file.
*
- * Return: FALSE on success, TRUE if any errors are detected.
+ * Return: false on success, true if any errors are detected.
*
*-------------------------------------------------------------------------
*/
@@ -2278,10 +2278,10 @@ vector_write_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
char test_title[120];
char filename[512];
haddr_t eoa;
- hbool_t show_progress = FALSE;
- hid_t fapl_id = -1; /* file access property list ID */
- hid_t dxpl_id = -1; /* data access property list ID */
- H5FD_t *lf = NULL; /* VFD struct ptr */
+ bool show_progress = false;
+ hid_t fapl_id = H5I_INVALID_HID; /* file access property list ID */
+ hid_t dxpl_id = H5I_INVALID_HID; /* data access property list ID */
+ H5FD_t *lf = NULL; /* VFD struct ptr */
int cp = 0;
int i;
uint32_t count;
@@ -2290,26 +2290,26 @@ vector_write_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
size_t sizes[1];
const void *bufs[1];
- pass = TRUE;
+ pass = true;
if (mpi_rank == 0) {
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 1 -- %s / independent",
- vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector write test 1 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- HDsnprintf(test_title, sizeof(test_title),
- "parallel vector write test 1 -- %s / col op / ind I/O", vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector write test 1 -- %s / col op / ind I/O",
+ vfd_name);
}
else {
assert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- HDsnprintf(test_title, sizeof(test_title),
- "parallel vector write test 1 -- %s / col op / col I/O", vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector write test 1 -- %s / col op / col I/O",
+ vfd_name);
}
TESTING(test_title);
@@ -2347,7 +2347,7 @@ vector_write_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDwrite_vector() failed.\n";
}
}
@@ -2373,7 +2373,7 @@ vector_write_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (H5FDread(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)read_fi_buf) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDread() failed.\n";
}
@@ -2381,7 +2381,7 @@ vector_write_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (read_fi_buf[i] != increasing_fi_buf[i]) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file";
break;
}
@@ -2451,7 +2451,7 @@ vector_write_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
*
* 5) Close the test file. On rank 0, delete the test file.
*
- * Return: FALSE on success, TRUE if any errors are detected.
+ * Return: false on success, true if any errors are detected.
*
*-------------------------------------------------------------------------
*/
@@ -2464,10 +2464,10 @@ vector_write_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
char test_title[120];
char filename[512];
haddr_t eoa;
- hbool_t show_progress = FALSE;
- hid_t fapl_id = -1; /* file access property list ID */
- hid_t dxpl_id = -1; /* data access property list ID */
- H5FD_t *lf = NULL; /* VFD struct ptr */
+ bool show_progress = false;
+ hid_t fapl_id = H5I_INVALID_HID; /* file access property list ID */
+ hid_t dxpl_id = H5I_INVALID_HID; /* data access property list ID */
+ H5FD_t *lf = NULL; /* VFD struct ptr */
int cp = 0;
int i;
int j;
@@ -2477,26 +2477,26 @@ vector_write_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
size_t sizes[1];
const void *bufs[1];
- pass = TRUE;
+ pass = true;
if (mpi_rank == 0) {
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 2 -- %s / independent",
- vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector write test 2 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- HDsnprintf(test_title, sizeof(test_title),
- "parallel vector write test 2 -- %s / col op / ind I/O", vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector write test 2 -- %s / col op / ind I/O",
+ vfd_name);
}
else {
assert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- HDsnprintf(test_title, sizeof(test_title),
- "parallel vector write test 2 -- %s / col op / col I/O", vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector write test 2 -- %s / col op / col I/O",
+ vfd_name);
}
TESTING(test_title);
@@ -2539,7 +2539,7 @@ vector_write_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDwrite_vector() failed (1).\n";
}
}
@@ -2547,7 +2547,7 @@ vector_write_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (H5FDwrite_vector(lf, dxpl_id, 0, NULL, NULL, NULL, NULL) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDwrite_vector() failed (2).\n";
}
}
@@ -2566,7 +2566,7 @@ vector_write_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (H5FDwrite_vector(lf, dxpl_id, 0, NULL, NULL, NULL, NULL) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDwrite_vector() failed (3).\n";
}
}
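
The calls above with count 0 and all-NULL arrays check that an empty vector write is accepted, so that a rank with nothing to transfer can still take part in a collective call. As a sketch (the wrapper is hypothetical, the call shape is taken from the hunk):

#include "hdf5.h"
#include "H5FDdevelop.h" /* H5FD_t, H5FDwrite_vector */

/* A rank with no data still joins the (possibly collective) vector write. */
static herr_t
demo_empty_vector_write(H5FD_t *lf, hid_t dxpl_id)
{
    return H5FDwrite_vector(lf, dxpl_id, 0, NULL, NULL, NULL, NULL);
}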
@@ -2580,7 +2580,7 @@ vector_write_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDwrite_vector() failed (4).\n";
}
}
@@ -2607,7 +2607,7 @@ vector_write_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (H5FDread(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)read_fi_buf) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDread() failed.\n";
}
@@ -2619,7 +2619,7 @@ vector_write_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (read_fi_buf[j] != increasing_fi_buf[j]) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file";
break;
}
@@ -2631,7 +2631,7 @@ vector_write_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (read_fi_buf[j] != negative_fi_buf[j]) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file";
break;
}
@@ -2700,7 +2700,7 @@ vector_write_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
*
* 5) Close the test file. On rank 0, delete the test file.
*
- * Return: FALSE on success, TRUE if any errors are detected.
+ * Return: false on success, true if any errors are detected.
*
*-------------------------------------------------------------------------
*/
@@ -2717,10 +2717,10 @@ vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
int ints_per_write;
size_t bytes_per_write;
haddr_t eoa;
- hbool_t show_progress = FALSE;
- hid_t fapl_id = -1; /* file access property list ID */
- hid_t dxpl_id = -1; /* data access property list ID */
- H5FD_t *lf = NULL; /* VFD struct ptr */
+ bool show_progress = false;
+ hid_t fapl_id = H5I_INVALID_HID; /* file access property list ID */
+ hid_t dxpl_id = H5I_INVALID_HID; /* data access property list ID */
+ H5FD_t *lf = NULL; /* VFD struct ptr */
int cp = 0;
int i;
int j;
@@ -2730,26 +2730,26 @@ vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
size_t sizes[4];
const void *bufs[4];
- pass = TRUE;
+ pass = true;
if (mpi_rank == 0) {
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 3 -- %s / independent",
- vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector write test 3 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- HDsnprintf(test_title, sizeof(test_title),
- "parallel vector write test 3 -- %s / col op / ind I/O", vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector write test 3 -- %s / col op / ind I/O",
+ vfd_name);
}
else {
assert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- HDsnprintf(test_title, sizeof(test_title),
- "parallel vector write test 3 -- %s / col op / col I/O", vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector write test 3 -- %s / col op / col I/O",
+ vfd_name);
}
TESTING(test_title);
@@ -2810,7 +2810,7 @@ vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDwrite_vector() failed (1).\n";
}
}
@@ -2838,7 +2838,7 @@ vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (H5FDread(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)read_fi_buf) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDread() failed.\n";
}
@@ -2850,7 +2850,7 @@ vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (read_fi_buf[j] != increasing_fi_buf[j]) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (1)";
break;
}
@@ -2862,7 +2862,7 @@ vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (read_fi_buf[j] != decreasing_fi_buf[j]) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (2)";
break;
}
@@ -2874,7 +2874,7 @@ vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (read_fi_buf[j] != negative_fi_buf[j]) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (3)";
break;
}
@@ -2886,7 +2886,7 @@ vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (read_fi_buf[j] != zero_fi_buf[j]) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (3)";
break;
}
@@ -2960,7 +2960,7 @@ vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
*
* 5) Close the test file. On rank 0, delete the test file.
*
- * Return: FALSE on success, TRUE if any errors are detected.
+ * Return: false on success, true if any errors are detected.
*
*-------------------------------------------------------------------------
*/
@@ -2977,10 +2977,10 @@ vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
int ints_per_write;
size_t bytes_per_write;
haddr_t eoa;
- hbool_t show_progress = FALSE;
- hid_t fapl_id = -1; /* file access property list ID */
- hid_t dxpl_id = -1; /* data access property list ID */
- H5FD_t *lf = NULL; /* VFD struct ptr */
+ bool show_progress = false;
+ hid_t fapl_id = H5I_INVALID_HID; /* file access property list ID */
+ hid_t dxpl_id = H5I_INVALID_HID; /* data access property list ID */
+ H5FD_t *lf = NULL; /* VFD struct ptr */
int cp = 0;
int i;
int j;
@@ -2990,26 +2990,26 @@ vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
size_t sizes[4];
const void *bufs[4];
- pass = TRUE;
+ pass = true;
if (mpi_rank == 0) {
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 4 -- %s / independent",
- vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector write test 4 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- HDsnprintf(test_title, sizeof(test_title),
- "parallel vector write test 4 -- %s / col op / ind I/O", vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector write test 4 -- %s / col op / ind I/O",
+ vfd_name);
}
else {
assert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- HDsnprintf(test_title, sizeof(test_title),
- "parallel vector write test 4 -- %s / col op / col I/O", vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector write test 4 -- %s / col op / col I/O",
+ vfd_name);
}
TESTING(test_title);
@@ -3071,7 +3071,7 @@ vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDwrite_vector() failed (1).\n";
}
}
@@ -3099,7 +3099,7 @@ vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (H5FDread(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)read_fi_buf) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDread() failed.\n";
}
@@ -3111,7 +3111,7 @@ vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (read_fi_buf[j] != zero_fi_buf[j]) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (1)";
break;
}
@@ -3123,7 +3123,7 @@ vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (read_fi_buf[j] != negative_fi_buf[j]) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (2)";
break;
}
@@ -3135,7 +3135,7 @@ vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (read_fi_buf[j] != decreasing_fi_buf[j]) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (3)";
break;
}
@@ -3147,7 +3147,7 @@ vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (read_fi_buf[j] != increasing_fi_buf[j]) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (3)";
break;
}
@@ -3258,7 +3258,7 @@ vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
*
* 7) Close the test file. On rank 0, delete the test file.
*
- * Return: FALSE on success, TRUE if any errors are detected.
+ * Return: false on success, true if any errors are detected.
*
*-------------------------------------------------------------------------
*/
@@ -3273,10 +3273,10 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
haddr_t base_addr;
int base_index;
haddr_t eoa;
- hbool_t show_progress = FALSE;
- hid_t fapl_id = -1; /* file access property list ID */
- hid_t dxpl_id = -1; /* data access property list ID */
- H5FD_t *lf = NULL; /* VFD struct ptr */
+ bool show_progress = false;
+ hid_t fapl_id = H5I_INVALID_HID; /* file access property list ID */
+ hid_t dxpl_id = H5I_INVALID_HID; /* data access property list ID */
+ H5FD_t *lf = NULL; /* VFD struct ptr */
int cp = 0;
int i;
int j;
@@ -3287,26 +3287,26 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
size_t sizes[4];
const void *bufs[4];
- pass = TRUE;
+ pass = true;
if (mpi_rank == 0) {
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 5 -- %s / independent",
- vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector write test 5 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- HDsnprintf(test_title, sizeof(test_title),
- "parallel vector write test 5 -- %s / col op / ind I/O", vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector write test 5 -- %s / col op / ind I/O",
+ vfd_name);
}
else {
assert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- HDsnprintf(test_title, sizeof(test_title),
- "parallel vector write test 5 -- %s / col op / col I/O", vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector write test 5 -- %s / col op / col I/O",
+ vfd_name);
}
TESTING(test_title);
@@ -3343,7 +3343,7 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDwrite_vector() failed.\n";
}
}
@@ -3460,7 +3460,7 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDwrite_vector() failed (1).\n";
}
}
@@ -3487,7 +3487,7 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (H5FDread(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)read_fi_buf) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDread() failed.\n";
}
@@ -3506,7 +3506,7 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (read_fi_buf[j] != negative_fi_buf[j]) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (1.1)";
printf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j],
@@ -3517,7 +3517,7 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (read_fi_buf[j] != decreasing_fi_buf[j]) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (1.2)";
printf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j],
@@ -3528,7 +3528,7 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (read_fi_buf[j] != increasing_fi_buf[j]) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (1.3)";
printf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j],
@@ -3539,7 +3539,7 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (read_fi_buf[j] != 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (1.4)";
}
}
@@ -3550,7 +3550,7 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (read_fi_buf[j] != increasing_fi_buf[j]) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (2.1)";
printf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j],
@@ -3561,7 +3561,7 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (read_fi_buf[j] != decreasing_fi_buf[j]) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (2.2)";
printf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j],
@@ -3572,7 +3572,7 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (read_fi_buf[j] != 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (2.3)";
}
}
@@ -3583,7 +3583,7 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (read_fi_buf[j] != negative_fi_buf[j]) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (3.1)";
printf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j],
@@ -3594,7 +3594,7 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (read_fi_buf[j] != 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (3.2)";
}
}
@@ -3603,13 +3603,13 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
case 3:
if (read_fi_buf[j] != 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (4)";
}
break;
default:
- assert(FALSE); /* should be un-reachable */
+ assert(false); /* should be un-reachable */
break;
}
}
@@ -3694,7 +3694,7 @@ vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
*
* 9) On rank 0, delete the test file.
*
- * Return: FALSE on success, TRUE if any errors are detected.
+ * Return: false on success, true if any errors are detected.
*
*-------------------------------------------------------------------------
*/
@@ -3708,10 +3708,10 @@ vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
char filename[512];
haddr_t eoa;
haddr_t base_addr;
- hbool_t show_progress = FALSE;
- hid_t fapl_id = -1; /* file access property list ID */
- hid_t dxpl_id = -1; /* data access property list ID */
- H5FD_t *lf = NULL; /* VFD struct ptr */
+ bool show_progress = false;
+ hid_t fapl_id = H5I_INVALID_HID; /* file access property list ID */
+ hid_t dxpl_id = H5I_INVALID_HID; /* data access property list ID */
+ H5FD_t *lf = NULL; /* VFD struct ptr */
int cp = 0;
int i;
int base_index;
@@ -3721,26 +3721,26 @@ vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
size_t sizes[2];
const void *bufs[(INTS_PER_RANK / 16) + 1];
- pass = TRUE;
+ pass = true;
if (mpi_rank == 0) {
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 6 -- %s / independent",
- vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector write test 6 -- %s / independent",
+ vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- HDsnprintf(test_title, sizeof(test_title),
- "parallel vector write test 6 -- %s / col op / ind I/O", vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector write test 6 -- %s / col op / ind I/O",
+ vfd_name);
}
else {
assert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- HDsnprintf(test_title, sizeof(test_title),
- "parallel vector write test 6 -- %s / col op / col I/O", vfd_name);
+ snprintf(test_title, sizeof(test_title), "parallel vector write test 6 -- %s / col op / col I/O",
+ vfd_name);
}
TESTING(test_title);
@@ -3774,7 +3774,7 @@ vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (H5FDwrite(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)zero_fi_buf) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDwrite() on rank 0 failed.\n";
}
}
@@ -3822,7 +3822,7 @@ vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDwrite_vector() failed (1).\n";
}
}
@@ -3847,7 +3847,7 @@ vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (H5FDread(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)read_fi_buf) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDread() failed.\n";
}
@@ -3857,13 +3857,13 @@ vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (read_fi_buf[i] != increasing_fi_buf[i]) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (1)";
}
}
else if (read_fi_buf[i] != zero_fi_buf[i]) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (2)";
}
}
@@ -3948,7 +3948,7 @@ vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
*
* 7) Close the test file. On rank 0, delete the test file.
*
- * Return: FALSE on success, TRUE if any errors are detected.
+ * Return: false on success, true if any errors are detected.
*
*-------------------------------------------------------------------------
*/
@@ -3964,10 +3964,10 @@ vector_write_test_7(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
haddr_t addr_increment;
int base_index;
haddr_t eoa;
- hbool_t show_progress = FALSE;
- hid_t fapl_id = -1; /* file access property list ID */
- hid_t dxpl_id = -1; /* data access property list ID */
- H5FD_t *lf = NULL; /* VFD struct ptr */
+ bool show_progress = false;
+ hid_t fapl_id = H5I_INVALID_HID; /* file access property list ID */
+ hid_t dxpl_id = H5I_INVALID_HID; /* data access property list ID */
+ H5FD_t *lf = NULL; /* VFD struct ptr */
int cp = 0;
int i;
int j;
@@ -3978,23 +3978,23 @@ vector_write_test_7(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
size_t sizes[8];
const void *bufs[8];
- pass = TRUE;
+ pass = true;
if (mpi_rank == 0) {
if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
- HDsprintf(test_title, "parallel vector write test 7 -- %s / independent", vfd_name);
+ sprintf(test_title, "parallel vector write test 7 -- %s / independent", vfd_name);
}
else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
- HDsprintf(test_title, "parallel vector write test 7 -- %s / col op / ind I/O", vfd_name);
+ sprintf(test_title, "parallel vector write test 7 -- %s / col op / ind I/O", vfd_name);
}
else {
assert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
- HDsprintf(test_title, "parallel vector write test 7 -- %s / col op / col I/O", vfd_name);
+ sprintf(test_title, "parallel vector write test 7 -- %s / col op / col I/O", vfd_name);
}
TESTING(test_title);
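
One small difference from tests 1 through 6: here HDsprintf becomes plain sprintf, so the test_title buffer (120 bytes in the earlier tests) has no explicit bound. If a bound is wanted, the snprintf form the earlier tests already use carries over directly; a sketch, where the helper is hypothetical and vfd_name stands for the same argument as above:

#include <stdio.h>

static void
demo_bounded_title(char test_title[120], const char *vfd_name) /* hypothetical */
{
    /* Same text as the sprintf call above, with an explicit size bound. */
    snprintf(test_title, 120, "parallel vector write test 7 -- %s / independent", vfd_name);
}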
@@ -4031,7 +4031,7 @@ vector_write_test_7(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDwrite_vector() failed.\n";
}
}
@@ -4064,7 +4064,7 @@ vector_write_test_7(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDwrite_vector() failed (1).\n";
}
}
@@ -4090,7 +4090,7 @@ vector_write_test_7(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (H5FDread(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)read_fi_buf) < 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "H5FDread() failed.\n";
}
@@ -4106,7 +4106,7 @@ vector_write_test_7(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (read_fi_buf[j] != increasing_fi_buf[j]) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (1)";
printf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j],
@@ -4117,7 +4117,7 @@ vector_write_test_7(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer
if (read_fi_buf[j] != 0) {
- pass = FALSE;
+ pass = false;
failure_mssg = "unexpected data read from file (2)";
printf("\nread_fi_buf[%d] = %d, 0 expected.\n", j, read_fi_buf[j]);
@@ -4355,7 +4355,7 @@ static herr_t
test_selection_io_read_verify(hid_t dxpl, int mpi_rank, hsize_t start[], hsize_t block[], H5FD_t *lf,
H5FD_mem_t type, uint32_t count, hid_t mem_spaces[], hid_t file_spaces[],
haddr_t offsets[], size_t element_sizes[], uint32_t rbufcount, int *erb[],
- hbool_t shorten_rbufs)
+ bool shorten_rbufs)
{
int *rbuf1 = NULL;
int *rbuf2 = NULL;
@@ -4578,7 +4578,7 @@ test_selection_io_types_shorten(int mpi_rank, int mpi_size, H5FD_t *lf, hid_t dx
/* Read and verify */
if (test_selection_io_read_verify(dxpl, mpi_rank, verify_start, verify_block, lf, type, 2, mem_spaces,
- file_spaces, addrs, element_sizes, 2, (int **)erbufs, FALSE) < 0)
+ file_spaces, addrs, element_sizes, 2, (int **)erbufs, false) < 0)
P_TEST_ERROR;
MPI_Barrier(comm);
@@ -4630,7 +4630,7 @@ test_selection_io_types_shorten(int mpi_rank, int mpi_size, H5FD_t *lf, hid_t dx
/* Read entire file buffer and verify */
if (test_selection_io_read_verify(dxpl, mpi_rank, verify_start, verify_block, lf, type, 2, mem_spaces,
- file_spaces, addrs, element_sizes, 2, (int **)fbufs, FALSE) < 0)
+ file_spaces, addrs, element_sizes, 2, (int **)fbufs, false) < 0)
P_TEST_ERROR;
MPI_Barrier(comm);
@@ -4745,7 +4745,7 @@ test_selection_io_types_shorten(int mpi_rank, int mpi_size, H5FD_t *lf, hid_t dx
/* Read and verify */
if (test_selection_io_read_verify(dxpl, mpi_rank, verify_start, verify_block, lf, type, 2, mem_spaces,
file_spaces, addrs, element_sizes, 1, (int **)&erbufs[1],
- shorten_element_sizes ? TRUE : FALSE) < 0)
+ shorten_element_sizes ? true : false) < 0)
P_TEST_ERROR;
MPI_Barrier(comm);
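
A side note on the call above: now that shorten_rbufs is a bool parameter, the expressions "shorten_element_sizes ? true : false" and "shorten_element_sizes != 0" pass the same value; the ternary is a leftover from the hbool_t days. A tiny self-contained illustration, with both functions made up:

#include <stdbool.h>

static void demo_take_flag(bool flag) { (void)flag; }

static void
demo_caller(int shorten_element_sizes)
{
    demo_take_flag(shorten_element_sizes ? true : false); /* as in the hunk above */
    demo_take_flag(shorten_element_sizes != 0);           /* equivalent */
}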
@@ -4789,7 +4789,7 @@ test_selection_io_types_shorten(int mpi_rank, int mpi_size, H5FD_t *lf, hid_t dx
verify_start[1] = start[0] * block[1];
verify_block[1] = (block[0] * block[1]);
if (test_selection_io_read_verify(dxpl, mpi_rank, verify_start, verify_block, lf, type, 2, mem_spaces,
- file_spaces, addrs, element_sizes, 2, (int **)fbufs, FALSE) < 0)
+ file_spaces, addrs, element_sizes, 2, (int **)fbufs, false) < 0)
P_TEST_ERROR;
MPI_Barrier(comm);
@@ -4892,7 +4892,7 @@ test_selection_io_types_1d_2d(int mpi_rank, int mpi_size, H5FD_t *lf, hid_t dxpl
verify_block[0] = (count[0] * stride[0]);
if (test_selection_io_read_verify(dxpl, mpi_rank, verify_start, verify_block, lf, type, 1, &mem_spaces[0],
&file_spaces[1], &addrs[1], element_sizes, 1, (int **)&erbufs[0],
- FALSE) < 0)
+ false) < 0)
P_TEST_ERROR;
MPI_Barrier(comm);
@@ -4929,7 +4929,7 @@ test_selection_io_types_1d_2d(int mpi_rank, int mpi_size, H5FD_t *lf, hid_t dxpl
verify_block[0] = block[0];
if (test_selection_io_read_verify(dxpl, mpi_rank, verify_start, verify_block, lf, type, 1, &mem_spaces[0],
&file_spaces[1], &addrs[1], element_sizes, 1, (int **)&fbufs[1],
- FALSE) < 0)
+ false) < 0)
P_TEST_ERROR;
MPI_Barrier(comm);
@@ -4985,7 +4985,7 @@ test_selection_io_types_1d_2d(int mpi_rank, int mpi_size, H5FD_t *lf, hid_t dxpl
verify_block[0] = (count[0] * count[1] * stride[0]);
if (test_selection_io_read_verify(dxpl, mpi_rank, verify_start, verify_block, lf, type, 1, &mem_spaces[1],
&file_spaces[0], &addrs[0], element_sizes, 1, (int **)&erbufs[1],
- FALSE) < 0)
+ false) < 0)
P_TEST_ERROR;
MPI_Barrier(comm);
@@ -5024,7 +5024,7 @@ test_selection_io_types_1d_2d(int mpi_rank, int mpi_size, H5FD_t *lf, hid_t dxpl
verify_block[0] = (block[0] * block[1]);
if (test_selection_io_read_verify(dxpl, mpi_rank, verify_start, verify_block, lf, type, 1, &mem_spaces[1],
&file_spaces[0], &addrs[0], element_sizes, 1, (int **)&fbufs[0],
- FALSE) < 0)
+ false) < 0)
P_TEST_ERROR;
MPI_Barrier(comm);
@@ -5094,7 +5094,7 @@ test_selection_io_types_2d(int mpi_rank, int mpi_size, H5FD_t *lf, hid_t dxpl, H
verify_block[0] = (block[0] * block[1]);
if (test_selection_io_read_verify(dxpl, mpi_rank, verify_start, verify_block, lf, type, 1, &mem_spaces[1],
&file_spaces[1], &addrs[1], element_sizes, 1, (int **)&fbufs[1],
- FALSE) < 0)
+ false) < 0)
P_TEST_ERROR;
MPI_Barrier(comm);
@@ -5146,7 +5146,7 @@ test_selection_io_types_2d(int mpi_rank, int mpi_size, H5FD_t *lf, hid_t dxpl, H
verify_block[0] = (count[0] * count[1] * stride[0]);
if (test_selection_io_read_verify(dxpl, mpi_rank, verify_start, verify_block, lf, type, 1, &mem_spaces[1],
&file_spaces[1], &addrs[1], element_sizes, 1, (int **)&erbufs[1],
- FALSE) < 0)
+ false) < 0)
P_TEST_ERROR;
MPI_Barrier(comm);
@@ -5178,7 +5178,7 @@ test_selection_io_types_2d(int mpi_rank, int mpi_size, H5FD_t *lf, hid_t dxpl, H
verify_block[0] = (block[0] * block[1]);
if (test_selection_io_read_verify(dxpl, mpi_rank, verify_start, verify_block, lf, type, 1, &mem_spaces[1],
&file_spaces[1], &addrs[1], element_sizes, 1, (int **)&fbufs[1],
- FALSE) < 0)
+ false) < 0)
P_TEST_ERROR;
MPI_Barrier(comm);
@@ -5228,7 +5228,7 @@ test_selection_io_types_2d(int mpi_rank, int mpi_size, H5FD_t *lf, hid_t dxpl, H
verify_block[0] = (count[0] * count[1] * stride[1]);
if (test_selection_io_read_verify(dxpl, mpi_rank, verify_start, verify_block, lf, type, 1, &mem_spaces[1],
&file_spaces[1], &addrs[1], element_sizes, 1, (int **)&erbufs[1],
- FALSE) < 0)
+ false) < 0)
P_TEST_ERROR;
MPI_Barrier(comm);
@@ -5260,7 +5260,7 @@ test_selection_io_types_2d(int mpi_rank, int mpi_size, H5FD_t *lf, hid_t dxpl, H
verify_block[0] = (block[0] * block[1]);
if (test_selection_io_read_verify(dxpl, mpi_rank, verify_start, verify_block, lf, type, 1, &mem_spaces[1],
&file_spaces[1], &addrs[1], element_sizes, 1, (int **)&fbufs[1],
- FALSE) < 0)
+ false) < 0)
P_TEST_ERROR;
MPI_Barrier(comm);
@@ -5326,7 +5326,7 @@ test_selection_io_types_2d(int mpi_rank, int mpi_size, H5FD_t *lf, hid_t dxpl, H
verify_block[0] = (count[0] * count[1] * stride[1]);
if (test_selection_io_read_verify(dxpl, mpi_rank, verify_start, verify_block, lf, type, 1, &mem_spaces[1],
&file_spaces[1], &addrs[1], element_sizes, 1, (int **)&erbufs[1],
- FALSE) < 0)
+ false) < 0)
P_TEST_ERROR;
MPI_Barrier(comm);
@@ -5358,7 +5358,7 @@ test_selection_io_types_2d(int mpi_rank, int mpi_size, H5FD_t *lf, hid_t dxpl, H
verify_block[0] = (block[0] * block[1]);
if (test_selection_io_read_verify(dxpl, mpi_rank, verify_start, verify_block, lf, type, 1, &mem_spaces[1],
&file_spaces[1], &addrs[1], element_sizes, 1, (int **)&fbufs[1],
- FALSE) < 0)
+ false) < 0)
P_TEST_ERROR;
MPI_Barrier(comm);
@@ -5422,7 +5422,7 @@ test_selection_io_types_1d(int mpi_rank, int mpi_size, H5FD_t *lf, hid_t dxpl, H
if (test_selection_io_read_verify(dxpl, mpi_rank, verify_start, verify_block, lf, type, 1, &mem_spaces[0],
&file_spaces[0], &addrs[0], element_sizes, 1, (int **)&fbufs[0],
- FALSE) < 0)
+ false) < 0)
P_TEST_ERROR;
MPI_Barrier(comm);
@@ -5468,7 +5468,7 @@ test_selection_io_types_1d(int mpi_rank, int mpi_size, H5FD_t *lf, hid_t dxpl, H
verify_block[0] = (count[0] * stride[0]);
if (test_selection_io_read_verify(dxpl, mpi_rank, verify_start, verify_block, lf, type, 1, &mem_spaces[0],
&file_spaces[0], &addrs[0], element_sizes, 1, (int **)&erbufs[0],
- FALSE) < 0)
+ false) < 0)
P_TEST_ERROR;
MPI_Barrier(comm);
@@ -5496,7 +5496,7 @@ test_selection_io_types_1d(int mpi_rank, int mpi_size, H5FD_t *lf, hid_t dxpl, H
verify_block[0] = block[0];
if (test_selection_io_read_verify(dxpl, mpi_rank, verify_start, verify_block, lf, type, 1, &mem_spaces[0],
&file_spaces[0], &addrs[0], element_sizes, 1, (int **)&fbufs[0],
- FALSE) < 0)
+ false) < 0)
P_TEST_ERROR;
MPI_Barrier(comm);
@@ -5542,7 +5542,7 @@ test_selection_io_types_1d(int mpi_rank, int mpi_size, H5FD_t *lf, hid_t dxpl, H
verify_block[0] = count[0];
if (test_selection_io_read_verify(dxpl, mpi_rank, verify_start, verify_block, lf, type, 1, &mem_spaces[0],
&file_spaces[0], &addrs[0], element_sizes, 1, (int **)&erbufs[0],
- FALSE) < 0)
+ false) < 0)
P_TEST_ERROR;
MPI_Barrier(comm);
@@ -5570,7 +5570,7 @@ test_selection_io_types_1d(int mpi_rank, int mpi_size, H5FD_t *lf, hid_t dxpl, H
verify_block[0] = block[0];
if (test_selection_io_read_verify(dxpl, mpi_rank, verify_start, verify_block, lf, type, 1, &mem_spaces[0],
&file_spaces[0], &addrs[0], element_sizes, 1, (int **)&fbufs[0],
- FALSE) < 0)
+ false) < 0)
P_TEST_ERROR;
MPI_Barrier(comm);
@@ -5616,7 +5616,7 @@ test_selection_io_types_1d(int mpi_rank, int mpi_size, H5FD_t *lf, hid_t dxpl, H
verify_block[0] = (count[0] * stride[0]);
if (test_selection_io_read_verify(dxpl, mpi_rank, verify_start, verify_block, lf, type, 1, &mem_spaces[0],
&file_spaces[0], &addrs[0], element_sizes, 1, (int **)&erbufs[0],
- FALSE) < 0)
+ false) < 0)
P_TEST_ERROR;
MPI_Barrier(comm);
@@ -5644,7 +5644,7 @@ test_selection_io_types_1d(int mpi_rank, int mpi_size, H5FD_t *lf, hid_t dxpl, H
verify_block[0] = block[0];
if (test_selection_io_read_verify(dxpl, mpi_rank, verify_start, verify_block, lf, type, 1, &mem_spaces[0],
&file_spaces[0], &addrs[0], element_sizes, 1, (int **)&fbufs[0],
- FALSE) < 0)
+ false) < 0)
P_TEST_ERROR;
MPI_Barrier(comm);
@@ -5902,7 +5902,7 @@ main(int argc, char **argv)
int provided = 0;
#endif
int mpi_size;
- int mpi_rank;
+ int mpi_rank = 0;
int ret;
#ifdef H5_HAVE_SUBFILING_VFD
diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c
index ed0510c..584ca1f 100644
--- a/testpar/testphdf5.c
+++ b/testpar/testphdf5.c
@@ -234,9 +234,11 @@ parse_options(int argc, char **argv)
nerrors++;
return (1);
}
- printf("Test filenames are:\n");
- for (i = 0; i < n; i++)
- printf(" %s\n", filenames[i]);
+ if (mpi_rank == 0) {
+ printf("Test filenames are:\n");
+ for (i = 0; i < n; i++)
+ printf(" %s\n", filenames[i]);
+ }
}
return (0);
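
The change above guards the filename listing so it is printed once, by rank 0, instead of once per MPI process. The same pattern as a self-contained sketch; the helper is hypothetical and assumes MPI_Init has already run:

#include <stdio.h>
#include <mpi.h>

static void
demo_rank0_listing(char *const *filenames, int n)
{
    int mpi_rank = 0;

    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    if (mpi_rank == 0) { /* print once, not once per rank */
        printf("Test filenames are:\n");
        for (int i = 0; i < n; i++)
            printf("    %s\n", filenames[i]);
    }
}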
@@ -248,7 +250,7 @@ parse_options(int argc, char **argv)
hid_t
create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
{
- hid_t ret_pl = -1;
+ hid_t ret_pl = H5I_INVALID_HID;
herr_t ret; /* generic return value */
int mpi_rank; /* mpi variables */
@@ -265,9 +267,9 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
/* set Parallel access with communicator */
ret = H5Pset_fapl_mpio(ret_pl, comm, info);
VRFY((ret >= 0), "H5Pset_fapl_mpio succeeded");
- ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE);
+ ret = H5Pset_all_coll_metadata_ops(ret_pl, true);
VRFY((ret >= 0), "H5Pset_all_coll_metadata_ops succeeded");
- ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
+ ret = H5Pset_coll_metadata_write(ret_pl, true);
VRFY((ret >= 0), "H5Pset_coll_metadata_write succeeded");
return (ret_pl);
}
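
With the true literals in place, create_faccess_plist still returns a file-access property list with MPI-IO plus collective metadata reads and writes enabled. A hedged usage sketch follows: create_faccess_plist and FACC_MPIO are assumed to come from testphdf5.h as in the rest of the test suite, while the caller and file name here are made up.

#include <mpi.h>
#include "testphdf5.h" /* create_faccess_plist(), FACC_MPIO (assumed) */

static hid_t
demo_create_coll_md_file(void) /* hypothetical */
{
    hid_t fapl_id = H5I_INVALID_HID;
    hid_t file_id = H5I_INVALID_HID;

    fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, FACC_MPIO);
    if (fapl_id < 0)
        return H5I_INVALID_HID;

    file_id = H5Fcreate("demo_coll_md.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
    H5Pclose(fapl_id);

    return file_id; /* caller closes with H5Fclose() */
}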
@@ -314,6 +316,8 @@ main(int argc, char **argv)
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ mpi_rank_framework_g = mpi_rank;
+
dim0 = ROW_FACTOR * mpi_size;
dim1 = COL_FACTOR * mpi_size;
diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h
index 631ad44..6ac8080 100644
--- a/testpar/testphdf5.h
+++ b/testpar/testphdf5.h
@@ -27,12 +27,12 @@ enum H5TEST_COLL_CHUNK_API {
API_MULTI_IND
};
-#ifndef FALSE
-#define FALSE 0
+#ifndef false
+#define false 0
#endif
-#ifndef TRUE
-#define TRUE 1
+#ifndef true
+#define true 1
#endif
/* Constants definitions */
@@ -82,8 +82,8 @@ enum H5TEST_COLL_CHUNK_API {
#define LINK_TRUE_NUM_CHUNK 2
#define LINK_FALSE_NUM_CHUNK 6
#define MULTI_TRUE_PERCENT 50
-#define LINK_TRUE_CHUNK_NAME "h5_link_chunk_true"
-#define LINK_FALSE_CHUNK_NAME "h5_link_chunk_false"
+#define LINK_TRUE_CHUNK_NAME "h5_link_chunk_TRUE"
+#define LINK_FALSE_CHUNK_NAME "h5_link_chunk_FALSE"
#define LINK_HARD_CHUNK_NAME "h5_link_chunk_hard"
#define MULTI_HARD_CHUNK_NAME "h5_multi_chunk_hard"
#define MULTI_COLL_CHUNK_NAME "h5_multi_chunk_coll"