author | jhendersonHDF <jhenderson@hdfgroup.org> | 2023-07-18 11:27:07 (GMT) |
---|---|---|
committer | GitHub <noreply@github.com> | 2023-07-18 11:27:07 (GMT) |
commit | aab497a6312a9d8434a7dc7b5a593713fc8fbce0 (patch) | |
tree | 018a95afb94dd103dcbef98e4a383382a52968bf /testpar | |
parent | 919ce7adc2f24414b749c2a6880da00259350881 (diff) | |
Fix some warnings in developer builds (#3247)
* Fix some warnings in developer builds
* Switch approach to Winline flag
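The `testpar` portion of this commit addresses the `switch`-statement warnings in two ways, both visible in the diff below: empty `default:` arms are added where a `switch` over an enum had none, and the remaining enumerators are listed explicitly where a `default:` already existed. A minimal sketch of both patterns, using an illustrative enum rather than an HDF5 type:

```c
#include <stdio.h>

typedef enum { MODE_A, MODE_B, MODE_ERROR } ex_mode_t;

int
main(void)
{
    ex_mode_t mode = MODE_A;

    /* Pattern 1 (t_bigio.c, t_coll_chunk.c): an empty default arm
     * satisfies -Wswitch-default and covers enumerators that the
     * switch deliberately ignores. */
    switch (mode) {
        case MODE_A:
            printf("mode A\n");
            break;

        default:
            break;
    }

    /* Pattern 2 (t_filters_parallel.c): -Wswitch-enum warns about
     * unlisted enumerators even when a default arm exists, so the
     * remaining cases are named explicitly and fall through to it. */
    switch (mode) {
        case MODE_A:
            printf("mode A\n");
            break;
        case MODE_B:
        case MODE_ERROR:
        default:
            printf("other mode\n");
            break;
    }

    return 0;
}
```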
Diffstat (limited to 'testpar')
-rw-r--r-- | testpar/API/t_coll_md_read.c | 30
-rw-r--r-- | testpar/t_bigio.c | 12
-rw-r--r-- | testpar/t_coll_chunk.c | 12
-rw-r--r-- | testpar/t_coll_md.c | 30
-rw-r--r-- | testpar/t_filters_parallel.c | 3
5 files changed, 27 insertions, 60 deletions
diff --git a/testpar/API/t_coll_md_read.c b/testpar/API/t_coll_md_read.c
index 0b019c1..353d5f6 100644
--- a/testpar/API/t_coll_md_read.c
+++ b/testpar/API/t_coll_md_read.c
@@ -369,36 +369,6 @@ test_multi_chunk_io_addrmap_issue(void)
  * I/O with collective metadata reads enabled doesn't cause issues due to
  * collective metadata reads being made only by process 0 in H5D__sort_chunk().
  *
- * NOTE: Due to the way that the threshold value which pertains to this test
- * is currently calculated within HDF5, the following two conditions must be
- * true to trigger the issue:
- *
- * Condition 1: A certain threshold ratio must be met in order to have HDF5
- * obtain all chunk addresses collectively inside H5D__sort_chunk(). This is
- * given by the following:
- *
- *     (sum_chunk * 100) / (dataset_nchunks * mpi_size) >= 30%
- *
- * where:
- *   * `sum_chunk` is the combined sum of the number of chunks selected in
- *     the dataset by all ranks (chunks selected by more than one rank count
- *     individually toward the sum for each rank selecting that chunk)
- *   * `dataset_nchunks` is the number of chunks in the dataset (selected
- *     or not)
- *   * `mpi_size` is the size of the MPI Communicator
- *
- * Condition 2: `sum_chunk` divided by `mpi_size` must exceed or equal a certain
- * threshold (as of this writing, 10000).
- *
- * To satisfy both these conditions, we #define a macro,
- * LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM, which corresponds to the
- * value of the H5D_ALL_CHUNK_ADDR_THRES_COL_NUM macro in H5Dmpio.c (the
- * 10000 threshold from condition 2). We then create a dataset of that many
- * chunks and have each MPI rank write to and read from a piece of every single
- * chunk in the dataset. This ensures chunk utilization is the max possible
- * and exceeds our 30% target ratio, while always exactly matching the numeric
- * chunk threshold value of condition 2.
- *
  * Failure in this test may either cause a hang, or, due to how the MPI calls
  * pertaining to this issue might mistakenly match up, may cause an MPI error
  * message similar to:
diff --git a/testpar/t_bigio.c b/testpar/t_bigio.c
index 49c39e4..0c4d125 100644
--- a/testpar/t_bigio.c
+++ b/testpar/t_bigio.c
@@ -1505,6 +1505,9 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap
             status = H5Sselect_all(file_dataspace);
             VRFY_G((status >= 0), "H5Sselect_all succeeded");
             break;
+
+        default:
+            break;
     }
 
     switch (mem_selection) {
@@ -1528,6 +1531,9 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap
             status = H5Sselect_all(mem_dataspace);
             VRFY_G((status >= 0), "H5Sselect_all succeeded");
             break;
+
+        default:
+            break;
     }
 
     /* set up the collective transfer property list */
@@ -1765,6 +1771,9 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap
             status = H5Sselect_all(file_dataspace);
             VRFY_G((status >= 0), "H5Sselect_all succeeded");
             break;
+
+        default:
+            break;
     }
 
     switch (mem_selection) {
@@ -1788,6 +1797,9 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap
             status = H5Sselect_all(mem_dataspace);
             VRFY_G((status >= 0), "H5Sselect_all succeeded");
             break;
+
+        default:
+            break;
     }
 
     /* fill dataset with test data */
diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c
index ce9d09c..5dd0bb8 100644
--- a/testpar/t_coll_chunk.c
+++ b/testpar/t_coll_chunk.c
@@ -636,6 +636,9 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap
             status = H5Sselect_all(file_dataspace);
             VRFY((status >= 0), "H5Sselect_all succeeded");
             break;
+
+        default:
+            break;
     }
 
     switch (mem_selection) {
@@ -659,6 +662,9 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap
             status = H5Sselect_all(mem_dataspace);
             VRFY((status >= 0), "H5Sselect_all succeeded");
             break;
+
+        default:
+            break;
     }
 
     /* set up the collective transfer property list */
@@ -899,6 +905,9 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap
             status = H5Sselect_all(file_dataspace);
             VRFY((status >= 0), "H5Sselect_all succeeded");
             break;
+
+        default:
+            break;
     }
 
     switch (mem_selection) {
@@ -922,6 +931,9 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap
             status = H5Sselect_all(mem_dataspace);
             VRFY((status >= 0), "H5Sselect_all succeeded");
             break;
+
+        default:
+            break;
     }
 
     /* fill dataset with test data */
diff --git a/testpar/t_coll_md.c b/testpar/t_coll_md.c
index b6442a5..1220111 100644
--- a/testpar/t_coll_md.c
+++ b/testpar/t_coll_md.c
@@ -342,36 +342,6 @@ test_multi_chunk_io_addrmap_issue(void)
  * I/O with collective metadata reads enabled doesn't cause issues due to
  * collective metadata reads being made only by process 0 in H5D__sort_chunk().
  *
- * NOTE: Due to the way that the threshold value which pertains to this test
- * is currently calculated within HDF5, the following two conditions must be
- * true to trigger the issue:
- *
- * Condition 1: A certain threshold ratio must be met in order to have HDF5
- * obtain all chunk addresses collectively inside H5D__sort_chunk(). This is
- * given by the following:
- *
- *     (sum_chunk * 100) / (dataset_nchunks * mpi_size) >= 30%
- *
- * where:
- *   * `sum_chunk` is the combined sum of the number of chunks selected in
- *     the dataset by all ranks (chunks selected by more than one rank count
- *     individually toward the sum for each rank selecting that chunk)
- *   * `dataset_nchunks` is the number of chunks in the dataset (selected
- *     or not)
- *   * `mpi_size` is the size of the MPI Communicator
- *
- * Condition 2: `sum_chunk` divided by `mpi_size` must exceed or equal a certain
- * threshold (as of this writing, 10000).
- *
- * To satisfy both these conditions, we #define a macro,
- * LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM, which corresponds to the
- * value of the H5D_ALL_CHUNK_ADDR_THRES_COL_NUM macro in H5Dmpio.c (the
- * 10000 threshold from condition 2). We then create a dataset of that many
- * chunks and have each MPI rank write to and read from a piece of every single
- * chunk in the dataset. This ensures chunk utilization is the max possible
- * and exceeds our 30% target ratio, while always exactly matching the numeric
- * chunk threshold value of condition 2.
- *
  * Failure in this test may either cause a hang, or, due to how the MPI calls
  * pertaining to this issue might mistakenly match up, may cause an MPI error
  * message similar to:
diff --git a/testpar/t_filters_parallel.c b/testpar/t_filters_parallel.c
index d5042b3..bfe306c 100644
--- a/testpar/t_filters_parallel.c
+++ b/testpar/t_filters_parallel.c
@@ -378,6 +378,7 @@ verify_space_alloc_status(hid_t dset_id, hid_t dcpl_id, num_chunks_written_t chu
             else
                 VRFY(space_status == H5D_SPACE_STATUS_NOT_ALLOCATED, "verified space allocation status");
             break;
+        case H5D_ALLOC_TIME_ERROR:
         default:
             if (MAINPROCESS)
                 MESG("unknown space allocation time");
@@ -8702,6 +8703,8 @@ main(int argc, char **argv)
         case H5D_ALLOC_TIME_INCR:
             alloc_time = "Incremental";
             break;
+        case H5D_ALLOC_TIME_DEFAULT:
+        case H5D_ALLOC_TIME_ERROR:
         default:
             alloc_time = "Unknown";
     }
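For context on the comment block deleted above (identically from `testpar/API/t_coll_md_read.c` and `testpar/t_coll_md.c`): its two conditions can be verified with a little arithmetic. A minimal sketch, assuming (as the removed comment describes) that every rank selects a piece of every chunk, so `sum_chunk = nchunks * mpi_size`; the `mpi_size` of 4 below is an arbitrary example value:

```c
#include <stdio.h>

int
main(void)
{
    /* Numbers from the removed comment: 10000 was the value of
     * H5D_ALL_CHUNK_ADDR_THRES_COL_NUM in H5Dmpio.c as of that writing,
     * and 30% is the ratio threshold of condition 1. */
    const long nchunks  = 10000; /* dataset chunks, matching condition 2's threshold */
    const long mpi_size = 4;     /* example communicator size (assumption) */

    /* Every rank selects every chunk, so the per-rank selections sum to: */
    const long sum_chunk = nchunks * mpi_size;

    /* Condition 1: (sum_chunk * 100) / (nchunks * mpi_size) >= 30 */
    const long ratio = (sum_chunk * 100) / (nchunks * mpi_size); /* = 100 */

    /* Condition 2: sum_chunk / mpi_size >= 10000 */
    const long per_rank = sum_chunk / mpi_size; /* = 10000, exactly at threshold */

    printf("condition 1 ratio: %ld%% (needs >= 30%%)\n", ratio);
    printf("condition 2 value: %ld (needs >= 10000)\n", per_rank);
    return 0;
}
```

With full chunk utilization the ratio is always 100%, which is why sizing the dataset at exactly the numeric threshold triggers both conditions at once.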