author     Allen Byrne <50328838+byrnHDF@users.noreply.github.com>   2021-08-25 23:35:25 (GMT)
committer  GitHub <noreply@github.com>                               2021-08-25 23:35:25 (GMT)
commit     32cb23fe7e5f0a29d113960a36039adfde8398d8 (patch)
tree       271a5ce6b755bcd95deb355426efe9246498fde4 /testpar
parent     e367c80b39529c9a863c3236e25bfb46b707b815 (diff)
Merge of mostly clang-tidy changes from 1.12 (#965)
Diffstat (limited to 'testpar')
-rw-r--r--  testpar/t_cache.c             |  2
-rw-r--r--  testpar/t_cache_image.c       | 10
-rw-r--r--  testpar/t_chunk_alloc.c       |  4
-rw-r--r--  testpar/t_filters_parallel.c  |  8
-rw-r--r--  testpar/t_mdset.c             |  4
-rw-r--r--  testpar/t_shapesame.c         | 22
6 files changed, 22 insertions, 28 deletions
diff --git a/testpar/t_cache.c b/testpar/t_cache.c
index 94d09e6..c7d9138 100644
--- a/testpar/t_cache.c
+++ b/testpar/t_cache.c
@@ -211,7 +211,9 @@ struct datum data[NUM_DATA_ENTRIES];
 #define STD_VIRT_NUM_DATA_ENTRIES     NUM_DATA_ENTRIES
 #define EXPRESS_VIRT_NUM_DATA_ENTRIES (NUM_DATA_ENTRIES / 10)
 /* Use a smaller test size to avoid creating huge MPE logfiles. */
+#ifdef H5_HAVE_MPE
 #define MPE_VIRT_NUM_DATA_ENTIES (NUM_DATA_ENTRIES / 100)
+#endif
 
 int virt_num_data_entries = NUM_DATA_ENTRIES;
 
diff --git a/testpar/t_cache_image.c b/testpar/t_cache_image.c
index fcbe83b..a27009d 100644
--- a/testpar/t_cache_image.c
+++ b/testpar/t_cache_image.c
@@ -3117,8 +3117,9 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
 
             H5_FAILED();
 
-            if (show_progress)
-                HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", failure_mssg);
+            if (show_progress) {
+                HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
+            }
         }
     }
 
@@ -3401,8 +3402,9 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
 
             H5_FAILED();
 
-            if (show_progress)
-                HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", failure_mssg);
+            if (show_progress) {
+                HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
+            }
         }
     }
 
diff --git a/testpar/t_chunk_alloc.c b/testpar/t_chunk_alloc.c
index 865e1f8..06f6f20 100644
--- a/testpar/t_chunk_alloc.c
+++ b/testpar/t_chunk_alloc.c
@@ -148,7 +148,7 @@ create_chunked_dataset(const char *filename, int chunk_factor, write_type write_
 
         /* verify file size */
         filesize     = get_filesize(filename);
-        est_filesize = nchunks * CHUNK_SIZE * sizeof(unsigned char);
+        est_filesize = (MPI_Offset)nchunks * (MPI_Offset)CHUNK_SIZE * (MPI_Offset)sizeof(unsigned char);
         VRFY((filesize >= est_filesize), "file size check");
     }
 
@@ -284,7 +284,7 @@ parallel_access_dataset(const char *filename, int chunk_factor, access_type acti
 
     /* verify file size */
     filesize     = get_filesize(filename);
-    est_filesize = nchunks * CHUNK_SIZE * sizeof(unsigned char);
+    est_filesize = (MPI_Offset)nchunks * (MPI_Offset)CHUNK_SIZE * (MPI_Offset)sizeof(unsigned char);
     VRFY((filesize >= est_filesize), "file size check");
 
     /* Can close some plists */
diff --git a/testpar/t_filters_parallel.c b/testpar/t_filters_parallel.c
index 5153bce..50cd306 100644
--- a/testpar/t_filters_parallel.c
+++ b/testpar/t_filters_parallel.c
@@ -1762,8 +1762,8 @@ test_write_3d_filtered_dataset_overlap(void)
                                      WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS)) /
                       (hsize_t)(WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS))
-                  /* Add the amount that gets added when a rank moves down to its next section vertically in the
-                     dataset */
+                  /* Add the amount that gets added when a rank moves down to its next
+                     section vertically in the dataset */
                   + ((hsize_t)(WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS) *
                      (i / (hsize_t)(mpi_size * WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH *
                                     WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS))));
@@ -4497,8 +4497,8 @@ test_read_3d_filtered_dataset_overlap(void)
                                      READ_SHARED_FILTERED_CHUNKS_3D_NCOLS)) /
                       (hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * READ_SHARED_FILTERED_CHUNKS_3D_NCOLS))
-                  /* Add the amount that gets added when a rank moves down to its next section vertically in the
-                     dataset */
+                  /* Add the amount that gets added when a rank moves down to its next
+                     section vertically in the dataset */
                   + ((hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * READ_SHARED_FILTERED_CHUNKS_3D_NCOLS) *
                      (i / (hsize_t)(mpi_size * READ_SHARED_FILTERED_CHUNKS_3D_DEPTH *
                                     READ_SHARED_FILTERED_CHUNKS_3D_NCOLS))));
diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c
index a75ffe3..6dfa6f8 100644
--- a/testpar/t_mdset.c
+++ b/testpar/t_mdset.c
@@ -347,7 +347,7 @@ null_dataset(void)
     hid_t    iof, plist, dxpl, dataset, attr, sid;
     unsigned uval = 2; /* Buffer for writing to dataset */
     int      val  = 1; /* Buffer for writing to attribute */
-    int      nelem;
+    hssize_t nelem;
     char     dname[]     = "dataset";
     char     attr_name[] = "attribute";
     herr_t   ret;
@@ -597,7 +597,7 @@ dataset_fillvalue(void)
 
     /* Set the dataset dimension to be one row more than number of processes */
     /* and calculate the actual dataset size. */
-    dset_dims[0] = mpi_size + 1;
+    dset_dims[0] = (hsize_t)(mpi_size + 1);
     dset_size    = dset_dims[0] * dset_dims[1] * dset_dims[2] * dset_dims[3];
 
     /* Allocate space for the buffers */
diff --git a/testpar/t_shapesame.c b/testpar/t_shapesame.c
index 16226ee..ab9ce26 100644
--- a/testpar/t_shapesame.c
+++ b/testpar/t_shapesame.c
@@ -446,11 +446,8 @@ hs_dr_pio_test__setup(const int test_num, const int edge_size, const int checker
     VRFY((ret >= 0), "H5Dwrite() small_dataset initial write succeeded");
 
     /* sync with the other processes before checking data */
-    if (!use_collective_io) {
-
-        mrc = MPI_Barrier(MPI_COMM_WORLD);
-        VRFY((mrc == MPI_SUCCESS), "Sync after small dataset writes");
-    }
+    mrc = MPI_Barrier(MPI_COMM_WORLD);
+    VRFY((mrc == MPI_SUCCESS), "Sync after small dataset writes");
 
     /* read the small data set back to verify that it contains the
      * expected data.  Note that each process reads in the entire
@@ -520,11 +517,8 @@ hs_dr_pio_test__setup(const int test_num, const int edge_size, const int checker
     VRFY((ret >= 0), "H5Dwrite() large_dataset initial write succeeded");
 
     /* sync with the other processes before checking data */
-    if (!use_collective_io) {
-
-        mrc = MPI_Barrier(MPI_COMM_WORLD);
-        VRFY((mrc == MPI_SUCCESS), "Sync after large dataset writes");
-    }
+    mrc = MPI_Barrier(MPI_COMM_WORLD);
+    VRFY((mrc == MPI_SUCCESS), "Sync after large dataset writes");
 
     /* read the large data set back to verify that it contains the
      * expected data.  Note that each process reads in the entire
@@ -552,12 +546,8 @@ hs_dr_pio_test__setup(const int test_num, const int edge_size, const int checker
     VRFY((mis_match == FALSE), "large ds init data good.");
 
     /* sync with the other processes before changing data */
-
-    if (!use_collective_io) {
-
-        mrc = MPI_Barrier(MPI_COMM_WORLD);
-        VRFY((mrc == MPI_SUCCESS), "Sync initial values check");
-    }
+    mrc = MPI_Barrier(MPI_COMM_WORLD);
+    VRFY((mrc == MPI_SUCCESS), "Sync initial values check");
 
     return;
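
The est_filesize change in t_chunk_alloc.c casts every operand to MPI_Offset before multiplying, so the product is computed in a 64-bit type rather than in int, where nchunks * CHUNK_SIZE can wrap for large chunk counts. The following standalone sketch illustrates that cast pattern only; the nchunks and CHUNK_SIZE values here are illustrative and are not taken from the HDF5 test itself.

#include <mpi.h>
#include <stdio.h>

#define CHUNK_SIZE 1048576 /* 1 MiB per chunk; illustrative value, not the test's */

int
main(int argc, char **argv)
{
    int        nchunks = 4096; /* large enough that an int product would overflow (4096 * 2^20 = 2^32) */
    MPI_Offset est_filesize;

    MPI_Init(&argc, &argv);

    /* Widen each operand to MPI_Offset (a 64-bit signed type on common MPI
     * implementations) before the multiplication, so no intermediate result
     * is ever evaluated in int. Without the casts, nchunks * CHUNK_SIZE is
     * int arithmetic and can overflow before the assignment.
     */
    est_filesize = (MPI_Offset)nchunks * (MPI_Offset)CHUNK_SIZE * (MPI_Offset)sizeof(unsigned char);

    printf("estimated file size: %lld bytes\n", (long long)est_filesize);

    MPI_Finalize();
    return 0;
}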