summaryrefslogtreecommitdiffstats
path: root/testpar
diff options
context:
space:
mode:
authorQuincey Koziol <koziol@lbl.gov>2021-03-24 20:57:32 (GMT)
committerGitHub <noreply@github.com>2021-03-24 20:57:32 (GMT)
commit3dbbd8aea1b8389ad7df0c982c9a78a9a2d4dee8 (patch)
treec395b2ba3cc7a3c3d8dd8b1acdb5ef239cfed69d /testpar
parent1f637e49d98062562960dedff2a7c67423346c7d (diff)
downloadhdf5-3dbbd8aea1b8389ad7df0c982c9a78a9a2d4dee8.zip
hdf5-3dbbd8aea1b8389ad7df0c982c9a78a9a2d4dee8.tar.gz
hdf5-3dbbd8aea1b8389ad7df0c982c9a78a9a2d4dee8.tar.bz2
Align with "parallel fence" changes (#479)
* Small code updates and simplifications * Committing clang-format changes * Fix 'make installcheck' for parallel builds Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com>
Diffstat (limited to 'testpar')
-rw-r--r--testpar/t_bigio.c95
-rw-r--r--testpar/t_cache.c50
-rw-r--r--testpar/t_mdset.c141
3 files changed, 56 insertions, 230 deletions
diff --git a/testpar/t_bigio.c b/testpar/t_bigio.c
index 7884ecb..ed99fc4 100644
--- a/testpar/t_bigio.c
+++ b/testpar/t_bigio.c
@@ -1104,7 +1104,7 @@ static void
single_rank_independent_io(void)
{
if (mpi_rank_g == 0)
- HDprintf("single_rank_independent_io\n");
+ HDprintf("\nSingle Rank Independent I/O\n");
if (MAIN_PROCESS) {
hsize_t dims[] = {LARGE_DIM};
@@ -1223,8 +1223,6 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
* Programmer: Unknown
* July 12th, 2004
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
@@ -1250,7 +1248,7 @@ coll_chunk1(void)
{
const char *filename = FILENAME[0];
if (mpi_rank_g == 0)
- HDprintf("coll_chunk1\n");
+ HDprintf("\nCollective chunk I/O Test #1\n");
coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
@@ -1303,7 +1301,7 @@ coll_chunk2(void)
{
const char *filename = FILENAME[0];
if (mpi_rank_g == 0)
- HDprintf("coll_chunk2\n");
+ HDprintf("\nCollective chunk I/O Test #2\n");
coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
@@ -1329,8 +1327,6 @@ coll_chunk2(void)
* Programmer: Unknown
* July 12th, 2004
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
@@ -1357,7 +1353,7 @@ coll_chunk3(void)
{
const char *filename = FILENAME[0];
if (mpi_rank_g == 0)
- HDprintf("coll_chunk3\n");
+ HDprintf("\nCollective chunk I/O Test #3\n");
coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
@@ -1385,17 +1381,9 @@ coll_chunk3(void)
*
* Failure: -1
*
- * Modifications:
- * Remove invalid temporary property checkings for API_LINK_HARD and
- * API_LINK_TRUE cases.
- * Programmer: Jonathan Kim
- * Date: 2012-10-10
- *
* Programmer: Unknown
* July 12th, 2004
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
@@ -1845,55 +1833,10 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap
HDfree(data_origin1);
}
-/*****************************************************************************
- *
- * Function: do_express_test()
- *
- * Purpose: Do an MPI_Allreduce to obtain the maximum value returned
- * by GetTestExpress() across all processes. Return this
- * value.
- *
- * Environment variables can be different across different
- * processes. This function ensures that all processes agree
- * on whether to do an express test.
- *
- * Return: Success: Maximum of the values returned by
- * GetTestExpress() across all processes.
- *
- * Failure: -1
- *
- * Programmer: JRM -- 4/25/06
- *
- *****************************************************************************/
-static int
-do_express_test(int world_mpi_rank)
-{
- int express_test;
- int max_express_test;
- int result;
-
- express_test = GetTestExpress();
-
- result =
- MPI_Allreduce((void *)&express_test, (void *)&max_express_test, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
-
- if (result != MPI_SUCCESS) {
- nerrors++;
- max_express_test = -1;
- if (VERBOSE_MED && (world_mpi_rank == 0)) {
- HDfprintf(stdout, "%d:%s: MPI_Allreduce() failed.\n", world_mpi_rank, FUNC);
- }
- }
-
- return (max_express_test);
-
-} /* do_express_test() */
-
int
main(int argc, char **argv)
{
- int ExpressMode = 0;
- hsize_t newsize = 1048576;
+ hsize_t newsize = 1048576;
/* Set the bigio processing limit to be 'newsize' bytes */
hsize_t oldsize = H5_mpi_set_bigio_count(newsize);
@@ -1902,9 +1845,8 @@ main(int argc, char **argv)
* that we try to ensure that our bigio handling is actually
* invoked and tested.
*/
- if (newsize != oldsize) {
+ if (newsize != oldsize)
bigcount = newsize * 2;
- }
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size_g);
@@ -1915,34 +1857,25 @@ main(int argc, char **argv)
* hang in the atexit post processing in which it may try to make MPI
* calls. By then, MPI calls may not work.
*/
- if (H5dont_atexit() < 0) {
+ if (H5dont_atexit() < 0)
HDprintf("Failed to turn off atexit processing. Continue.\n");
- };
/* set alarm. */
ALARM_ON;
- ExpressMode = do_express_test(mpi_rank_g);
-
dataset_big_write();
MPI_Barrier(MPI_COMM_WORLD);
dataset_big_read();
MPI_Barrier(MPI_COMM_WORLD);
- if (ExpressMode > 0) {
- if (mpi_rank_g == 0)
- HDprintf("***Express test mode on. Several tests are skipped\n");
- }
- else {
- coll_chunk1();
- MPI_Barrier(MPI_COMM_WORLD);
- coll_chunk2();
- MPI_Barrier(MPI_COMM_WORLD);
- coll_chunk3();
- MPI_Barrier(MPI_COMM_WORLD);
- single_rank_independent_io();
- }
+ coll_chunk1();
+ MPI_Barrier(MPI_COMM_WORLD);
+ coll_chunk2();
+ MPI_Barrier(MPI_COMM_WORLD);
+ coll_chunk3();
+ MPI_Barrier(MPI_COMM_WORLD);
+ single_rank_independent_io();
/* turn off alarm */
ALARM_OFF;
diff --git a/testpar/t_cache.c b/testpar/t_cache.c
index 4846a75..4cf1139 100644
--- a/testpar/t_cache.c
+++ b/testpar/t_cache.c
@@ -6874,28 +6874,20 @@ main(int argc, char **argv)
* hang in the atexit post processing in which it may try to make MPI
* calls. By then, MPI calls may not work.
*/
- if (H5dont_atexit() < 0) {
+ if (H5dont_atexit() < 0)
HDprintf("%d:Failed to turn off atexit processing. Continue.\n", mpi_rank);
- };
+
H5open();
express_test = do_express_test();
-#if 0 /* JRM */
- express_test = 0;
-#endif /* JRM */
- if (express_test) {
-
+ if (express_test)
virt_num_data_entries = EXPRESS_VIRT_NUM_DATA_ENTRIES;
- }
- else {
-
+ else
virt_num_data_entries = STD_VIRT_NUM_DATA_ENTRIES;
- }
#ifdef H5_HAVE_MPE
- if (MAINPROCESS) {
+ if (MAINPROCESS)
HDprintf(" Tests compiled for MPE.\n");
- }
virt_num_data_entries = MPE_VIRT_NUM_DATA_ENTIES;
#endif /* H5_HAVE_MPE */
@@ -6908,11 +6900,8 @@ main(int argc, char **argv)
}
if (mpi_size < 3) {
-
- if (MAINPROCESS) {
-
+ if (MAINPROCESS)
HDprintf(" Need at least 3 processes. Exiting.\n");
- }
goto finish;
}
@@ -6930,27 +6919,22 @@ main(int argc, char **argv)
/* setup file access property list with the world communicator */
if (FAIL == (fapl = H5Pcreate(H5P_FILE_ACCESS))) {
nerrors++;
- if (verbose) {
+ if (verbose)
HDfprintf(stdout, "%d:%s: H5Pcreate() failed 1.\n", world_mpi_rank, FUNC);
- }
}
if (H5Pset_fapl_mpio(fapl, world_mpi_comm, MPI_INFO_NULL) < 0) {
-
nerrors++;
- if (verbose) {
+ if (verbose)
HDfprintf(stdout, "%d:%s: H5Pset_fapl_mpio() failed 1.\n", world_mpi_rank, FUNC);
- }
}
/* fix the file names */
for (u = 0; u < sizeof(FILENAME) / sizeof(FILENAME[0]) - 1; ++u) {
if (h5_fixname(FILENAME[u], fapl, filenames[u], sizeof(filenames[u])) == NULL) {
-
nerrors++;
- if (verbose) {
+ if (verbose)
HDfprintf(stdout, "%d:%s: h5_fixname() failed.\n", world_mpi_rank, FUNC);
- }
break;
}
}
@@ -6958,9 +6942,8 @@ main(int argc, char **argv)
/* close the fapl before we set it up again */
if (H5Pclose(fapl) < 0) {
nerrors++;
- if (verbose) {
+ if (verbose)
HDfprintf(stdout, "%d:%s: H5Pclose() failed.\n", world_mpi_rank, FUNC);
- }
}
/* now create the fapl again, excluding the server process. */
@@ -6969,32 +6952,25 @@ main(int argc, char **argv)
/* setup file access property list */
if (FAIL == (fapl = H5Pcreate(H5P_FILE_ACCESS))) {
nerrors++;
- if (verbose) {
+ if (verbose)
HDfprintf(stdout, "%d:%s: H5Pcreate() failed 2.\n", world_mpi_rank, FUNC);
- }
}
if (H5Pset_fapl_mpio(fapl, file_mpi_comm, MPI_INFO_NULL) < 0) {
-
nerrors++;
- if (verbose) {
+ if (verbose)
HDfprintf(stdout, "%d:%s: H5Pset_fapl_mpio() failed 2.\n", world_mpi_rank, FUNC);
- }
}
}
setup_rand();
max_nerrors = get_max_nerrors();
-
if (max_nerrors != 0) {
/* errors in setup -- no point in continuing */
-
- if (world_mpi_rank == 0) {
-
+ if (world_mpi_rank == 0)
HDfprintf(stdout, "Errors in test initialization. Exiting.\n");
- }
goto finish;
}
diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c
index 2eca297..3041e77 100644
--- a/testpar/t_mdset.c
+++ b/testpar/t_mdset.c
@@ -129,12 +129,6 @@ zero_dim_dset(void)
/*
* Example of using PHDF5 to create ndatasets datasets. Each process write
* a slab of array to the file.
- *
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * JRM - 8/11/04
*/
void
multiple_dset_write(void)
@@ -218,12 +212,6 @@ multiple_dset_write(void)
}
/* Example of using PHDF5 to create, write, and read compact dataset.
- *
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * JRM - 8/11/04
*/
void
compact_dataset(void)
@@ -353,14 +341,6 @@ compact_dataset(void)
/*
* Example of using PHDF5 to create, write, and read dataset and attribute
* of Null dataspace.
- *
- * Changes: Removed the assert that mpi_size <= the SIZE #define.
- * As best I can tell, this assert isn't needed here,
- * and in any case, the SIZE #define is being removed
- * in an update of the functions in this file to run
- * with an arbitrary number of processes.
- *
- * JRM - 8/24/04
*/
void
null_dataset(void)
@@ -465,14 +445,6 @@ null_dataset(void)
* Actual data is _not_ written to these datasets. Dataspaces are exact
* sizes(2GB, 4GB, etc.), but the metadata for the file pushes the file over
* the boundary of interest.
- *
- * Changes: Removed the assert that mpi_size <= the SIZE #define.
- * As best I can tell, this assert isn't needed here,
- * and in any case, the SIZE #define is being removed
- * in an update of the functions in this file to run
- * with an arbitrary number of processes.
- *
- * JRM - 8/11/04
*/
void
big_dataset(void)
@@ -594,16 +566,6 @@ big_dataset(void)
/* Example of using PHDF5 to read a partial written dataset. The dataset does
* not have actual data written to the entire raw data area and relies on the
* default fill value of zeros to work correctly.
- *
- * Changes: Removed the assert that mpi_size <= the SIZE #define.
- * As best I can tell, this assert isn't needed here,
- * and in any case, the SIZE #define is being removed
- * in an update of the functions in this file to run
- * with an arbitrary number of processes.
- *
- * Also added code to free dynamically allocated buffers.
- *
- * JRM - 8/11/04
*/
void
dataset_fillvalue(void)
@@ -710,15 +672,16 @@ dataset_fillvalue(void)
for (i = 0; i < (int)dset_dims[0]; i++)
for (j = 0; j < (int)dset_dims[1]; j++)
for (k = 0; k < (int)dset_dims[2]; k++)
- for (l = 0; l < (int)dset_dims[3]; l++, twdata++, trdata++)
+ for (l = 0; l < (int)dset_dims[3]; l++, trdata++)
if (*trdata != 0)
if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i,
- j, k, l, *trdata);
+ HDprintf(
+ "Rank %d: Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n",
+ mpi_rank, i, j, k, l, *trdata);
if (err_num > MAX_ERR_REPORT && !VERBOSE_MED)
- HDprintf("[more errors ...]\n");
+ HDprintf("Rank %d: [more errors ...]\n", mpi_rank);
if (err_num) {
- HDprintf("%d errors found in check_value\n", err_num);
+ HDprintf("Rank %d: %d errors found in check_value\n", mpi_rank, err_num);
nerrors++;
}
}
@@ -856,12 +819,6 @@ collective_group_write_independent_group_read(void)
/* Write multiple groups with a chunked dataset in each group collectively.
* These groups and datasets are for testing independent read later.
- *
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * JRM - 8/16/04
*/
void
collective_group_write(void)
@@ -896,6 +853,7 @@ collective_group_write(void)
plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
+ VRFY((fid >= 0), "H5Fcreate");
H5Pclose(plist);
/* decide the hyperslab according to process number. */
@@ -909,13 +867,13 @@ collective_group_write(void)
ret2 = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
VRFY((memspace >= 0), "memspace");
VRFY((filespace >= 0), "filespace");
- VRFY((ret1 >= 0), "mgroup memspace selection");
- VRFY((ret2 >= 0), "mgroup filespace selection");
+ VRFY((ret1 == 0), "mgroup memspace selection");
+ VRFY((ret2 == 0), "mgroup filespace selection");
dcpl = H5Pcreate(H5P_DATASET_CREATE);
ret1 = H5Pset_chunk(dcpl, 2, chunk_size);
VRFY((dcpl >= 0), "dataset creation property");
- VRFY((ret1 >= 0), "set chunk for dataset creation property");
+ VRFY((ret1 == 0), "set chunk for dataset creation property");
/* creates ngroups groups under the root group, writes chunked
* datasets in parallel. */
@@ -932,10 +890,14 @@ collective_group_write(void)
for (j = 0; j < size; j++)
outme[(i * size) + j] = (i + j) * 1000 + mpi_rank;
- H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme);
+ ret1 = H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme);
+ VRFY((ret1 == 0), "H5Dwrite");
- H5Dclose(did);
- H5Gclose(gid);
+ ret1 = H5Dclose(did);
+ VRFY((ret1 == 0), "H5Dclose");
+
+ ret1 = H5Gclose(gid);
+ VRFY((ret1 == 0), "H5Gclose");
#ifdef BARRIER_CHECKS
if (!((m + 1) % 10)) {
@@ -948,7 +910,9 @@ collective_group_write(void)
H5Pclose(dcpl);
H5Sclose(filespace);
H5Sclose(memspace);
- H5Fclose(fid);
+
+ ret1 = H5Fclose(fid);
+ VRFY((ret1 == 0), "H5Fclose");
HDfree(outme);
}
@@ -964,6 +928,7 @@ independent_group_read(void)
const H5Ptest_param_t *pt;
char * filename;
int ngroups;
+ herr_t ret;
pt = GetTestParameters();
filename = pt->name;
@@ -975,6 +940,7 @@ independent_group_read(void)
H5Pset_all_coll_metadata_ops(plist, FALSE);
fid = H5Fopen(filename, H5F_ACC_RDONLY, plist);
+ VRFY((fid > 0), "H5Fopen");
H5Pclose(plist);
/* open groups and read datasets. Odd number processes read even number
@@ -989,20 +955,11 @@ independent_group_read(void)
group_dataset_read(fid, mpi_rank, m);
}
- H5Fclose(fid);
+ ret = H5Fclose(fid);
+ VRFY((ret == 0), "H5Fclose");
}
/* Open and read datasets and compare data
- *
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * Also added code to verify the results of dynamic memory
- * allocations, and to free dynamically allocated memory
- * when we are done with it.
- *
- * JRM - 8/16/04
*/
static void
group_dataset_read(hid_t fid, int mpi_rank, int m)
@@ -1035,16 +992,17 @@ group_dataset_read(hid_t fid, int mpi_rank, int m)
/* this is the original value */
for (i = 0; i < size; i++)
- for (j = 0; j < size; j++) {
+ for (j = 0; j < size; j++)
outdata[(i * size) + j] = (i + j) * 1000 + mpi_rank;
- }
/* compare the original value(outdata) to the value in file(indata).*/
ret = check_value(indata, outdata, size);
VRFY((ret == 0), "check the data");
- H5Dclose(did);
- H5Gclose(gid);
+ ret = H5Dclose(did);
+ VRFY((ret == 0), "H5Dclose");
+ ret = H5Gclose(gid);
+ VRFY((ret == 0), "H5Gclose");
HDfree(indata);
HDfree(outdata);
@@ -1076,11 +1034,6 @@ group_dataset_read(hid_t fid, int mpi_rank, int m)
* + means the group has attribute(s).
* ' means the datasets in the groups have attribute(s).
*
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * JRM - 8/16/04
*/
void
multiple_group_write(void)
@@ -1164,12 +1117,6 @@ multiple_group_write(void)
/*
* In a group, creates NDATASETS datasets. Each process writes a hyperslab
* of a data array to the file.
- *
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * JRM - 8/16/04
*/
static void
write_dataset(hid_t memspace, hid_t filespace, hid_t gid)
@@ -1243,12 +1190,6 @@ create_group_recursive(hid_t memspace, hid_t filespace, hid_t gid, int counter)
/*
* This function is to verify the data from multiple group testing. It opens
* every dataset in every group and check their correctness.
- *
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * JRM - 8/11/04
*/
void
multiple_group_read(void)
@@ -1323,12 +1264,6 @@ multiple_group_read(void)
/*
* This function opens all the datasets in a certain, checks the data using
* dataset_vrfy function.
- *
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
- *
- * JRM - 8/11/04
*/
static int
read_dataset(hid_t memspace, hid_t filespace, hid_t gid)
@@ -1475,12 +1410,6 @@ read_attribute(hid_t obj_id, int this_type, int num)
/* This functions compares the original data with the read-in data for its
* hyperslab part only by process ID.
- *
- * Changes: Modified function to use a passed in size parameter
- * instead of the old SIZE #define. This should let us
- * run with an arbitrary number of processes.
- *
- * JRM - 8/16/04
*/
static int
check_value(DATATYPE *indata, DATATYPE *outdata, int size)
@@ -1513,12 +1442,6 @@ check_value(DATATYPE *indata, DATATYPE *outdata, int size)
}
/* Decide the portion of data chunk in dataset by process ID.
- *
- * Changes: Modified function to use a passed in size parameter
- * instead of the old SIZE #define. This should let us
- * run with an arbitrary number of processes.
- *
- * JRM - 8/11/04
*/
static void
@@ -1560,8 +1483,6 @@ get_slab(hsize_t chunk_origin[], hsize_t chunk_dims[], hsize_t count[], hsize_t
* This function reproduces this situation. At present the test hangs
* on failure.
* JRM - 9/13/04
- *
- * Changes: None.
*/
#define N 4
@@ -1805,10 +1726,6 @@ io_mode_confusion(void)
* cache clients will have to construct on disk images on demand.
*
* JRM -- 10/13/10
- *
- * Changes:
- * Break it into two parts, a writer to write the file and a reader
- * the correctness of the writer. AKC -- 2010/10/27
*/
#define NUM_DATA_SETS 4