Diffstat (limited to 'testpar')
-rw-r--r--  testpar/Makefile.am        2
-rw-r--r--  testpar/t_2Gio.c          22
-rw-r--r--  testpar/t_coll_chunk.c     2
-rw-r--r--  testpar/t_coll_md_read.c   2
-rw-r--r--  testpar/t_file.c           2
-rw-r--r--  testpar/t_file_image.c    62
-rw-r--r--  testpar/t_init_term.c      2
-rw-r--r--  testpar/t_mpi.c            2
-rw-r--r--  testpar/t_pread.c         24
-rw-r--r--  testpar/t_prestart.c       6
-rw-r--r--  testpar/t_prop.c          10
-rw-r--r--  testpar/t_pshutdown.c      6
-rw-r--r--  testpar/testpar.h          2
13 files changed, 72 insertions, 72 deletions
diff --git a/testpar/Makefile.am b/testpar/Makefile.am
index 0cdba24..4509945 100644
--- a/testpar/Makefile.am
+++ b/testpar/Makefile.am
@@ -24,7 +24,7 @@ AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_srcdir)/test
# Test scripts--
# testpflush.sh:
TEST_SCRIPT_PARA = testpflush.sh
-SCRIPT_DEPEND = t_pflush1$(EXEEXT) t_pflush2$(EXEEXT)
+SCRIPT_DEPEND = t_pflush1$(EXEEXT) t_pflush2$(EXEEXT)
check_SCRIPTS = $(TEST_SCRIPT_PARA)
diff --git a/testpar/t_2Gio.c b/testpar/t_2Gio.c
index e5ab280..54ea546 100644
--- a/testpar/t_2Gio.c
+++ b/testpar/t_2Gio.c
@@ -533,17 +533,17 @@ dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[]
* with 2 MPI ranks and with $HDF5TestExpress == 0
* i.e. Exhaustive test run is allowed. Otherwise
* the test is skipped.
- *
+ *
* Thanks to l.ferraro@cineca.it for the following test::
*
- * This is a simple test case to reproduce a problem
- * occurring on LUSTRE filesystem with the creation
- * of a 4GB dataset using chunking with parallel HDF5.
- * The test works correctly if disabling chunking or
- * when the bytes assigned to each process is less
- * that 4GB. if equal or more, either hangs or results
- * in a PMPI_Waitall error.
- *
+ * This is a simple test case to reproduce a problem
+ * occurring on LUSTRE filesystem with the creation
+ * of a 4GB dataset using chunking with parallel HDF5.
+ * The test works correctly if disabling chunking or
+ * when the bytes assigned to each process is less
+ * that 4GB. if equal or more, either hangs or results
+ * in a PMPI_Waitall error.
+ *
* $> mpirun -genv I_MPI_EXTRA_FILESYSTEM on
* -genv I_MPI_EXTRA_FILESYSTEM_LIST gpfs
* -n 1 ./h5_mpi_big_dataset.x 1024 1024 1024
@@ -602,7 +602,7 @@ static int MpioTest2G( MPI_Comm comm )
*/
file_id = H5Fcreate(H5FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
VRFY((file_id >= 0), "H5Fcreate succeeded");
-
+
H5Pclose(plist_id);
/*
@@ -4961,7 +4961,7 @@ main(int argc, char **argv)
/* Display testing information */
if (MAINPROCESS)
TestInfo(argv[0]);
-
+
/* setup file access property list */
fapl = H5Pcreate (H5P_FILE_ACCESS);
H5Pset_fapl_mpio(fapl, test_comm, MPI_INFO_NULL);
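
Editor's note: the MpioTest2G comment above describes reproducing a hang when each rank writes 4 GB or more to a chunked dataset through parallel HDF5. As a rough illustration only — not the test's own code — here is a minimal sketch of the setup that comment refers to (an MPI-IO file access list plus a chunked dataset), with hypothetical names and a deliberately small size:

/* Sketch: create a chunked dataset through the MPI-IO VFD; each rank
 * writes one row of a small 2-D dataset (hypothetical names/sizes). */
#include <stdlib.h>
#include "hdf5.h"
#include "mpi.h"

int main(int argc, char **argv)
{
    int      mpi_size, mpi_rank;
    hsize_t  dims[2], chunk[2], start[2], count[2];
    hid_t    fapl, file, space, dcpl, dset, memspace;
    int     *buf;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    /* File access property list that routes I/O through MPI-IO */
    fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
    file = H5Fcreate("sketch.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);

    /* One row per rank, chunked by row */
    dims[0]  = (hsize_t)mpi_size; dims[1]  = 1024;
    chunk[0] = 1;                 chunk[1] = 1024;
    space = H5Screate_simple(2, dims, NULL);
    dcpl  = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(dcpl, 2, chunk);
    dset  = H5Dcreate2(file, "data", H5T_NATIVE_INT, space,
                       H5P_DEFAULT, dcpl, H5P_DEFAULT);

    /* Each rank selects and writes its own row */
    start[0] = (hsize_t)mpi_rank; start[1] = 0;
    count[0] = 1;                 count[1] = 1024;
    H5Sselect_hyperslab(space, H5S_SELECT_SET, start, NULL, count, NULL);
    memspace = H5Screate_simple(2, count, NULL);
    buf = malloc(1024 * sizeof(int));
    for (int i = 0; i < 1024; i++)
        buf[i] = mpi_rank;
    H5Dwrite(dset, H5T_NATIVE_INT, memspace, space, H5P_DEFAULT, buf);

    free(buf);
    H5Sclose(memspace); H5Dclose(dset); H5Pclose(dcpl);
    H5Sclose(space); H5Fclose(file); H5Pclose(fapl);
    MPI_Finalize();
    return 0;
}

The real test differs in scale and detail — it pushes the per-rank byte count to 4 GB and is skipped unless run exhaustively with 2 ranks, as the comment notes; the sketch shows only the fapl/chunking plumbing involved.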
diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c
index e950015..740f78e 100644
--- a/testpar/t_coll_chunk.c
+++ b/testpar/t_coll_chunk.c
@@ -16,7 +16,7 @@
#define HYPER 1
#define POINT 2
-#define ALL 3
+#define ALL 3
/* some commonly used routines for collective chunk IO tests*/
diff --git a/testpar/t_coll_md_read.c b/testpar/t_coll_md_read.c
index d4b2106..d4aaa2e 100644
--- a/testpar/t_coll_md_read.c
+++ b/testpar/t_coll_md_read.c
@@ -48,7 +48,7 @@
* in strictly non-decreasing order of chunk address. For version 1 and 2 B-trees,
* this caused the non-participating ranks to issue a collective MPI_Bcast() call
* which the other ranks did not issue, thus causing a hang.
- *
+ *
* However, since these ranks are not actually reading/writing anything, this call
* can simply be removed and the address used for the read/write can be set to an
* arbitrary number (0 was chosen).
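
Editor's note: the comment above attributes the hang to some ranks issuing a collective MPI_Bcast() that the remaining ranks never entered. As a minimal, self-contained sketch of that general failure pattern (not the HDF5 internals themselves), the following program deadlocks when run with two or more ranks, because only the non-root branch reaches the collective call:

/* Sketch of a mismatched collective: intentionally deadlocks with >= 2 ranks. */
#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv)
{
    int rank, value = 0;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank != 0) {
        /* Non-root ranks wait to receive the broadcast, but the root
         * never issues it, so they block forever -- the same class of
         * hang described above for non-participating ranks. */
        MPI_Bcast(&value, 1, MPI_INT, 0, MPI_COMM_WORLD);
    }

    printf("rank %d done\n", rank); /* only rank 0 gets here with >= 2 ranks */
    MPI_Finalize();
    return 0;
}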
diff --git a/testpar/t_file.c b/testpar/t_file.c
index 6183b8d..19a75c8 100644
--- a/testpar/t_file.c
+++ b/testpar/t_file.c
@@ -736,7 +736,7 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy,
}
/*
- * NOTE: See HDFFV-10894 and add tests later to verify MPI-specific properties in the
+ * NOTE: See HDFFV-10894 and add tests later to verify MPI-specific properties in the
* incoming fapl that could conflict with the existing values in H5F_shared_t on
* multiple opens of the same file.
*/
diff --git a/testpar/t_file_image.c b/testpar/t_file_image.c
index 62db11a..81bb7c2 100644
--- a/testpar/t_file_image.c
+++ b/testpar/t_file_image.c
@@ -21,11 +21,11 @@
*
* Process zero:
*
- * 1) Creates a core file with an integer vector data set of
- * length n (= mpi_size),
+ * 1) Creates a core file with an integer vector data set of
+ * length n (= mpi_size),
*
- * 2) Initializes the vector to zero in * location 0, and to -1
- * everywhere else.
+ * 2) Initializes the vector to zero in * location 0, and to -1
+ * everywhere else.
*
* 3) Flushes the core file, and gets an image of it. Closes
* the core file.
@@ -35,7 +35,7 @@
* 5) Awaits receipt on a file image from process n-1.
*
* 6) opens the image received from process n-1, verifies that
- * it contains a vector of length equal to mpi_size, and
+ * it contains a vector of length equal to mpi_size, and
* that the vector contains (0, 1, 2, ... n-1)
*
* 7) closes the core file and exits.
@@ -45,7 +45,7 @@
* 1) Await receipt of file image from process (i - 1).
*
* 2) Open the image with the core file driver, verify that i
- * contains a vector v of length, and that v[j] = j for
+ * contains a vector v of length, and that v[j] = j for
* 0 <= j < i, and that v[j] == -1 for i <= j < n
*
* 3) Set v[i] = i in the core file.
@@ -87,13 +87,13 @@ file_image_daisy_chain_test(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* setup file name */
- HDsnprintf(file_name, 1024, "file_image_daisy_chain_test_%05d.h5",
+ HDsnprintf(file_name, 1024, "file_image_daisy_chain_test_%05d.h5",
(int)mpi_rank);
if(mpi_rank == 0) {
-
- /* 1) Creates a core file with an integer vector data set
- * of length mpi_size,
+
+ /* 1) Creates a core file with an integer vector data set
+ * of length mpi_size,
*/
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((fapl_id >= 0), "creating fapl");
@@ -111,10 +111,10 @@ file_image_daisy_chain_test(void)
dset_id = H5Dcreate2(file_id, "v", H5T_NATIVE_INT, space_id,
H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "created data set");
-
- /* 2) Initialize the vector to zero in location 0, and
- * to -1 everywhere else.
+
+ /* 2) Initialize the vector to zero in location 0, and
+ * to -1 everywhere else.
*/
vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
@@ -131,7 +131,7 @@ file_image_daisy_chain_test(void)
HDfree(vector_ptr);
vector_ptr = NULL;
-
+
/* 3) Flush the core file, and get an image of it. Close
* the core file.
*/
@@ -159,14 +159,14 @@ file_image_daisy_chain_test(void)
err = H5Pclose(fapl_id);
VRFY((err >= 0), "closed fapl(1).");
-
+
/* 4) Send the image to process 1. */
- mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t),
+ mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t),
MPI_BYTE, 1, 0, MPI_COMM_WORLD);
VRFY((mpi_result == MPI_SUCCESS), "sent image size to process 1");
- mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len,
+ mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len,
MPI_BYTE, 1, 0, MPI_COMM_WORLD);
VRFY((mpi_result == MPI_SUCCESS), "sent image to process 1");
@@ -190,9 +190,9 @@ file_image_daisy_chain_test(void)
&rcvstat);
VRFY((mpi_result == MPI_SUCCESS), \
"received file image from process n-1");
-
+
/* 6) open the image received from process n-1, verify that
- * it contains a vector of length equal to mpi_size, and
+ * it contains a vector of length equal to mpi_size, and
* that the vector contains (0, 1, 2, ... n-1).
*/
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -229,7 +229,7 @@ file_image_daisy_chain_test(void)
vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
VRFY((vector_ptr != NULL), "allocated in memory rep of vector");
- err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
+ err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
H5P_DEFAULT, (void *)vector_ptr);
VRFY((err >= 0), "read received vector.");
@@ -238,7 +238,7 @@ file_image_daisy_chain_test(void)
if(vector_ptr[i] != i)
vector_ok = FALSE;
VRFY((vector_ok), "verified received vector.");
-
+
HDfree(vector_ptr);
vector_ptr = NULL;
@@ -276,9 +276,9 @@ file_image_daisy_chain_test(void)
&rcvstat);
VRFY((mpi_result == MPI_SUCCESS), \
"received file image from process mpi_rank-1");
-
+
/* 2) Open the image with the core file driver, verify that it
- * contains a vector v of length, and that v[j] = j for
+ * contains a vector v of length, and that v[j] = j for
* 0 <= j < i, and that v[j] == -1 for i <= j < n
*/
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -316,7 +316,7 @@ file_image_daisy_chain_test(void)
vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
VRFY((vector_ptr != NULL), "allocated in memory rep of vector");
- err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
+ err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
H5P_DEFAULT, (void *)vector_ptr);
VRFY((err >= 0), "read received vector.");
@@ -331,7 +331,7 @@ file_image_daisy_chain_test(void)
}
}
VRFY((vector_ok), "verified received vector.");
-
+
/* 3) Set v[i] = i in the core file. */
@@ -344,7 +344,7 @@ file_image_daisy_chain_test(void)
HDfree(vector_ptr);
vector_ptr = NULL;
-
+
/* 4) Flush the core file and send it to process (mpi_rank + 1) % n. */
err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
@@ -359,14 +359,14 @@ file_image_daisy_chain_test(void)
bytes_read = H5Fget_file_image(file_id, image_ptr, (size_t)image_len);
VRFY(bytes_read == image_len, "wrote file into image buffer");
- mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t),
- MPI_BYTE, (mpi_rank + 1) % mpi_size, 0,
+ mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t),
+ MPI_BYTE, (mpi_rank + 1) % mpi_size, 0,
MPI_COMM_WORLD);
VRFY((mpi_result == MPI_SUCCESS), \
"sent image size to process (mpi_rank + 1) % mpi_size");
- mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len,
- MPI_BYTE, (mpi_rank + 1) % mpi_size, 0,
+ mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len,
+ MPI_BYTE, (mpi_rank + 1) % mpi_size, 0,
MPI_COMM_WORLD);
VRFY((mpi_result == MPI_SUCCESS), \
"sent image to process (mpi_rank + 1) % mpi_size");
@@ -374,7 +374,7 @@ file_image_daisy_chain_test(void)
HDfree(image_ptr);
image_ptr = NULL;
image_len = 0;
-
+
/* 5) close the core file and exit. */
err = H5Sclose(space_id);
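
Editor's note: the daisy-chain test above is built on file images — H5Fget_file_image() serializes a core file into a buffer, and H5Pset_file_image() lets another open use that buffer as the file's contents. A minimal single-process sketch of that round trip (hypothetical file/dataset names, no MPI) might look like this:

/* Sketch: serialize an HDF5 file to a memory image and reopen the image
 * with the core VFD (single process, hypothetical names). */
#include <stdlib.h>
#include "hdf5.h"

int main(void)
{
    hid_t    fapl, file, space, dset, img_fapl, img_file;
    hsize_t  dims[1] = {16};
    int      data[16] = {0};
    ssize_t  image_len;
    void    *image_ptr;

    /* Create a small file in memory with the core driver, no backing store */
    fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_core(fapl, (size_t)(64 * 1024), 0);
    file  = H5Fcreate("image_sketch.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    space = H5Screate_simple(1, dims, NULL);
    dset  = H5Dcreate2(file, "v", H5T_NATIVE_INT, space,
                       H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
    H5Fflush(file, H5F_SCOPE_GLOBAL);

    /* Grab the image: first call sizes it, second call fills the buffer */
    image_len = H5Fget_file_image(file, NULL, 0);
    image_ptr = malloc((size_t)image_len);
    H5Fget_file_image(file, image_ptr, (size_t)image_len);

    /* Reopen the buffer as a file -- in the test this is the buffer that
     * gets MPI_Ssend'ed to the next rank before being reopened there. */
    img_fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_core(img_fapl, (size_t)(64 * 1024), 0);
    H5Pset_file_image(img_fapl, image_ptr, (size_t)image_len);
    img_file = H5Fopen("image_sketch.h5", H5F_ACC_RDONLY, img_fapl);

    H5Fclose(img_file); H5Pclose(img_fapl); free(image_ptr);
    H5Dclose(dset); H5Sclose(space); H5Fclose(file); H5Pclose(fapl);
    return 0;
}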
diff --git a/testpar/t_init_term.c b/testpar/t_init_term.c
index 6176bb5..0e40fe4 100644
--- a/testpar/t_init_term.c
+++ b/testpar/t_init_term.c
@@ -37,7 +37,7 @@ main (int argc, char **argv)
/* Initialize and finalize MPI */
MPI_Init(&argc, &argv);
MPI_Comm_size(comm, &mpi_size);
- MPI_Comm_rank(comm, &mpi_rank);
+ MPI_Comm_rank(comm, &mpi_rank);
if(MAINPROCESS)
TESTING("Usage of Serial HDF5 after MPI_Finalize() is called");
diff --git a/testpar/t_mpi.c b/testpar/t_mpi.c
index 9e5d839..0719ca6 100644
--- a/testpar/t_mpi.c
+++ b/testpar/t_mpi.c
@@ -302,7 +302,7 @@ static int test_mpio_gb_file(char *filename) {
"proc %d: write to mpi_off=%016llx, %lld\n",
mpi_rank, mpi_off, mpi_off);
/* set data to some trivial pattern for easy verification */
- for (j = 0; j < MB; j++)
+ for (j = 0; j < MB; j++)
*(buf + j) = (int8_t)(i * mpi_size + mpi_rank);
if (VERBOSE_MED)
HDfprintf(stdout,
diff --git a/testpar/t_pread.c b/testpar/t_pread.c
index 74feeb6..ba4165e 100644
--- a/testpar/t_pread.c
+++ b/testpar/t_pread.c
@@ -281,7 +281,7 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
/* create a chunked dataset */
chunk[0] = COUNT/8;
-
+
if ( pass ) {
if ( (dcpl_id = H5Pcreate (H5P_DATASET_CREATE)) < 0 ) {
pass = false;
@@ -295,9 +295,9 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
failure_mssg = "H5Pset_chunk() failed.\n";
}
}
-
+
if ( pass ) {
-
+
if ( (dset_id_ch = H5Dcreate2(file_id, "dataset0_chunked", H5T_NATIVE_FLOAT,
filespace, H5P_DEFAULT, dcpl_id,
H5P_DEFAULT)) < 0 ) {
@@ -319,7 +319,7 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
failure_mssg = "H5Pclose(dcpl_id) failed.\n";
}
}
-
+
if ( pass || (dset_id_ch != -1)) {
if ( H5Dclose(dset_id_ch) < 0 ) {
pass = false;
@@ -698,8 +698,8 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
data_slice = NULL;
}
- /*
- * Test reading proc0-read-and-bcast with sub-communicators
+ /*
+ * Test reading proc0-read-and-bcast with sub-communicators
*/
/* Don't test with more than LIMIT_NPROC processes to avoid memory issues */
@@ -798,7 +798,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
nextValue = 0;
else /* test 2 group 1 */
nextValue = (float)((hsize_t)( mpi_size / 2 )*count);
-
+
i = 0;
while ( ( pass ) && ( i < (hsize_t)dset_size ) ) {
/* what we really want is data_slice[i] != nextValue --
@@ -863,7 +863,7 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
nextValue = 0;
else /* test 2 group 1 */
nextValue = (float)((hsize_t)( mpi_size / 2 )*count);
-
+
i = 0;
while ( ( pass ) && ( i < (hsize_t)dset_size ) ) {
/* what we really want is data_slice[i] != nextValue --
@@ -893,8 +893,8 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
data_slice = NULL;
}
- /*
- * Read an H5S_ALL filespace into a hyperslab defined memory space
+ /*
+ * Read an H5S_ALL filespace into a hyperslab defined memory space
*/
if ( (data_slice = (float *)HDmalloc((size_t)(dset_size*2)*filetype_size)) == NULL ) {
@@ -979,14 +979,14 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
i++;
}
}
-
+
if ( pass || (memspace != -1) ) {
if ( H5Sclose(memspace) < 0 ) {
pass = false;
failure_mssg = "H5Sclose(memspace) failed.\n";
}
}
-
+
/* free data_slice if it has been allocated */
if ( data_slice != NULL ) {
HDfree(data_slice);
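
Editor's note: one of the hunks above sits in the part of test_parallel_read() that reads an H5S_ALL filespace into a hyperslab-defined memory space. A generic sketch of that selection pattern (hypothetical names and sizes, serial for brevity): allocate a memory buffer larger than the dataset, select a hyperslab in the memory dataspace, and pass H5S_ALL for the file dataspace:

/* Sketch: read an entire dataset (H5S_ALL in the file) into a hyperslab
 * of a larger memory buffer (hypothetical names/sizes). */
#include <stdlib.h>
#include "hdf5.h"

int main(void)
{
    hid_t   file, dset, fspace, memspace;
    hsize_t dset_size, mem_dims[1], start[1], count[1];
    float  *buf;

    file   = H5Fopen("sketch.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
    dset   = H5Dopen2(file, "dataset0", H5P_DEFAULT);
    fspace = H5Dget_space(dset);
    dset_size = (hsize_t)H5Sget_simple_extent_npoints(fspace);

    /* Memory buffer is twice the dataset; the data lands in the second half */
    mem_dims[0] = dset_size * 2;
    memspace = H5Screate_simple(1, mem_dims, NULL);
    start[0] = dset_size;
    count[0] = dset_size;
    H5Sselect_hyperslab(memspace, H5S_SELECT_SET, start, NULL, count, NULL);

    buf = (float *)malloc((size_t)mem_dims[0] * sizeof(float));
    H5Dread(dset, H5T_NATIVE_FLOAT, memspace, H5S_ALL, H5P_DEFAULT, buf);

    free(buf);
    H5Sclose(memspace); H5Sclose(fspace);
    H5Dclose(dset); H5Fclose(file);
    return 0;
}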
diff --git a/testpar/t_prestart.c b/testpar/t_prestart.c
index 71a8277..da6bbe0 100644
--- a/testpar/t_prestart.c
+++ b/testpar/t_prestart.c
@@ -48,11 +48,11 @@ main (int argc, char **argv)
MPI_Init(&argc, &argv);
MPI_Comm_size(comm, &mpi_size);
- MPI_Comm_rank(comm, &mpi_rank);
+ MPI_Comm_rank(comm, &mpi_rank);
if(MAINPROCESS)
TESTING("proper shutdown of HDF5 library");
-
+
/* Set up file access property list with parallel I/O access */
fapl = H5Pcreate(H5P_FILE_ACCESS);
VRFY((fapl >= 0), "H5Pcreate succeeded");
@@ -121,7 +121,7 @@ main (int argc, char **argv)
HDremove(filename);
/* release data buffers */
- if(data_array)
+ if(data_array)
HDfree(data_array);
nerrors += GetTestNumErrs();
diff --git a/testpar/t_prop.c b/testpar/t_prop.c
index 62e4dde..dde322d 100644
--- a/testpar/t_prop.c
+++ b/testpar/t_prop.c
@@ -97,7 +97,7 @@ test_plist_ed(void)
int mpi_size, mpi_rank, recv_proc;
- hsize_t chunk_size = 16384; /* chunk size */
+ hsize_t chunk_size = 16384; /* chunk size */
double fill = 2.7f; /* Fill value */
size_t nslots = 521*2;
size_t nbytes = 1048576 * 10;
@@ -165,16 +165,16 @@ test_plist_ed(void)
VRFY((ret>=0), "set fill-value succeeded");
max_size[0] = 100;
- ret = H5Pset_external(dcpl, "ext1.data", (off_t)0,
+ ret = H5Pset_external(dcpl, "ext1.data", (off_t)0,
(hsize_t)(max_size[0] * sizeof(int)/4));
VRFY((ret>=0), "set external succeeded");
- ret = H5Pset_external(dcpl, "ext2.data", (off_t)0,
+ ret = H5Pset_external(dcpl, "ext2.data", (off_t)0,
(hsize_t)(max_size[0] * sizeof(int)/4));
VRFY((ret>=0), "set external succeeded");
- ret = H5Pset_external(dcpl, "ext3.data", (off_t)0,
+ ret = H5Pset_external(dcpl, "ext3.data", (off_t)0,
(hsize_t)(max_size[0] * sizeof(int)/4));
VRFY((ret>=0), "set external succeeded");
- ret = H5Pset_external(dcpl, "ext4.data", (off_t)0,
+ ret = H5Pset_external(dcpl, "ext4.data", (off_t)0,
(hsize_t)(max_size[0] * sizeof(int)/4));
VRFY((ret>=0), "set external succeeded");
diff --git a/testpar/t_pshutdown.c b/testpar/t_pshutdown.c
index 55073c8..6a35fb2 100644
--- a/testpar/t_pshutdown.c
+++ b/testpar/t_pshutdown.c
@@ -51,11 +51,11 @@ main (int argc, char **argv)
MPI_Init(&argc, &argv);
MPI_Comm_size(comm, &mpi_size);
- MPI_Comm_rank(comm, &mpi_rank);
+ MPI_Comm_rank(comm, &mpi_rank);
if(MAINPROCESS)
TESTING("proper shutdown of HDF5 library");
-
+
/* Set up file access property list with parallel I/O access */
fapl = H5Pcreate(H5P_FILE_ACCESS);
VRFY((fapl >= 0), "H5Pcreate succeeded");
@@ -107,7 +107,7 @@ main (int argc, char **argv)
VRFY((ret >= 0), "H5Dwrite succeeded");
/* release data buffers */
- if(data_array)
+ if(data_array)
HDfree(data_array);
MPI_Finalize();
diff --git a/testpar/testpar.h b/testpar/testpar.h
index 2c1bce2..f76de51 100644
--- a/testpar/testpar.h
+++ b/testpar/testpar.h
@@ -34,7 +34,7 @@
if (VERBOSE_MED && *mesg != '\0') \
HDprintf("%s\n", mesg)
-/*
+/*
* VRFY: Verify if the condition val is true.
* If it is true, then call MESG to print mesg, depending on the verbose
* level.