Diffstat (limited to 'testpar')
-rw-r--r--  testpar/t_coll_chunk.c  |  48
-rw-r--r--  testpar/t_dset.c        |   2
-rw-r--r--  testpar/t_fphdf5.c      | 118
-rw-r--r--  testpar/t_mdset.c       | 298
-rw-r--r--  testpar/t_mpi.c         |  60
-rw-r--r--  testpar/t_ph5basic.c    |   6
-rw-r--r--  testpar/t_span_tree.c   | 290
-rw-r--r--  testpar/testphdf5.c     |  58
-rw-r--r--  testpar/testphdf5.h     | 170
9 files changed, 525 insertions(+), 525 deletions(-)
diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c
index 0d8980b..765124d 100644
--- a/testpar/t_coll_chunk.c
+++ b/testpar/t_coll_chunk.c
@@ -26,13 +26,13 @@
static void ccslab_set(int mpi_rank,int mpi_size,hsize_t start[],hsize_t count[],
hsize_t stride[],hsize_t block[],int mode);
-static void ccdataset_fill(hsize_t start[],hsize_t count[],
- hsize_t stride[],hsize_t block[],DATATYPE*dataset);
+static void ccdataset_fill(hsize_t start[],hsize_t count[],
+ hsize_t stride[],hsize_t block[],DATATYPE*dataset);
static void ccdataset_print(hsize_t start[],hsize_t block[],DATATYPE*dataset);
-static int ccdataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[],
- hsize_t block[], DATATYPE *dataset, DATATYPE *original);
+static int ccdataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[],
+ hsize_t block[], DATATYPE *dataset, DATATYPE *original);
static void coll_chunktest(const char* filename,int chunk_factor,int select_factor);
@@ -94,7 +94,7 @@ coll_chunk4(void)
const char *filename;
int mpi_size;
MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Comm_size(comm,&mpi_size);
+ MPI_Comm_size(comm,&mpi_size);
filename = GetTestParameters();
coll_chunktest(filename,mpi_size*2,BYROW_DISCONT);
@@ -106,7 +106,7 @@ coll_chunktest(const char* filename,int chunk_factor,int select_factor) {
hid_t file,dataset, file_dataspace;
hid_t acc_plist,xfer_plist,crp_plist;
hsize_t dims[RANK], chunk_dims[RANK];
- int* data_array1 = NULL;
+ int* data_array1 = NULL;
int* data_origin1 = NULL;
herr_t status;
hsize_t start[RANK];
@@ -129,7 +129,7 @@ coll_chunktest(const char* filename,int chunk_factor,int select_factor) {
status = H5Pset_fapl_mpio(acc_plist,comm,info);
VRFY((acc_plist >= 0),"MPIO creation property list succeeded");
-
+
file = H5Fcreate(filename,H5F_ACC_TRUNC,H5P_DEFAULT,acc_plist);
VRFY((file >= 0),"H5Fcreate succeeded");
@@ -137,11 +137,11 @@ coll_chunktest(const char* filename,int chunk_factor,int select_factor) {
VRFY((status >= 0),"");
/* setup dimensionality object */
-
+
dims[0] = SPACE_DIM1;
dims[1] = SPACE_DIM2;
- /* each process takes a slab of rows
+ /* each process takes a slab of rows
stride[0] = 1;
stride[1] = 1;
count[0] = SPACE_DIM1/mpi_size;
@@ -164,7 +164,7 @@ coll_chunktest(const char* filename,int chunk_factor,int select_factor) {
crp_plist = H5Pcreate(H5P_DATASET_CREATE);
VRFY((crp_plist >= 0),"");
-
+
/* test1: chunk size is equal to dataset size */
chunk_dims[0] = SPACE_DIM1/chunk_factor;
@@ -173,9 +173,9 @@ coll_chunktest(const char* filename,int chunk_factor,int select_factor) {
else chunk_dims[1] = SPACE_DIM2/chunk_factor;
status = H5Pset_chunk(crp_plist, 2, chunk_dims);
VRFY((status >= 0),"chunk creation property list succeeded");
-
+
dataset = H5Dcreate(file,DSET_COLLECTIVE_CHUNK_NAME,H5T_NATIVE_INT,
- file_dataspace,crp_plist);
+ file_dataspace,crp_plist);
VRFY((dataset >= 0),"dataset created succeeded");
/* H5Sclose(file_dataspace); */
@@ -225,19 +225,19 @@ coll_chunktest(const char* filename,int chunk_factor,int select_factor) {
}
#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
status = H5Dclose(dataset);
- VRFY((status >= 0),"");
+ VRFY((status >= 0),"");
/* check whether using collective IO */
/* Should use H5Pget and H5Pinsert to handle this test. */
status = H5Pclose(xfer_plist);
- VRFY((status >= 0),"property list closed");
+ VRFY((status >= 0),"property list closed");
status = H5Sclose(file_dataspace);
- VRFY((status >= 0),"");
+ VRFY((status >= 0),"");
status = H5Fclose(file);
- VRFY((status >= 0),"");
+ VRFY((status >= 0),"");
if (data_array1) free(data_array1);
@@ -257,7 +257,7 @@ coll_chunktest(const char* filename,int chunk_factor,int select_factor) {
status = H5Pset_fapl_mpio(acc_plist,comm,info);
VRFY((acc_plist >= 0),"MPIO creation property list succeeded");
-
+
file = H5Fopen(filename,H5F_ACC_RDONLY,acc_plist);
VRFY((file >= 0),"H5Fcreate succeeded");
@@ -279,8 +279,8 @@ coll_chunktest(const char* filename,int chunk_factor,int select_factor) {
/* fill dataset with test data */
ccdataset_fill(start, stride,count,block, data_origin1);
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0),"");
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0),"");
status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((status>= 0),"MPIO collective transfer property succeeded");
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
@@ -329,7 +329,7 @@ coll_chunktest(const char* filename,int chunk_factor,int select_factor) {
/* release data buffers */
if (data_array1) free(data_array1);
if (data_origin1) free(data_origin1);
-
+
}
@@ -409,11 +409,11 @@ ccdataset_fill(hsize_t start[], hsize_t stride[], hsize_t count[], hsize_t block
dataptr = tmptr + ((start[0]+k1*stride[0]+i)*SPACE_DIM2+
start[1]+k2*stride[1]+j);
-
+
*dataptr = (DATATYPE)(k1+k2+i+j);
}
}
- }
+ }
}
}
@@ -469,7 +469,7 @@ ccdataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block
}
vrfyerrs = 0;
-
+
for (k1 = 0; k1 < count[0];k1++) {
for(i = 0;i < block[0];i++) {
for(k2 = 0; k2<count[1];k2++) {
@@ -479,7 +479,7 @@ ccdataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block
start[1]+k2*stride[1]+j);
oriptr = original + ((start[0]+k1*stride[0]+i)*SPACE_DIM2+
start[1]+k2*stride[1]+j);
-
+
if (*dataptr != *oriptr){
if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){
printf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n",
diff --git a/testpar/t_dset.c b/testpar/t_dset.c
index dff3ec3..2591591 100644
--- a/testpar/t_dset.c
+++ b/testpar/t_dset.c
@@ -2145,7 +2145,7 @@ compress_readAll(void)
VRFY((dataspace > 0), "H5Screate_simple succeeded");
/* Create dataset */
- dataset = H5Dcreate(fid, "compressed_data", H5T_NATIVE_INT, dataspace, dcpl);
+ dataset = H5Dcreate(fid, "compressed_data", H5T_NATIVE_INT, dataspace, dcpl);
VRFY((dataset > 0), "H5Screate_simple succeeded");
/* Write compressed data */
diff --git a/testpar/t_fphdf5.c b/testpar/t_fphdf5.c
index 5dbc41f..b1e9535 100644
--- a/testpar/t_fphdf5.c
+++ b/testpar/t_fphdf5.c
@@ -42,11 +42,11 @@ static void access_dset(hid_t loc, const char *dset_name);
static void slab_set(hsize_t start[], hsize_t count[],
hsize_t stride[], hsize_t block[]);
static void fill_data(void);
-static void write_data(hid_t loc, const char *dset_name,
+static void write_data(hid_t loc, const char *dset_name,
hsize_t start[], hsize_t count[],
hsize_t stride[], hsize_t block[]);
static void verify_complete_dataset(hid_t loc, const char *dset_name);
-static void verify_partial_dataset(hid_t loc, const char *dset_name,
+static void verify_partial_dataset(hid_t loc, const char *dset_name,
hsize_t start[], hsize_t count[],
hsize_t stride[], hsize_t block[],
int * buf, hsize_t buf_len);
@@ -59,7 +59,7 @@ static void usage(const char *prog);
/*===----------------------------------------------------------------------===
* Filenames
*===----------------------------------------------------------------------===
- * The names of the test files for
+ * The names of the test files for
*/
static const char *FILENAME[2] = { /* List of files we want to create */
"FPHDF5Test",
@@ -120,20 +120,20 @@ static char dset_name[128];
static const char *grp_tmpl = "Process %d's Datasets";
static char grp_name[128];
-#if 0
-/* A useful debugging function, but no need to compile it unless
+#if 0
+/* A useful debugging function, but no need to compile it unless
* we are going to use it. JRM - 4/13/4
*/
/*-------------------------------------------------------------------------
* Function: check_globals
- * Purpose: Debugging Function. Check the current values of some
- * globals, and generate a message when they change.
+ * Purpose: Debugging Function. Check the current values of some
+ * globals, and generate a message when they change.
* Return: void
* Programmer: John Mainzer - 3/3/04
- * Modifications:
+ * Modifications:
*-------------------------------------------------------------------------
*/
-static void
+static void
check_globals(char * location_name)
{
static hsize_t local_dims[RANK] = {0,0};
@@ -178,23 +178,23 @@ check_globals(char * location_name)
}
return;
} /* check_globals() */
-#endif
+#endif
-#if 0
-/* Another useful debugging function, again no need to compile it unless
+#if 0
+/* Another useful debugging function, again no need to compile it unless
* we are going to use it. JRM - 4/13/04
*/
/*-------------------------------------------------------------------------
* Function: print_globals
- * Purpose: Debugging Function. Display the current values of some
- * globals.
+ * Purpose: Debugging Function. Display the current values of some
+ * globals.
* Return: void
* Programmer: John Mainzer - 3/9/04
- * Modifications:
+ * Modifications:
*-------------------------------------------------------------------------
*/
-static void
+static void
print_globals(void)
{
printf("%d: dims=[%d,%d], start=[%d,%d], count=[%d, %d], stride=[%d,%d], block=[%d,%d]\n",
@@ -258,9 +258,9 @@ create_group(hid_t loc, const char *group_name, size_t size_hint)
* Failure: Aborts
* Programmer: Bill Wendling
* 29. October 2003
- * Modifications:
+ * Modifications:
* Altered function to use the global dims array, instead
- * of a locally declared and initialized version.
+ * of a locally declared and initialized version.
* JRM - 3/3/04
*-------------------------------------------------------------------------
*/
@@ -342,11 +342,11 @@ slab_set(hsize_t my_start[], hsize_t my_count[], hsize_t my_stride[], hsize_t my
* 13. November 2003
* Modifications:
* Complete re-write of function. The orig_data array is
- * now allocated (in main) with size equal the size of the
- * array on file, and is loaded with the data we expect to
+ * now allocated (in main) with size equal the size of the
+ * array on file, and is loaded with the data we expect to
* find there.
*
- * The new local_orig_data array is allocated to match the
+ * The new local_orig_data array is allocated to match the
* size of this processes contribution to the on file data
* set, and is loaded with this processes data.
*
@@ -374,8 +374,8 @@ fill_data(void)
}
} else if ( proc_num == mpi_rank ) {
for ( col = 0; col < DIM1; col++ ) {
- local_orig_data[local_offset++] =
- orig_data[offset++] =
+ local_orig_data[local_offset++] =
+ orig_data[offset++] =
(proc_num * 1000) + (row * 100) + col;
}
} else {
@@ -450,7 +450,7 @@ write_data(hid_t loc, const char *dataset_name, hsize_t my_start[], hsize_t my_c
/*-------------------------------------------------------------------------
* Function: verify_complete_dataset
- * Purpose: Verify that all the data in the dataset is correct --
+ * Purpose: Verify that all the data in the dataset is correct --
* including that written by other processes.
* Return: Nothing
* Programmer: John Mainzer
@@ -482,7 +482,7 @@ verify_complete_dataset(hid_t loc, const char *dataset_name)
/* Read the dataset */
VRFY((H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
H5P_DEFAULT, data_array) >= 0), "H5Dread");
-
+
/* Verify the contents of the dataset */
for ( row = 0; row < dims[0]; row++ ) {
@@ -499,8 +499,8 @@ verify_complete_dataset(hid_t loc, const char *dataset_name)
}
VRFY((offset == (mpi_size * DIM0 * DIM1)), "offset OK");
- if (vrfyerrs) {
- fprintf(stdout, "%d: %d errors found in verify_complete_dataset\n",
+ if (vrfyerrs) {
+ fprintf(stdout, "%d: %d errors found in verify_complete_dataset\n",
mpi_rank, vrfyerrs);
++nerrors;
}
@@ -510,14 +510,14 @@ verify_complete_dataset(hid_t loc, const char *dataset_name)
if ( data_array != NULL ) {
free(data_array);
}
-
+
return;
} /* verify_complete_dataset() */
/*-------------------------------------------------------------------------
* Function: verify_partial_dataset
- * Purpose: Verify that the data in the specified section of the
+ * Purpose: Verify that the data in the specified section of the
* dataset matches the contents of the provided buffer.
* Return: Nothing
* Programmer: John Mainzer
@@ -527,7 +527,7 @@ verify_complete_dataset(hid_t loc, const char *dataset_name)
*-------------------------------------------------------------------------
*/
static void
-verify_partial_dataset(hid_t loc, const char *dataset_name,
+verify_partial_dataset(hid_t loc, const char *dataset_name,
hsize_t my_start[], hsize_t my_count[],
hsize_t my_stride[], hsize_t my_block[],
int * buf, hsize_t buf_len)
@@ -546,7 +546,7 @@ verify_partial_dataset(hid_t loc, const char *dataset_name,
file_dataspace = H5Dget_space(dataset);
VRFY((file_dataspace >= 0), "H5Dget_space");
VRFY((H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET,
- my_start, my_stride, my_count, my_block) >= 0),
+ my_start, my_stride, my_count, my_block) >= 0),
"H5Sselect_hyperslab in verify_partial_dataset");
/* Create a memory dataspace */
@@ -556,8 +556,8 @@ verify_partial_dataset(hid_t loc, const char *dataset_name,
/* Read the dataset */
VRFY((dims[0] != 0), "dims array initialized.");
- data_array = (int *)malloc(((size_t)block[0]) *
- ((size_t)block[1]) *
+ data_array = (int *)malloc(((size_t)block[0]) *
+ ((size_t)block[1]) *
sizeof(int));
VRFY((H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
H5P_DEFAULT, data_array) >= 0), "H5Dread");
@@ -578,7 +578,7 @@ verify_partial_dataset(hid_t loc, const char *dataset_name,
VRFY((offset == buf_len), "offset OK");
if (vrfyerrs) {
- fprintf(stdout, "%d: %d errors found in verify_partial_dataset\n",
+ fprintf(stdout, "%d: %d errors found in verify_partial_dataset\n",
mpi_rank, vrfyerrs);
++nerrors;
}
@@ -718,7 +718,7 @@ test_dataset_access(hid_t loc)
* Programmer: Bill Wendling
* 11. November 2003
* Modifications:
- * Replaced calls to verify_dataset() with calls to
+ * Replaced calls to verify_dataset() with calls to
* verify_partial_dataset(). In the absence of a barrier,
* we don't know that the other processes have done their
* writes to the datasets as well. Thus we only check the
@@ -745,45 +745,45 @@ test_dataset_write(hid_t loc)
/* Write to this dataset */
sprintf(dset_name, dset_tmpl, 0);
- printf("%d: Writing to \"/%s/%s\"\n",
+ printf("%d: Writing to \"/%s/%s\"\n",
mpi_rank, grp_name, dset_name);
write_data(group, dset_name, start, count, stride, block);
- printf("%d: Verifying dataset \"/%s/%s\"\n",
+ printf("%d: Verifying dataset \"/%s/%s\"\n",
mpi_rank, grp_name, dset_name);
- verify_partial_dataset(group, dset_name,
+ verify_partial_dataset(group, dset_name,
start, count, stride, block,
local_orig_data, (block[0] * block[1]));
sprintf(dset_name, dset_tmpl, 1);
- printf("%d: Writing to \"/%s/%s\"\n",
+ printf("%d: Writing to \"/%s/%s\"\n",
mpi_rank, grp_name, dset_name);
write_data(group, dset_name, start, count, stride, block);
- printf("%d: Verifying dataset \"/%s/%s\"\n",
+ printf("%d: Verifying dataset \"/%s/%s\"\n",
mpi_rank, grp_name, dset_name);
- verify_partial_dataset(group, dset_name,
+ verify_partial_dataset(group, dset_name,
start, count, stride, block,
local_orig_data, (block[0] * block[1]));
sprintf(dset_name, dset_tmpl, 2);
- printf("%d: Writing to \"/%s/%s\"\n",
+ printf("%d: Writing to \"/%s/%s\"\n",
mpi_rank, grp_name, dset_name);
write_data(group, dset_name, start, count, stride, block);
- printf("%d: Verifying dataset \"/%s/%s\"\n",
+ printf("%d: Verifying dataset \"/%s/%s\"\n",
mpi_rank, grp_name, dset_name);
- verify_partial_dataset(group, dset_name,
+ verify_partial_dataset(group, dset_name,
start, count, stride, block,
local_orig_data, (block[0] * block[1]));
sprintf(dset_name, dset_tmpl, 3);
- printf("%d: Writing to \"/%s/%s\"\n",
+ printf("%d: Writing to \"/%s/%s\"\n",
mpi_rank, grp_name, dset_name);
write_data(group, dset_name, start, count, stride, block);
- printf("%d: Verifying dataset \"/%s/%s\"\n",
+ printf("%d: Verifying dataset \"/%s/%s\"\n",
mpi_rank, grp_name, dset_name);
- verify_partial_dataset(group, dset_name,
- start, count, stride, block,
+ verify_partial_dataset(group, dset_name,
+ start, count, stride, block,
local_orig_data, (block[0] * block[1]));
-
+
/* Close the group */
VRFY((H5Gclose(group) >= 0), "H5Gclose");
}
@@ -830,7 +830,7 @@ usage(const char *prog)
* This array is used to store the data written by this
* process. JRM - 3/5/04
*
- * Replaced calls to verify_dataset() with calls to
+ * Replaced calls to verify_dataset() with calls to
* verify_complete_dataset(). JRM - 3/8/04
*-------------------------------------------------------------------------
*/
@@ -848,7 +848,7 @@ main(int argc, char *argv[])
H5open();
for (nargs = argc; nargs > 1; --nargs)
- if (strcmp(argv[nargs - 1], "-v") == 0 ||
+ if (strcmp(argv[nargs - 1], "-v") == 0 ||
strcmp(argv[nargs - 1], "--verbose") == 0 ||
strcmp(argv[nargs - 1], "--verbos") == 0 ||
strcmp(argv[nargs - 1], "--verbo") == 0 ||
@@ -899,14 +899,14 @@ main(int argc, char *argv[])
slab_set(start, count, stride, block);
VRFY((dims[0] != 0), "dims array initialized.");
- orig_data = (int *)malloc(((size_t)dims[0]) *
- ((size_t)dims[1]) *
+ orig_data = (int *)malloc(((size_t)dims[0]) *
+ ((size_t)dims[1]) *
sizeof(int));
VRFY((orig_data != NULL), "orig_data malloc succeeded");
VRFY((block[0] != 0), "block array initialized.");
- local_orig_data = (int *)malloc(((size_t)block[0]) *
- ((size_t)block[1]) *
+ local_orig_data = (int *)malloc(((size_t)block[0]) *
+ ((size_t)block[1]) *
sizeof(int));
VRFY((orig_data != NULL), "local_orig_data malloc succeeded");
@@ -969,7 +969,7 @@ main(int argc, char *argv[])
printf("%d: Reverifying dataset \"/%s/%s\"\n", mpi_rank,
grp_name, dset_name);
verify_complete_dataset(group, dset_name);
-
+
/* Close the group */
printf("%d: Closing group.", mpi_rank);
VRFY((H5Gclose(group) >= 0), "H5Gclose");
@@ -985,9 +985,9 @@ main(int argc, char *argv[])
if ( local_orig_data != NULL ) {
free(local_orig_data);
}
-#if 1
- /* It is useful to keep the hdf file created by this test for
- * debugging purposes. However, this code should always be
+#if 1
+ /* It is useful to keep the hdf file created by this test for
+ * debugging purposes. However, this code should always be
* turned on for checkin. JRM - 4/13/04
*/
if (fapl > -1)
diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c
index 06aedf3..df2a9a6 100644
--- a/testpar/t_mdset.c
+++ b/testpar/t_mdset.c
@@ -19,7 +19,7 @@
#define NDATASET 4
#define GROUP_DEPTH 128
enum obj_type { is_group, is_dset };
-
+
int get_size(void);
void write_dataset(hid_t, hid_t, hid_t);
@@ -34,10 +34,10 @@ void get_slab(hsize_t[], hsize_t[], hsize_t[], hsize_t[], int);
/*
- * The size value computed by this function is used extensively in
- * configuring tests for the current number of processes.
+ * The size value computed by this function is used extensively in
+ * configuring tests for the current number of processes.
*
- * This function was created as part of an effort to allow the
+ * This function was created as part of an effort to allow the
* test functions in this file to run on an arbitrary number of
* processors.
* JRM - 8/11/04
@@ -48,7 +48,7 @@ int get_size(void)
int mpi_rank;
int mpi_size;
int size = SIZE;
-
+
MPI_Comm_rank (MPI_COMM_WORLD, &mpi_rank); /* needed for VRFY */
MPI_Comm_size (MPI_COMM_WORLD, &mpi_size);
@@ -118,7 +118,7 @@ void multiple_dset_write(void)
VRFY((ret>=0), "H5Pclose succeeded");
/* decide the hyperslab according to process number. */
- get_slab(chunk_origin, chunk_dims, count, file_dims, size);
+ get_slab(chunk_origin, chunk_dims, count, file_dims, size);
memspace = H5Screate_simple (DIM, chunk_dims, NULL);
filespace = H5Screate_simple (DIM, file_dims, NULL);
@@ -135,7 +135,7 @@ void multiple_dset_write(void)
for (n = 0; n < ndatasets; n++) {
sprintf (dname, "dataset %d", n);
dataset = H5Dcreate (iof, dname, H5T_NATIVE_DOUBLE, filespace, dcpl);
- VRFY((dataset > 0), dname);
+ VRFY((dataset > 0), dname);
/* calculate data to write */
for (i = 0; i < size; i++)
@@ -162,7 +162,7 @@ void multiple_dset_write(void)
}
-/* Example of using PHDF5 to create, write, and read compact dataset.
+/* Example of using PHDF5 to create, write, and read compact dataset.
*
* Changes: Updated function to use a dynamically calculated size,
* instead of the old SIZE #define. This should allow it
@@ -188,7 +188,7 @@ void compact_dataset(void)
{
file_dims[i] = size;
}
-
+
MPI_Comm_rank (MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size (MPI_COMM_WORLD, &mpi_size);
@@ -216,7 +216,7 @@ void compact_dataset(void)
VRFY((ret >= 0), "set space allocation time for compact dataset");
dataset = H5Dcreate (iof, dname, H5T_NATIVE_DOUBLE, filespace, dcpl);
- VRFY((dataset >= 0), "H5Dcreate succeeded");
+ VRFY((dataset >= 0), "H5Dcreate succeeded");
/* set up the collective transfer properties list */
dxpl = H5Pcreate (H5P_DATASET_XFER);
@@ -260,8 +260,8 @@ void compact_dataset(void)
for (j = 0; j < size; j++)
if ( inme[(i * size) + j] != outme[(i * size) + j])
if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- printf("Dataset Verify failed at [%d][%d]: expect %f, got %f\n", i, j, outme[(i * size) + j], inme[(i * size) + j]);
-
+ printf("Dataset Verify failed at [%d][%d]: expect %f, got %f\n", i, j, outme[(i * size) + j], inme[(i * size) + j]);
+
H5Pclose(plist);
H5Pclose(dxpl);
H5Dclose(dataset);
@@ -271,12 +271,12 @@ void compact_dataset(void)
}
/*
- * Example of using PHDF5 to create, write, and read dataset and attribute
+ * Example of using PHDF5 to create, write, and read dataset and attribute
* of Null dataspace.
*
* Changes: Removed the assert that mpi_size <= the SIZE #define.
* As best I can tell, this assert isn't needed here,
- * and in any case, the SIZE #define is being removed
+ * and in any case, the SIZE #define is being removed
* in an update of the functions in this file to run
* with an arbitrary number of processes.
*
@@ -300,7 +300,7 @@ void null_dataset(void)
filename = GetTestParameters();
- plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL,
+ plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL,
facc_type, use_gpfs);
iof = H5Fcreate (filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
@@ -380,7 +380,7 @@ void null_dataset(void)
*
* Changes: Removed the assert that mpi_size <= the SIZE #define.
* As best I can tell, this assert isn't needed here,
- * and in any case, the SIZE #define is being removed
+ * and in any case, the SIZE #define is being removed
* in an update of the functions in this file to run
* with an arbitrary number of processes.
*
@@ -399,7 +399,7 @@ void big_dataset(void)
MPI_Offset file_size; /* Size of file on disk */
herr_t ret; /* Generic return value */
const char *filename;
-
+
MPI_Comm_rank (MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size (MPI_COMM_WORLD, &mpi_size);
@@ -409,13 +409,13 @@ void big_dataset(void)
filename = GetTestParameters();
fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type, use_gpfs);
- VRFY((fapl >= 0), "create_faccess_plist succeeded");
+ VRFY((fapl >= 0), "create_faccess_plist succeeded");
/*
* Create >2GB HDF5 file
*/
iof = H5Fcreate (filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
- VRFY((iof >= 0), "H5Fcreate succeeded");
+ VRFY((iof >= 0), "H5Fcreate succeeded");
/* Define dataspace for 2GB dataspace */
file_dims[0]= 2;
@@ -423,28 +423,28 @@ void big_dataset(void)
file_dims[2]= 1024;
file_dims[3]= 1024;
filespace = H5Screate_simple (4, file_dims, NULL);
- VRFY((filespace >= 0), "H5Screate_simple succeeded");
+ VRFY((filespace >= 0), "H5Screate_simple succeeded");
dataset = H5Dcreate (iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dcreate succeeded");
+ VRFY((dataset >= 0), "H5Dcreate succeeded");
/* Close all file objects */
ret=H5Dclose (dataset);
- VRFY((ret >= 0), "H5Dclose succeeded");
+ VRFY((ret >= 0), "H5Dclose succeeded");
ret=H5Sclose (filespace);
- VRFY((ret >= 0), "H5Sclose succeeded");
+ VRFY((ret >= 0), "H5Sclose succeeded");
ret=H5Fclose (iof);
- VRFY((ret >= 0), "H5Fclose succeeded");
+ VRFY((ret >= 0), "H5Fclose succeeded");
/* Check that file of the correct size was created */
file_size=h5_mpi_get_file_size(filename, MPI_COMM_WORLD, MPI_INFO_NULL);
- VRFY((file_size == 2147485696ULL), "File is correct size");
+ VRFY((file_size == 2147485696ULL), "File is correct size");
/*
* Create >4GB HDF5 file
*/
iof = H5Fcreate (filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
- VRFY((iof >= 0), "H5Fcreate succeeded");
+ VRFY((iof >= 0), "H5Fcreate succeeded");
/* Define dataspace for 4GB dataspace */
file_dims[0]= 4;
@@ -452,28 +452,28 @@ void big_dataset(void)
file_dims[2]= 1024;
file_dims[3]= 1024;
filespace = H5Screate_simple (4, file_dims, NULL);
- VRFY((filespace >= 0), "H5Screate_simple succeeded");
+ VRFY((filespace >= 0), "H5Screate_simple succeeded");
dataset = H5Dcreate (iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dcreate succeeded");
+ VRFY((dataset >= 0), "H5Dcreate succeeded");
/* Close all file objects */
ret=H5Dclose (dataset);
- VRFY((ret >= 0), "H5Dclose succeeded");
+ VRFY((ret >= 0), "H5Dclose succeeded");
ret=H5Sclose (filespace);
- VRFY((ret >= 0), "H5Sclose succeeded");
+ VRFY((ret >= 0), "H5Sclose succeeded");
ret=H5Fclose (iof);
- VRFY((ret >= 0), "H5Fclose succeeded");
+ VRFY((ret >= 0), "H5Fclose succeeded");
/* Check that file of the correct size was created */
file_size=h5_mpi_get_file_size(filename, MPI_COMM_WORLD, MPI_INFO_NULL);
- VRFY((file_size == 4294969344ULL), "File is correct size");
+ VRFY((file_size == 4294969344ULL), "File is correct size");
/*
* Create >8GB HDF5 file
*/
iof = H5Fcreate (filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
- VRFY((iof >= 0), "H5Fcreate succeeded");
+ VRFY((iof >= 0), "H5Fcreate succeeded");
/* Define dataspace for 8GB dataspace */
file_dims[0]= 8;
@@ -481,26 +481,26 @@ void big_dataset(void)
file_dims[2]= 1024;
file_dims[3]= 1024;
filespace = H5Screate_simple (4, file_dims, NULL);
- VRFY((filespace >= 0), "H5Screate_simple succeeded");
+ VRFY((filespace >= 0), "H5Screate_simple succeeded");
dataset = H5Dcreate (iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dcreate succeeded");
+ VRFY((dataset >= 0), "H5Dcreate succeeded");
/* Close all file objects */
ret=H5Dclose (dataset);
- VRFY((ret >= 0), "H5Dclose succeeded");
+ VRFY((ret >= 0), "H5Dclose succeeded");
ret=H5Sclose (filespace);
- VRFY((ret >= 0), "H5Sclose succeeded");
+ VRFY((ret >= 0), "H5Sclose succeeded");
ret=H5Fclose (iof);
- VRFY((ret >= 0), "H5Fclose succeeded");
+ VRFY((ret >= 0), "H5Fclose succeeded");
/* Check that file of the correct size was created */
file_size=h5_mpi_get_file_size(filename, MPI_COMM_WORLD, MPI_INFO_NULL);
- VRFY((file_size == 8589936640ULL), "File is correct size");
+ VRFY((file_size == 8589936640ULL), "File is correct size");
/* Close fapl */
ret=H5Pclose (fapl);
- VRFY((ret >= 0), "H5Pclose succeeded");
+ VRFY((ret >= 0), "H5Pclose succeeded");
}
/* Example of using PHDF5 to read a partial written dataset. The dataset does
@@ -509,10 +509,10 @@ void big_dataset(void)
*
* Changes: Removed the assert that mpi_size <= the SIZE #define.
* As best I can tell, this assert isn't needed here,
- * and in any case, the SIZE #define is being removed
+ * and in any case, the SIZE #define is being removed
* in an update of the functions in this file to run
* with an arbitrary number of processes.
- *
+ *
* Also added code to free dynamically allocated buffers.
*
* JRM - 8/11/04
@@ -538,7 +538,7 @@ void dataset_fillvalue(void)
int acc, i, j, k, l; /* Local index variables */
herr_t ret; /* Generic return value */
const char *filename;
-
+
MPI_Comm_rank (MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size (MPI_COMM_WORLD, &mpi_size);
@@ -654,7 +654,7 @@ void dataset_fillvalue(void)
if(i<mpi_size) {
if( *twdata != *trdata )
if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- printf("Dataset Verify failed at [%d][%d][%d][%d]: expect %d, got %d\n", i,j,k,l, *twdata, *trdata);
+ printf("Dataset Verify failed at [%d][%d][%d][%d]: expect %d, got %d\n", i,j,k,l, *twdata, *trdata);
} /* end if */
else {
if( *trdata != 0)
@@ -670,11 +670,11 @@ void dataset_fillvalue(void)
/* Close all file objects */
ret=H5Dclose (dataset);
- VRFY((ret >= 0), "H5Dclose succeeded");
+ VRFY((ret >= 0), "H5Dclose succeeded");
ret=H5Sclose (filespace);
- VRFY((ret >= 0), "H5Sclose succeeded");
+ VRFY((ret >= 0), "H5Sclose succeeded");
ret=H5Fclose (iof);
- VRFY((ret >= 0), "H5Fclose succeeded");
+ VRFY((ret >= 0), "H5Fclose succeeded");
/* Close memory dataspace */
ret=H5Sclose (memspace);
@@ -693,7 +693,7 @@ void dataset_fillvalue(void)
HDfree(wdata);
}
-/* Write multiple groups with a chunked dataset in each group collectively.
+/* Write multiple groups with a chunked dataset in each group collectively.
* These groups and datasets are for testing independent read later.
*
* Changes: Updated function to use a dynamically calculated size,
@@ -738,27 +738,27 @@ void collective_group_write(void)
H5Pclose(plist);
/* decide the hyperslab according to process number. */
- get_slab(chunk_origin, chunk_dims, count, file_dims, size);
+ get_slab(chunk_origin, chunk_dims, count, file_dims, size);
/* select hyperslab in memory and file spaces. These two operations are
* identical since the datasets are the same. */
memspace = H5Screate_simple(DIM, file_dims, NULL);
- ret1 = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin,
+ ret1 = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin,
chunk_dims, count, chunk_dims);
filespace = H5Screate_simple(DIM, file_dims, NULL);
- ret2 = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin,
+ ret2 = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin,
chunk_dims, count, chunk_dims);
VRFY((memspace>=0), "memspace");
VRFY((filespace>=0), "filespace");
VRFY((ret1>=0), "mgroup memspace selection");
- VRFY((ret2>=0), "mgroup filespace selection");
-
+ VRFY((ret2>=0), "mgroup filespace selection");
+
dcpl = H5Pcreate(H5P_DATASET_CREATE);
ret1 = H5Pset_chunk (dcpl, 2, chunk_size);
VRFY((dcpl>=0), "dataset creation property");
VRFY((ret1>=0), "set chunk for dataset creation property");
-
- /* creates ngroups groups under the root group, writes chunked
+
+ /* creates ngroups groups under the root group, writes chunked
* datasets in parallel. */
for(m = 0; m < ngroups; m++) {
sprintf(gname, "group%d", m);
@@ -773,7 +773,7 @@ void collective_group_write(void)
for(j=0; j < size; j++)
outme[(i * size) + j] = (i+j)*1000 + mpi_rank;
- H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT,
+ H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT,
outme);
H5Dclose(did);
@@ -786,7 +786,7 @@ void collective_group_write(void)
}
#endif /* BARRIER_CHECKS */
}
-
+
H5Pclose(dcpl);
H5Sclose(filespace);
H5Sclose(memspace);
@@ -795,8 +795,8 @@ void collective_group_write(void)
HDfree(outme);
}
-/* Let two sets of processes open and read different groups and chunked
- * datasets independently.
+/* Let two sets of processes open and read different groups and chunked
+ * datasets independently.
*/
void independent_group_read(void)
{
@@ -812,16 +812,16 @@ void independent_group_read(void)
ngroups = pt->count;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
+
plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type, use_gpfs);
fid = H5Fopen(filename, H5F_ACC_RDONLY, plist);
H5Pclose(plist);
- /* open groups and read datasets. Odd number processes read even number
- * groups from the end; even number processes read odd number groups
+ /* open groups and read datasets. Odd number processes read even number
+ * groups from the end; even number processes read odd number groups
* from the beginning. */
if(mpi_rank%2==0) {
- for(m=ngroups-1; m==0; m-=2)
+ for(m=ngroups-1; m==0; m-=2)
group_dataset_read(fid, mpi_rank, m);
} else {
for(m=0; m<ngroups; m+=2)
@@ -858,7 +858,7 @@ void group_dataset_read(hid_t fid, int mpi_rank, int m)
outdata = (DATATYPE*)HDmalloc((size_t)(size * size * sizeof(DATATYPE)));
VRFY((outdata != NULL), "HDmalloc succeeded for outdata");
-
+
/* open every group under root group. */
sprintf(gname, "group%d", m);
gid = H5Gopen(fid, gname);
@@ -873,7 +873,7 @@ void group_dataset_read(hid_t fid, int mpi_rank, int m)
/* this is the original value */
for(i=0; i<size; i++)
- for(j=0; j<size; j++) {
+ for(j=0; j<size; j++) {
outdata[(i * size) + j] = (i+j)*1000 + mpi_rank;
}
@@ -889,19 +889,19 @@ void group_dataset_read(hid_t fid, int mpi_rank, int m)
}
/*
- * Example of using PHDF5 to create multiple groups. Under the root group,
- * it creates ngroups groups. Under the first group just created, it creates
- * recursive subgroups of depth GROUP_DEPTH. In each created group, it
+ * Example of using PHDF5 to create multiple groups. Under the root group,
+ * it creates ngroups groups. Under the first group just created, it creates
+ * recursive subgroups of depth GROUP_DEPTH. In each created group, it
* generates NDATASETS datasets. Each process write a hyperslab of an array
* into the file. The structure is like
- *
+ *
* root group
* |
* ---------------------------- ... ... ------------------------
* | | | ... ... | |
* group0*+' group1*+' group2*+' ... ... group ngroups*+'
* |
- * 1st_child_group*'
+ * 1st_child_group*'
* |
* 2nd_child_group*'
* |
@@ -948,32 +948,32 @@ void multiple_group_write(void)
H5Pclose(plist);
/* decide the hyperslab according to process number. */
- get_slab(chunk_origin, chunk_dims, count, file_dims, size);
+ get_slab(chunk_origin, chunk_dims, count, file_dims, size);
/* select hyperslab in memory and file spaces. These two operations are
* identical since the datasets are the same. */
memspace = H5Screate_simple(DIM, file_dims, NULL);
VRFY((memspace>=0), "memspace");
- ret = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin,
+ ret = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin,
chunk_dims, count, chunk_dims);
VRFY((ret>=0), "mgroup memspace selection");
filespace = H5Screate_simple(DIM, file_dims, NULL);
VRFY((filespace>=0), "filespace");
- ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin,
+ ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin,
chunk_dims, count, chunk_dims);
- VRFY((ret>=0), "mgroup filespace selection");
+ VRFY((ret>=0), "mgroup filespace selection");
- /* creates ngroups groups under the root group, writes datasets in
+ /* creates ngroups groups under the root group, writes datasets in
* parallel. */
for(m = 0; m < ngroups; m++) {
sprintf(gname, "group%d", m);
gid = H5Gcreate(fid, gname, 0);
VRFY((gid > 0), gname);
- /* create attribute for these groups. */
+ /* create attribute for these groups. */
write_attribute(gid, is_group, m);
-
+
if(m != 0)
write_dataset(memspace, filespace, gid);
@@ -986,13 +986,13 @@ void multiple_group_write(void)
}
#endif /* BARRIER_CHECKS */
}
-
+
/* recursively creates subgroups under the first group. */
gid = H5Gopen(fid, "group0");
create_group_recursive(memspace, filespace, gid, 0);
ret = H5Gclose(gid);
VRFY((ret>=0), "H5Gclose");
-
+
ret = H5Sclose(filespace);
VRFY((ret>=0), "H5Sclose");
ret = H5Sclose(memspace);
@@ -1001,7 +1001,7 @@ void multiple_group_write(void)
VRFY((ret>=0), "H5Fclose");
}
-/*
+/*
* In a group, creates NDATASETS datasets. Each process writes a hyperslab
* of a data array to the file.
*
@@ -1010,7 +1010,7 @@ void multiple_group_write(void)
* to function with an arbitrary number of processors.
*
* JRM - 8/16/04
- */
+ */
void write_dataset(hid_t memspace, hid_t filespace, hid_t gid)
{
int i, j, n, size;
@@ -1018,7 +1018,7 @@ void write_dataset(hid_t memspace, hid_t filespace, hid_t gid)
char dname[32];
DATATYPE * outme = NULL;
hid_t did;
-
+
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
@@ -1029,7 +1029,7 @@ void write_dataset(hid_t memspace, hid_t filespace, hid_t gid)
for(n=0; n < NDATASET; n++) {
sprintf(dname, "dataset%d", n);
- did = H5Dcreate(gid, dname, H5T_NATIVE_INT, filespace,
+ did = H5Dcreate(gid, dname, H5T_NATIVE_INT, filespace,
H5P_DEFAULT);
VRFY((did > 0), dname);
@@ -1037,28 +1037,28 @@ void write_dataset(hid_t memspace, hid_t filespace, hid_t gid)
for(j=0; j < size; j++)
outme[(i * size) + j] = n*1000 + mpi_rank;
- H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT,
+ H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT,
outme);
/* create attribute for these datasets.*/
write_attribute(did, is_dset, n);
-
+
H5Dclose(did);
}
HDfree(outme);
}
-/*
+/*
* Creates subgroups of depth GROUP_DEPTH recursively. Also writes datasets
* in parallel in each group.
*/
-void create_group_recursive(hid_t memspace, hid_t filespace, hid_t gid,
+void create_group_recursive(hid_t memspace, hid_t filespace, hid_t gid,
int counter)
-{
+{
hid_t child_gid;
int mpi_rank;
char gname[64];
-
+
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
#ifdef BARRIER_CHECKS
@@ -1067,23 +1067,23 @@ void create_group_recursive(hid_t memspace, hid_t filespace, hid_t gid,
MPI_Barrier(MPI_COMM_WORLD);
}
#endif /* BARRIER_CHECKS */
-
- sprintf(gname, "%dth_child_group", counter+1);
+
+ sprintf(gname, "%dth_child_group", counter+1);
child_gid = H5Gcreate(gid, gname, 0);
VRFY((child_gid > 0), gname);
/* write datasets in parallel. */
- write_dataset(memspace, filespace, gid);
+ write_dataset(memspace, filespace, gid);
- if( counter < GROUP_DEPTH )
+ if( counter < GROUP_DEPTH )
create_group_recursive(memspace, filespace, child_gid, counter+1);
H5Gclose(child_gid);
}
-/*
+/*
* This function is to verify the data from multiple group testing. It opens
- * every dataset in every group and check their correctness.
+ * every dataset in every group and check their correctness.
*
* Changes: Updated function to use a dynamically calculated size,
* instead of the old SIZE #define. This should allow it
@@ -1125,22 +1125,22 @@ void multiple_group_read(void)
H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims,
count, chunk_dims);
filespace = H5Screate_simple(DIM, file_dims, NULL);
- H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims,
+ H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims,
count, chunk_dims);
-
+
/* open every group under root group. */
for(m=0; m<ngroups; m++) {
sprintf(gname, "group%d", m);
gid = H5Gopen(fid, gname);
VRFY((gid > 0), gname);
-
+
/* check the data. */
if(m != 0)
if( (error_num = read_dataset(memspace, filespace, gid))>0)
nerrors += error_num;
-
- /* check attribute.*/
- error_num = 0;
+
+ /* check attribute.*/
+ error_num = 0;
if( (error_num = read_attribute(gid, is_group, m))>0 )
nerrors += error_num;
@@ -1164,8 +1164,8 @@ void multiple_group_read(void)
}
-/*
- * This function opens all the datasets in a certain, checks the data using
+/*
+ * This function opens all the datasets in a certain, checks the data using
* dataset_vrfy function.
*
* Changes: Updated function to use a dynamically calculated size,
@@ -1197,12 +1197,12 @@ int read_dataset(hid_t memspace, hid_t filespace, hid_t gid)
did = H5Dopen(gid, dname);
VRFY((did>0), dname);
- H5Dread(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT,
+ H5Dread(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT,
indata);
/* this is the original value */
for(i=0; i<size; i++)
- for(j=0; j<size; j++) {
+ for(j=0; j<size; j++) {
*outdata = n*1000 + mpi_rank;
outdata++;
}
@@ -1211,10 +1211,10 @@ int read_dataset(hid_t memspace, hid_t filespace, hid_t gid)
/* compare the original value(outdata) to the value in file(indata).*/
vrfy_errors = check_value(indata, outdata, size);
- /* check attribute.*/
+ /* check attribute.*/
if( (attr_errors = read_attribute(did, is_dset, n))>0 )
- vrfy_errors += attr_errors;
-
+ vrfy_errors += attr_errors;
+
H5Dclose(did);
}
@@ -1224,11 +1224,11 @@ int read_dataset(hid_t memspace, hid_t filespace, hid_t gid)
return vrfy_errors;
}
-/*
- * This recursive function opens all the groups in vertical direction and
+/*
+ * This recursive function opens all the groups in vertical direction and
* checks the data.
*/
-void recursive_read_group(hid_t memspace, hid_t filespace, hid_t gid,
+void recursive_read_group(hid_t memspace, hid_t filespace, hid_t gid,
int counter)
{
hid_t child_gid;
@@ -1237,7 +1237,7 @@ void recursive_read_group(hid_t memspace, hid_t filespace, hid_t gid,
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
#ifdef BARRIER_CHECKS
- if((counter+1) % 10)
+ if((counter+1) % 10)
MPI_Barrier(MPI_COMM_WORLD);
#endif /* BARRIER_CHECKS */
@@ -1255,7 +1255,7 @@ void recursive_read_group(hid_t memspace, hid_t filespace, hid_t gid,
/* Create and write attribute for a group or a dataset. For groups, attribute
* is a scalar datum; for dataset, it is a one-dimensional array.
- */
+ */
void write_attribute(hid_t obj_id, int this_type, int num)
{
hid_t sid, aid;
@@ -1264,7 +1264,7 @@ void write_attribute(hid_t obj_id, int this_type, int num)
char attr_name[32];
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
+
if(this_type == is_group) {
sprintf(attr_name, "Group Attribute %d", num);
sid = H5Screate(H5S_SCALAR);
@@ -1279,7 +1279,7 @@ void write_attribute(hid_t obj_id, int this_type, int num)
attr_data[i] = i;
sid = H5Screate_simple(dspace_rank, dspace_dims, NULL);
aid = H5Acreate(obj_id, attr_name, H5T_NATIVE_INT, sid, H5P_DEFAULT);
- H5Awrite(aid, H5T_NATIVE_INT, attr_data);
+ H5Awrite(aid, H5T_NATIVE_INT, attr_data);
H5Aclose(aid);
H5Sclose(sid);
}
@@ -1293,21 +1293,21 @@ int read_attribute(hid_t obj_id, int this_type, int num)
hsize_t group_block[2]={1,1}, dset_block[2]={1, 8};
int i, mpi_rank, in_num, in_data[8], out_data[8], vrfy_errors = 0;
char attr_name[32];
-
+
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
+
if(this_type == is_group) {
sprintf(attr_name, "Group Attribute %d", num);
aid = H5Aopen_name(obj_id, attr_name);
if(MAINPROCESS) {
H5Aread(aid, H5T_NATIVE_INT, &in_num);
- vrfy_errors = dataset_vrfy(NULL, NULL, NULL, group_block,
+ vrfy_errors = dataset_vrfy(NULL, NULL, NULL, group_block,
&in_num, &num);
}
H5Aclose(aid);
}
else if(this_type == is_dset) {
- sprintf(attr_name, "Dataset Attribute %d", num);
+ sprintf(attr_name, "Dataset Attribute %d", num);
for(i=0; i<8; i++)
out_data[i] = i;
aid = H5Aopen_name(obj_id, attr_name);
@@ -1317,12 +1317,12 @@ int read_attribute(hid_t obj_id, int this_type, int num)
out_data);
}
H5Aclose(aid);
- }
-
+ }
+
return vrfy_errors;
}
-/* This functions compares the original data with the read-in data for its
+/* This functions compares the original data with the read-in data for its
* hyperslab part only by process ID.
*
* Changes: Modified function to use a passed in size parameter
@@ -1331,7 +1331,7 @@ int read_attribute(hid_t obj_id, int this_type, int num)
*
* JRM - 8/16/04
*/
-int check_value(DATATYPE *indata, DATATYPE *outdata, int size)
+int check_value(DATATYPE *indata, DATATYPE *outdata, int size)
{
int mpi_rank, mpi_size, err_num=0;
hsize_t i, j;
@@ -1349,7 +1349,7 @@ int check_value(DATATYPE *indata, DATATYPE *outdata, int size)
for(j=chunk_origin[1]; j<(chunk_origin[1]+chunk_dims[1]); j++) {
if( *indata != *outdata )
if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- printf("Dataset Verify failed at [%lu][%lu](row %lu, col%lu): expect %d, got %d\n", (unsigned long)i, (unsigned long)j, (unsigned long)i, (unsigned long)j, *outdata, *indata);
+ printf("Dataset Verify failed at [%lu][%lu](row %lu, col%lu): expect %d, got %d\n", (unsigned long)i, (unsigned long)j, (unsigned long)i, (unsigned long)j, *outdata, *indata);
}
if(err_num > MAX_ERR_REPORT && !VERBOSE_MED)
printf("[more errors ...]\n");
@@ -1367,14 +1367,14 @@ int check_value(DATATYPE *indata, DATATYPE *outdata, int size)
* JRM - 8/11/04
*/
-void get_slab(hsize_t chunk_origin[],
- hsize_t chunk_dims[],
+void get_slab(hsize_t chunk_origin[],
+ hsize_t chunk_dims[],
hsize_t count[],
hsize_t file_dims[],
int size)
{
int mpi_rank, mpi_size;
-
+
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
@@ -1386,23 +1386,23 @@ void get_slab(hsize_t chunk_origin[],
chunk_dims[0] = size/mpi_size;
chunk_dims[1] = size;
}
- if(file_dims != NULL)
+ if(file_dims != NULL)
file_dims[0] = file_dims[1] = size;
- if(count != NULL)
+ if(count != NULL)
count[0] = count[1] = 1;
}
-/*
+/*
* This function is based on bug demonstration code provided by Thomas
- * Guignon (thomas.guignon@ifp.fr), and is intended to verify the
+ * Guignon (thomas.guignon@ifp.fr), and is intended to verify the
* correctness of my fix for that bug.
*
- * In essence, the bug appeared when at least one process attempted to
- * write a point selection -- for which collective I/O is not supported,
- * and at least one other attempted to write some other type of selection
- * for which collective I/O is supported.
+ * In essence, the bug appeared when at least one process attempted to
+ * write a point selection -- for which collective I/O is not supported,
+ * and at least one other attempted to write some other type of selection
+ * for which collective I/O is supported.
*
- * Since the processes did not compare notes before performing the I/O,
+ * Since the processes did not compare notes before performing the I/O,
* some would attempt collective I/O while others performed independent
* I/O. A hang resulted.
*
@@ -1420,7 +1420,7 @@ void io_mode_confusion(void)
/*
* HDF5 APIs definitions
*/
-
+
const int rank = 1;
const char *dataset_name = "IntArray";
@@ -1428,7 +1428,7 @@ void io_mode_confusion(void)
hid_t filespace, memspace; /* file and memory dataspace */
/* identifiers */
hsize_t dimsf[1]; /* dataset dimensions */
- int data[N] = {1}; /* pointer to data buffer to write */
+ int data[N] = {1}; /* pointer to data buffer to write */
hsize_t coord[N] = {0L,1L,2L,3L};
hsize_t start[1];
hsize_t stride[1];
@@ -1466,7 +1466,7 @@ void io_mode_confusion(void)
*/
if ( verbose )
- HDfprintf(stdout, "%0d:%s: Setting up property list.\n",
+ HDfprintf(stdout, "%0d:%s: Setting up property list.\n",
mpi_rank, fcn_name);
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -1529,7 +1529,7 @@ void io_mode_confusion(void)
if ( verbose )
- HDfprintf(stdout, "%0d:%s: Calling H5Screate_simple().\n",
+ HDfprintf(stdout, "%0d:%s: Calling H5Screate_simple().\n",
mpi_rank, fcn_name);
memspace = H5Screate_simple(rank, dimsf, NULL);
@@ -1538,7 +1538,7 @@ void io_mode_confusion(void)
if( mpi_rank == 0 ) {
-
+
if ( verbose )
HDfprintf(stdout, "%0d:%s: Calling H5Sselect_all(memspace).\n",
mpi_rank, fcn_name);
@@ -1556,19 +1556,19 @@ void io_mode_confusion(void)
status = H5Sselect_none(memspace);
VRFY(( status >= 0 ), "H5Sselect_none() failed");
-
+
}
if ( verbose )
- HDfprintf(stdout, "%0d:%s: Calling MPI_Barrier().\n",
+ HDfprintf(stdout, "%0d:%s: Calling MPI_Barrier().\n",
mpi_rank, fcn_name);
MPI_Barrier(MPI_COMM_WORLD);
if ( verbose )
- HDfprintf(stdout, "%0d:%s: Calling H5Dget_space().\n",
+ HDfprintf(stdout, "%0d:%s: Calling H5Dget_space().\n",
mpi_rank, fcn_name);
filespace = H5Dget_space(dset_id);
@@ -1590,7 +1590,7 @@ void io_mode_confusion(void)
"%0d:%s: Calling H5Sselect_elements() -- set up hang?\n",
mpi_rank, fcn_name);
- status = H5Sselect_elements(filespace, H5S_SELECT_SET, N,
+ status = H5Sselect_elements(filespace, H5S_SELECT_SET, N,
(const hsize_t **)&coord);
VRFY(( status >= 0 ), "H5Sselect_elements() failed");
@@ -1611,7 +1611,7 @@ void io_mode_confusion(void)
if ( verbose )
- HDfprintf(stdout, "%0d:%s: Calling MPI_Barrier().\n",
+ HDfprintf(stdout, "%0d:%s: Calling MPI_Barrier().\n",
mpi_rank, fcn_name);
MPI_Barrier(MPI_COMM_WORLD);
@@ -1626,7 +1626,7 @@ void io_mode_confusion(void)
if ( verbose )
- HDfprintf(stdout, "%0d:%s: Calling H5Pset_dxpl_mpio().\n",
+ HDfprintf(stdout, "%0d:%s: Calling H5Pset_dxpl_mpio().\n",
mpi_rank, fcn_name);
status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
@@ -1642,7 +1642,7 @@ void io_mode_confusion(void)
plist_id, data);
if ( verbose )
- HDfprintf(stdout, "%0d:%s: Returned from H5Dwrite(), status=%d.\n",
+ HDfprintf(stdout, "%0d:%s: Returned from H5Dwrite(), status=%d.\n",
mpi_rank, fcn_name, status);
VRFY(( status >= 0 ), "H5Dwrite() failed");
@@ -1652,7 +1652,7 @@ void io_mode_confusion(void)
*/
if ( verbose )
- HDfprintf(stdout, "%0d:%s: Cleaning up from test.\n",
+ HDfprintf(stdout, "%0d:%s: Cleaning up from test.\n",
mpi_rank, fcn_name);
status = H5Dclose(dset_id);
diff --git a/testpar/t_mpi.c b/testpar/t_mpi.c
index 29867c4..0d373d6 100644
--- a/testpar/t_mpi.c
+++ b/testpar/t_mpi.c
@@ -99,7 +99,7 @@ test_mpio_overlap_writes(char *filename)
mrc = MPI_File_write_at(fh, mpi_off, buf, (int)stride, MPI_BYTE,
&mpi_stat);
VRFY((mrc==MPI_SUCCESS), "");
-
+
/* move the offset pointer to last byte written by all processes */
mpi_off += (mpi_size - 1 - mpi_rank) * stride;
@@ -116,7 +116,7 @@ test_mpio_overlap_writes(char *filename)
VRFY((mrc==MPI_SUCCESS), "MPI_FILE_CLOSE");
mrc = MPI_Comm_free(&comm);
VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free");
-
+
/* sync with the other waiting processes */
mrc = MPI_Barrier(MPI_COMM_WORLD);
VRFY((mrc==MPI_SUCCESS), "Sync after writes");
@@ -181,10 +181,10 @@ test_mpio_overlap_writes(char *filename)
* Print any failure as information only, not as an error so that this
* won't abort the remaining test or other separated tests.
*
- * Test if MPIO can write file from under 2GB to over 2GB and then
+ * Test if MPIO can write file from under 2GB to over 2GB and then
* from under 4GB to over 4GB.
* Each process writes 1MB in round robin fashion.
- * Then reads the file back in by reverse order, that is process 0
+ * Then reads the file back in by reverse order, that is process 0
* reads the data of process n-1 and vice versa.
*/
static int
@@ -321,7 +321,7 @@ test_mpio_gb_file(char *filename)
/* close file and free the communicator */
mrc = MPI_File_close(&fh);
VRFY((mrc==MPI_SUCCESS), "MPI_FILE_CLOSE");
-
+
mrc = MPI_Barrier(MPI_COMM_WORLD);
VRFY((mrc==MPI_SUCCESS), "Sync after writes");
@@ -419,7 +419,7 @@ test_mpio_1wMr(char *filename, int special_request)
int mpi_err;
unsigned char writedata[DIMSIZE], readdata[DIMSIZE];
unsigned char expect_val;
- int i, irank;
+ int i, irank;
int nerrs = 0; /* number of errors */
int atomicity;
MPI_Offset mpi_off;
@@ -591,36 +591,36 @@ if (special_request & USEFSYNC){
return nerrs;
}
-/*
+/*
Function: test_mpio_derived_dtype
-Test Whether the Displacement of MPI derived datatype
-(+ File_set_view + MPI_write)works or not on this MPI-IO package
+Test Whether the Displacement of MPI derived datatype
+(+ File_set_view + MPI_write)works or not on this MPI-IO package
and this platform.
-1. Details for the test:
+1. Details for the test:
1) Create two derived datatypes with MPI_Type_hindexed:
- datatype1:
- count = 1, blocklens = 1, offsets = 0,
+ datatype1:
+ count = 1, blocklens = 1, offsets = 0,
base type = MPI_BYTE(essentially a char)
- datatype2:
- count = 1, blocklens = 1, offsets = 1(byte),
+ datatype2:
+ count = 1, blocklens = 1, offsets = 1(byte),
base type = MPI_BYTE
-
+
2) Using these two derived datatypes,
Build another derived datatype with MPI_Type_struct:
advtype: derived from datatype1 and datatype2
- advtype:
- count = 2, blocklens[0] = 1, blocklens[1]=1,
- offsets[0] = 0, offsets[1] = 1(byte),
- bas_type[0]=datatype1,
+ advtype:
+ count = 2, blocklens[0] = 1, blocklens[1]=1,
+ offsets[0] = 0, offsets[1] = 1(byte),
+ bas_type[0]=datatype1,
bas_type[1] = datatype2;
3) Setting MPI file view with advtype
4) Writing 2 bytes 1 to 2 using MPI_File_write to a file
5) File content:
-Supposed the file value of the file is 0(most machines indeed do so)
+Supposed the file value of the file is 0(most machines indeed do so)
and Fill value is embraced with "() in the following output:
Expected output should be:
1,0,2
@@ -629,14 +629,14 @@ Expected output should be:
However, at some platforms, for example, IBM AIX(at March 23rd, 2005):
the following values were obtained:
-1,2,0
+1,2,0
The problem is that the displacement of the second derived datatype(datatype2) which formed the final derived datatype(advtype)
has been put after the basic datatype(MPI_BYTE) of datatype2. This is a bug.
2. This test will verify whether the complicated derived datatype is working on
-the current platform.
+the current platform.
If this bug has been fixed in the previous not-working package, this test will issue a printf message to tell the developer to change
the configuration specific file of HDF5 so that we can change our configurationsetting to support collective IO for irregular selections.
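A minimal sketch of the datatype construction described above, using the MPI-1 calls the comment names (MPI_Type_hindexed and MPI_Type_struct); fh is assumed to be an MPI_File already opened with MPI_MODE_RDWR | MPI_MODE_CREATE, and the variable names are illustrative rather than the test's own.

    int          blocklens[2] = {1, 1};
    MPI_Aint     offsets[2]   = {0, 1};
    MPI_Datatype dtype1, dtype2, advtype;
    MPI_Datatype bas_types[2];
    char         buf[2]       = {1, 2};
    MPI_Status   mpi_stat;

    /* datatype1: one MPI_BYTE at displacement 0;
       datatype2: one MPI_BYTE at displacement 1 */
    MPI_Type_hindexed(1, &blocklens[0], &offsets[0], MPI_BYTE, &dtype1);
    MPI_Type_hindexed(1, &blocklens[1], &offsets[1], MPI_BYTE, &dtype2);

    /* advtype places datatype1 at offset 0 and datatype2 at offset 1,
       so its combined byte map is {0, 2} */
    bas_types[0] = dtype1;
    bas_types[1] = dtype2;
    MPI_Type_struct(2, blocklens, offsets, bas_types, &advtype);
    MPI_Type_commit(&advtype);

    /* set the file view with advtype and write the two bytes 1,2;
       on a zero-filled file a correct MPI-IO yields the contents 1,0,2 */
    MPI_File_set_view(fh, 0, MPI_BYTE, advtype, "native", MPI_INFO_NULL);
    MPI_File_write(fh, buf, 2, MPI_BYTE, &mpi_stat);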
@@ -651,7 +651,7 @@ static int test_mpio_derived_dtype(char *filename) {
char mpi_err_str[MPI_MAX_ERROR_STRING];
int mpi_err_strlen;
int mpi_err;
- int i;
+ int i;
int nerrors = 0; /* number of errors */
MPI_Datatype etype,filetype;
MPI_Datatype adv_filetype,bas_filetype[2];
@@ -673,9 +673,9 @@ static int test_mpio_derived_dtype(char *filename) {
for(i=0;i<2;i++)
buf[i] = i+1;
-
+
if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename,
- MPI_MODE_RDWR | MPI_MODE_CREATE,
+ MPI_MODE_RDWR | MPI_MODE_CREATE,
MPI_INFO_NULL, &fh))
!= MPI_SUCCESS){
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
@@ -766,7 +766,7 @@ static int test_mpio_derived_dtype(char *filename) {
printf("MPI_File_open failed (%s)\n", mpi_err_str);
return 1;
}
-
+
if((mpi_err = MPI_File_set_view(fh,0,MPI_BYTE,MPI_BYTE,"native",MPI_INFO_NULL))!= MPI_SUCCESS){
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
printf("MPI_File_set_view failed (%s)\n", mpi_err_str);
@@ -797,7 +797,7 @@ static int test_mpio_derived_dtype(char *filename) {
return 1;
}
-
+
mpi_err = MPI_Barrier(MPI_COMM_WORLD);
return ret;
}
@@ -925,11 +925,11 @@ main(int argc, char **argv)
}else{
MPI_BANNER("MPIO complicated derived datatype test...");
ret_code = test_mpio_derived_dtype(filenames[0]);
-#ifdef H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS
+#ifdef H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS
if(ret_code == -1) {
if(mpi_rank == 0) {
- printf("Complicated derived datatype is NOT working at this platform\n");
- printf("Go back to hdf5/config and find the corresponding\n");
+ printf("Complicated derived datatype is NOT working at this platform\n");
+ printf("Go back to hdf5/config and find the corresponding\n");
printf("configure-specific file (for example, powerpc-ibm-aix5.x) and add\n");
printf("hdf5_mpi_complex_derived_datatype_works=${hdf5_mpi_complex_derived_datatype-works='no'}\n");
printf(" at the end of the file.\n");
diff --git a/testpar/t_ph5basic.c b/testpar/t_ph5basic.c
index 1649c13..0062ce6 100644
--- a/testpar/t_ph5basic.c
+++ b/testpar/t_ph5basic.c
@@ -88,7 +88,7 @@ test_fapl_mpio_dup(void)
VRFY((ret >= 0), "");
/* Case 1:
- * Free the created communicator and INFO object.
+ * Free the created communicator and INFO object.
* Check if the access property list is still valid and can return
* valid communicator and INFO object.
*/
@@ -247,7 +247,7 @@ test_fapl_mpiposix_dup(void)
VRFY((ret >= 0), "");
/* Case 1:
- * Free the created communicator object.
+ * Free the created communicator object.
* Check if the access property list is still valid and can return
* valid communicator object.
*/
@@ -290,7 +290,7 @@ test_fapl_mpiposix_dup(void)
VRFY((mpi_rank_tmp==mpi_rank), "MPI_Comm_rank");
/* Case 3:
- * Close the property list and verify the retrieved communicator
+ * Close the property list and verify the retrieved communicator
* object is still valid.
*/
H5Pclose(acc_pl);
diff --git a/testpar/t_span_tree.c b/testpar/t_span_tree.c
index 55afa13..8c75a8d 100644
--- a/testpar/t_span_tree.c
+++ b/testpar/t_span_tree.c
@@ -13,7 +13,7 @@
* access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-/*
+/*
This program will test irregular hyperslab selections with collective write and read.
The way to test whether collective write and read works is to use independent IO
output to verify the collective output.
@@ -24,15 +24,15 @@
2) We will read two datasets with the same hyperslab selection settings,
1. independent read to read independent output,
independent read to read collecive output,
- Compare the result,
+ Compare the result,
If the result is the same, then collective write succeeds.
2. collective read to read independent output,
independent read to read independent output,
Compare the result,
If the result is the same, then collective read succeeds.
- */
-
+ */
+
#include "hdf5.h"
#include "H5private.h"
#include "testphdf5.h"
@@ -45,7 +45,7 @@ static void coll_read_test(int chunk_factor);
/*-------------------------------------------------------------------------
* Function: coll_irregular_cont_write
*
- * Purpose: Test the collectively irregular hyperslab write in contiguous
+ * Purpose: Test the collectively irregular hyperslab write in contiguous
storage
*
* Return: Success: 0
@@ -72,7 +72,7 @@ coll_irregular_cont_write(void)
/*-------------------------------------------------------------------------
* Function: coll_irregular_cont_read
*
- * Purpose: Test the collectively irregular hyperslab read in contiguous
+ * Purpose: Test the collectively irregular hyperslab read in contiguous
storage
*
* Return: Success: 0
@@ -98,7 +98,7 @@ coll_irregular_cont_read(void)
/*-------------------------------------------------------------------------
* Function: coll_irregular_simple_chunk_write
*
- * Purpose: Test the collectively irregular hyperslab write in chunk
+ * Purpose: Test the collectively irregular hyperslab write in chunk
storage(1 chunk)
*
* Return: Success: 0
@@ -125,7 +125,7 @@ coll_irregular_simple_chunk_write(void)
/*-------------------------------------------------------------------------
* Function: coll_irregular_simple_chunk_read
*
- * Purpose: Test the collectively irregular hyperslab read in chunk
+ * Purpose: Test the collectively irregular hyperslab read in chunk
storage(1 chunk)
*
* Return: Success: 0
@@ -150,7 +150,7 @@ coll_irregular_simple_chunk_read(void)
/*-------------------------------------------------------------------------
* Function: coll_irregular_complex_chunk_write
*
- * Purpose: Test the collectively irregular hyperslab write in chunk
+ * Purpose: Test the collectively irregular hyperslab write in chunk
storage(4 chunks)
*
* Return: Success: 0
@@ -177,7 +177,7 @@ coll_irregular_complex_chunk_write(void)
/*-------------------------------------------------------------------------
* Function: coll_irregular_complex_chunk_read
*
- * Purpose: Test the collectively irregular hyperslab read in chunk
+ * Purpose: Test the collectively irregular hyperslab read in chunk
storage(1 chunk)
*
* Return: Success: 0
@@ -210,12 +210,12 @@ void coll_write_test(int chunk_factor)
hid_t mspaceid1, mspaceid, fspaceid,fspaceid1; /* Dataspace identifiers */
hid_t plist; /* Dataset property list identifier */
- hsize_t mdim1[] = {MSPACE1_DIM}; /* Dimension size of the first dataset
- (in memory) */
-
- hsize_t fsdim[] = {FSPACE_DIM1, FSPACE_DIM2};
+ hsize_t mdim1[] = {MSPACE1_DIM}; /* Dimension size of the first dataset
+ (in memory) */
+
+ hsize_t fsdim[] = {FSPACE_DIM1, FSPACE_DIM2};
/* Dimension sizes of the dataset (on disk) */
- hsize_t mdim[] = {MSPACE_DIM1, MSPACE_DIM2}; /* Dimension sizes of the
+ hsize_t mdim[] = {MSPACE_DIM1, MSPACE_DIM2}; /* Dimension sizes of the
dataset in memory when we
read selection from the
dataset on the disk */
@@ -230,8 +230,8 @@ void coll_write_test(int chunk_factor)
unsigned i,j;
int fillvalue = 0; /* Fill value for the dataset */
- int matrix_out[MSPACE_DIM1][MSPACE_DIM2];
- int matrix_out1[MSPACE_DIM1][MSPACE_DIM2]; /* Buffer to read from the
+ int matrix_out[MSPACE_DIM1][MSPACE_DIM2];
+ int matrix_out1[MSPACE_DIM1][MSPACE_DIM2]; /* Buffer to read from the
dataset */
int vector[MSPACE1_DIM];
@@ -246,7 +246,7 @@ void coll_write_test(int chunk_factor)
/* Obtain file name */
- filename = GetTestParameters();
+ filename = GetTestParameters();
/*
* Buffers' initialization.
@@ -256,15 +256,15 @@ void coll_write_test(int chunk_factor)
#if 0
acc_plist = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((acc_plist >= 0),"");
+ VRFY((acc_plist >= 0),"");
ret = H5Pset_fapl_mpio(acc_plist,comm,info);
VRFY((ret >= 0),"MPIO creation property list succeeded");
#endif
- acc_plist = create_faccess_plist(comm, info, facc_type, use_gpfs);
+ acc_plist = create_faccess_plist(comm, info, facc_type, use_gpfs);
VRFY((acc_plist >= 0),"");
-
+
/*
* Create a file.
*/
@@ -277,7 +277,7 @@ void coll_write_test(int chunk_factor)
plist = H5Pcreate(H5P_DATASET_CREATE);
VRFY((acc_plist >= 0),"");
- ret = H5Pset_fill_value(plist, H5T_NATIVE_INT, &fillvalue);
+ ret = H5Pset_fill_value(plist, H5T_NATIVE_INT, &fillvalue);
VRFY((ret >= 0),"Fill value creation property list succeeded");
if(chunk_factor != 0) {
@@ -287,7 +287,7 @@ void coll_write_test(int chunk_factor)
ret = H5Pset_chunk(plist, 2, chunk_dims);
VRFY((ret >= 0),"chunk creation property list succeeded");
}
- /*
+ /*
* Create dataspace for the dataset in the file.
*/
fspaceid = H5Screate_simple(FSPACE_RANK, fsdim, NULL);
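The hunks above prepare the dataset-creation property list before any dataspace is made: a fill value is always installed, and chunked layout is requested only when chunk_factor is non-zero. A condensed, hedged sketch (the chunk-dimension arithmetic is an assumption; the diff only shows that H5Pset_chunk() is called):

    hid_t   dcpl = H5Pcreate(H5P_DATASET_CREATE);
    int     fillvalue = 0;
    hsize_t chunk_dims[2];

    H5Pset_fill_value(dcpl, H5T_NATIVE_INT, &fillvalue);

    if (chunk_factor != 0) {
        chunk_dims[0] = FSPACE_DIM1 / chunk_factor;   /* assumed split */
        chunk_dims[1] = FSPACE_DIM2 / chunk_factor;   /* assumed split */
        H5Pset_chunk(dcpl, 2, chunk_dims);
    }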
@@ -303,35 +303,35 @@ void coll_write_test(int chunk_factor)
dataseti = H5Dcreate(file, "independ_write", H5T_NATIVE_INT, fspaceid, plist);
VRFY((dataseti >= 0),"dataset created succeeded");
/*
- * Select hyperslab for the dataset in the file, using 3x2 blocks,
+ * Select hyperslab for the dataset in the file, using 3x2 blocks,
* (4,3) stride and (1,4) count starting at the position (0,1)
for the first selection
*/
- start[0] = FHSTART0;
+ start[0] = FHSTART0;
start[1] = FHSTART1+mpi_rank*FHSTRIDE1*FHCOUNT1/mpi_size;
- stride[0] = FHSTRIDE0;
+ stride[0] = FHSTRIDE0;
stride[1] = FHSTRIDE1;
- count[0] = FHCOUNT0;
- count[1] = FHCOUNT1/mpi_size;
- block[0] = FHBLOCK0;
+ count[0] = FHCOUNT0;
+ count[1] = FHCOUNT1/mpi_size;
+ block[0] = FHBLOCK0;
block[1] = FHBLOCK1;
ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0),"hyperslab selection succeeded");
/*
- * Select hyperslab for the dataset in the file, using 3x2*4 blocks,
+ * Select hyperslab for the dataset in the file, using 3x2*4 blocks,
* stride 1 and (1,1) count starting at the position (4,0).
*/
- start[0] = SHSTART0;
+ start[0] = SHSTART0;
start[1] = SHSTART1+SHCOUNT1*SHBLOCK1*mpi_rank/mpi_size;
- stride[0] = SHSTRIDE0;
+ stride[0] = SHSTRIDE0;
stride[1] = SHSTRIDE1;
- count[0] = SHCOUNT0;
- count[1] = SHCOUNT1;
- block[0] = SHBLOCK0;
+ count[0] = SHCOUNT0;
+ count[1] = SHCOUNT1;
+ block[0] = SHBLOCK0;
block[1] = SHBLOCK1/mpi_size;
ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_OR, start, stride, count, block);
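The selection built in the hunk above is what makes this a span-tree test: two rectangular hyperslabs on the same file dataspace, the first installed with H5S_SELECT_SET and the second merged in with H5S_SELECT_OR, give each rank an irregular union to write. Stripped of the whitespace churn (and with error checking omitted), the pattern is:

    /* first hyperslab becomes the selection */
    start[0]  = FHSTART0;   start[1]  = FHSTART1 + mpi_rank*FHSTRIDE1*FHCOUNT1/mpi_size;
    stride[0] = FHSTRIDE0;  stride[1] = FHSTRIDE1;
    count[0]  = FHCOUNT0;   count[1]  = FHCOUNT1/mpi_size;
    block[0]  = FHBLOCK0;   block[1]  = FHBLOCK1;
    H5Sselect_hyperslab(fspaceid, H5S_SELECT_SET, start, stride, count, block);

    /* second hyperslab is OR'd onto the first, producing the irregular union */
    start[0]  = SHSTART0;   start[1]  = SHSTART1 + SHCOUNT1*SHBLOCK1*mpi_rank/mpi_size;
    stride[0] = SHSTRIDE0;  stride[1] = SHSTRIDE1;
    count[0]  = SHCOUNT0;   count[1]  = SHCOUNT1;
    block[0]  = SHBLOCK0;   block[1]  = SHBLOCK1/mpi_size;
    H5Sselect_hyperslab(fspaceid, H5S_SELECT_OR, start, stride, count, block);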
@@ -344,7 +344,7 @@ void coll_write_test(int chunk_factor)
VRFY((mspaceid1 >= 0),"memory dataspace created succeeded");
/*
- * Select hyperslab.
+ * Select hyperslab.
* We will use 48 elements of the vector buffer starting at the second element.
* Selected elements are 1 2 3 . . . 48
*/
@@ -356,26 +356,26 @@ void coll_write_test(int chunk_factor)
ret = H5Sselect_hyperslab(mspaceid1, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0),"hyperslab selection succeeded");
-
+
ret = H5Dwrite(dataseti, H5T_NATIVE_INT, mspaceid1, fspaceid, H5P_DEFAULT, vector);
VRFY((ret >= 0),"dataset independent write succeed");
xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0),"");
+ VRFY((xfer_plist >= 0),"");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0),"MPIO data transfer property list succeed");
+ VRFY((ret >= 0),"MPIO data transfer property list succeed");
- ret = H5Dwrite(datasetc, H5T_NATIVE_INT, mspaceid1, fspaceid, xfer_plist, vector);
+ ret = H5Dwrite(datasetc, H5T_NATIVE_INT, mspaceid1, fspaceid, xfer_plist, vector);
/* ret = H5Dwrite(datasetc, H5T_NATIVE_INT, mspaceid1, fspaceid, H5P_DEFAULT, vector);*/
- VRFY((ret >= 0),"dataset collective write succeed");
+ VRFY((ret >= 0),"dataset collective write succeed");
ret = H5Sclose(mspaceid1);
VRFY((ret >= 0),"");
- ret = H5Sclose(fspaceid);
+ ret = H5Sclose(fspaceid);
VRFY((ret >= 0),"");
-
+
/*
* Close dataset.
*/
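The hunk above is where the independent and collective write paths diverge; the only difference between the two H5Dwrite() calls is the data-transfer property list. A hedged sketch of that pairing (error checking omitted):

    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);

    /* independent write: default transfer properties */
    H5Dwrite(dataseti, H5T_NATIVE_INT, mspaceid1, fspaceid, H5P_DEFAULT, vector);

    /* collective write: same selections, collective transfer mode;
     * every rank must take part in this call */
    H5Dwrite(datasetc, H5T_NATIVE_INT, mspaceid1, fspaceid, dxpl, vector);

    H5Pclose(dxpl);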
@@ -383,7 +383,7 @@ void coll_write_test(int chunk_factor)
VRFY((ret >= 0),"");
ret = H5Dclose(dataseti);
VRFY((ret >= 0),"");
-
+
/*
* Close the file.
*/
@@ -408,14 +408,14 @@ void coll_write_test(int chunk_factor)
#if 0
acc_plist = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((acc_plist >= 0),"");
+ VRFY((acc_plist >= 0),"");
ret = H5Pset_fapl_mpio(acc_plist,comm,info);
VRFY((ret >= 0),"MPIO creation property list succeeded");
#endif
- acc_plist = create_faccess_plist(comm, info, facc_type, use_gpfs);
+ acc_plist = create_faccess_plist(comm, info, facc_type, use_gpfs);
VRFY((acc_plist >= 0),"");
-
+
file = H5Fopen(filename, H5F_ACC_RDONLY, acc_plist);
VRFY((file >= 0),"H5Fopen succeeded");
@@ -426,8 +426,8 @@ void coll_write_test(int chunk_factor)
VRFY((datasetc >= 0),"H5Dopen succeeded");
dataseti = H5Dopen(file,"independ_write");
VRFY((dataseti >= 0),"H5Dopen succeeded");
-
- /*
+
+ /*
* Get dataspace of the open dataset.
*/
fspaceid = H5Dget_space(datasetc);
@@ -437,16 +437,16 @@ void coll_write_test(int chunk_factor)
VRFY((fspaceid1 >= 0),"file dataspace obtained succeeded");
-
- start[0] = RFFHSTART0;
+
+ start[0] = RFFHSTART0;
start[1] = RFFHSTART1+mpi_rank*RFFHCOUNT1/mpi_size;
- block[0] = RFFHBLOCK0;
+ block[0] = RFFHBLOCK0;
block[1] = RFFHBLOCK1;
- stride[0] = RFFHSTRIDE0;
+ stride[0] = RFFHSTRIDE0;
stride[1] = RFFHSTRIDE1;
- count[0] = RFFHCOUNT0;
- count[1] = RFFHCOUNT1/mpi_size;
-
+ count[0] = RFFHCOUNT0;
+ count[1] = RFFHCOUNT1/mpi_size;
+
ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0),"hyperslab selection succeeded");
@@ -455,13 +455,13 @@ void coll_write_test(int chunk_factor)
/*start[0] = RFSHSTART0+mpi_rank*RFSHCOUNT1/mpi_size; */
start[0] = RFSHSTART0;
- start[1] = RFSHSTART1+RFSHCOUNT1*mpi_rank/mpi_size;
- block[0] = RFSHBLOCK0;
- block[1] = RFSHBLOCK1;
- stride[0] = RFSHSTRIDE0;
+ start[1] = RFSHSTART1+RFSHCOUNT1*mpi_rank/mpi_size;
+ block[0] = RFSHBLOCK0;
+ block[1] = RFSHBLOCK1;
+ stride[0] = RFSHSTRIDE0;
stride[1] = RFSHSTRIDE0;
- count[0] = RFSHCOUNT0;
- count[1] = RFSHCOUNT1/mpi_size;
+ count[0] = RFSHCOUNT0;
+ count[1] = RFSHCOUNT1/mpi_size;
ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_OR, start, stride, count, block);
VRFY((ret >= 0),"hyperslab selection succeeded");
ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_OR, start, stride, count, block);
@@ -473,60 +473,60 @@ void coll_write_test(int chunk_factor)
*/
mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL);
- /*
+ /*
* Select two hyperslabs in memory. Hyperslabs has the same
* size and shape as the selected hyperslabs for the file dataspace.
*/
- start[0] = RMFHSTART0;
+ start[0] = RMFHSTART0;
start[1] = RMFHSTART1+mpi_rank*RMFHCOUNT1/mpi_size;
- block[0] = RMFHBLOCK0;
+ block[0] = RMFHBLOCK0;
block[1] = RMFHBLOCK1;
- stride[0] = RMFHSTRIDE0;
+ stride[0] = RMFHSTRIDE0;
stride[1] = RMFHSTRIDE1;
- count[0] = RMFHCOUNT0;
- count[1] = RMFHCOUNT1/mpi_size;
+ count[0] = RMFHCOUNT0;
+ count[1] = RMFHCOUNT1/mpi_size;
ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0),"hyperslab selection succeeded");
- start[0] = RMSHSTART0;
+ start[0] = RMSHSTART0;
start[1] = RMSHSTART1+mpi_rank*RMSHCOUNT1/mpi_size;
- block[0] = RMSHBLOCK0;
+ block[0] = RMSHBLOCK0;
block[1] = RMSHBLOCK1;
- stride[0] = RMSHSTRIDE0;
+ stride[0] = RMSHSTRIDE0;
stride[1] = RMSHSTRIDE1;
- count[0] = RMSHCOUNT0;
- count[1] = RMSHCOUNT1/mpi_size;
-
+ count[0]  = RMSHCOUNT0;
+ count[1]  = RMSHCOUNT1/mpi_size;
+
ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_OR, start, stride, count, block);
- VRFY((ret >= 0),"hyperslab selection succeeded");
+ VRFY((ret >= 0),"hyperslab selection succeeded");
- /*
+ /*
* Initialize data buffer.
*/
for (i = 0; i < MSPACE_DIM1; i++) {
for (j = 0; j < MSPACE_DIM2; j++)
matrix_out[i][j] = 0;
}
-
+
/*
* Read data back to the buffer matrix_out.
*/
ret = H5Dread(datasetc, H5T_NATIVE_INT, mspaceid, fspaceid,
H5P_DEFAULT, matrix_out);
- VRFY((ret >= 0),"H5D independent read succeed");
-
+ VRFY((ret >= 0),"H5D independent read succeed");
+
for (i = 0; i < MSPACE_DIM1; i++) {
for (j = 0; j < MSPACE_DIM2; j++)
matrix_out1[i][j] = 0;
}
ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid,
H5P_DEFAULT, matrix_out1);
- VRFY((ret >= 0),"H5D independent read succeed");
+ VRFY((ret >= 0),"H5D independent read succeed");
ret = 0;
for (i = 0; i < MSPACE_DIM1; i++){
@@ -535,37 +535,37 @@ void coll_write_test(int chunk_factor)
if(ret < 0) break;
}
}
- VRFY((ret >= 0),"H5D contiguous irregular collective write succeed");
-
+ VRFY((ret >= 0),"H5D contiguous irregular collective write succeed");
+
/*
* Close memory file and memory dataspaces.
- */
+ */
ret = H5Sclose(mspaceid);
- VRFY((ret >= 0),"");
+ VRFY((ret >= 0),"");
ret = H5Sclose(fspaceid);
- VRFY((ret >= 0),"");
-
+ VRFY((ret >= 0),"");
+
/*
* Close dataset.
- */
+ */
ret = H5Dclose(dataseti);
- VRFY((ret >= 0),"");
+ VRFY((ret >= 0),"");
ret = H5Dclose(datasetc);
- VRFY((ret >= 0),"");
+ VRFY((ret >= 0),"");
/*
* Close property list
*/
ret = H5Pclose(acc_plist);
- VRFY((ret >= 0),"");
-
+ VRFY((ret >= 0),"");
+
/*
* Close the file.
- */
+ */
ret = H5Fclose(file);
- VRFY((ret >= 0),"");
+ VRFY((ret >= 0),"");
return ;
}
@@ -579,9 +579,9 @@ void coll_read_test(int chunk_factor)
hid_t file, dataseti; /* File and dataset identifiers */
hid_t mspaceid, fspaceid1; /* Dataspace identifiers */
hbool_t use_gpfs = FALSE;
-
+
/* Dimension sizes of the dataset (on disk) */
- hsize_t mdim[] = {MSPACE_DIM1, MSPACE_DIM2}; /* Dimension sizes of the
+ hsize_t mdim[] = {MSPACE_DIM1, MSPACE_DIM2}; /* Dimension sizes of the
dataset in memory when we
read selection from the
dataset on the disk */
@@ -594,8 +594,8 @@ void coll_read_test(int chunk_factor)
herr_t ret;
unsigned i,j;
- int matrix_out[MSPACE_DIM1][MSPACE_DIM2];
- int matrix_out1[MSPACE_DIM1][MSPACE_DIM2]; /* Buffer to read from the
+ int matrix_out[MSPACE_DIM1][MSPACE_DIM2];
+ int matrix_out1[MSPACE_DIM1][MSPACE_DIM2]; /* Buffer to read from the
dataset */
int mpi_size,mpi_rank;
@@ -609,7 +609,7 @@ void coll_read_test(int chunk_factor)
/* Obtain file name */
- filename = GetTestParameters();
+ filename = GetTestParameters();
/*
* Buffers' initialization.
@@ -623,13 +623,13 @@ void coll_read_test(int chunk_factor)
#if 0
acc_plist = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((acc_plist >= 0),"");
+ VRFY((acc_plist >= 0),"");
ret = H5Pset_fapl_mpio(acc_plist,comm,info);
VRFY((ret >= 0),"MPIO creation property list succeeded");
#endif
-
- acc_plist = create_faccess_plist(comm, info, facc_type, use_gpfs);
+
+ acc_plist = create_faccess_plist(comm, info, facc_type, use_gpfs);
VRFY((acc_plist >= 0),"");
file = H5Fopen(filename, H5F_ACC_RDONLY, acc_plist);
@@ -640,34 +640,34 @@ void coll_read_test(int chunk_factor)
*/
dataseti = H5Dopen(file,"independ_write");
VRFY((dataseti >= 0),"H5Dopen succeeded");
-
- /*
+
+ /*
* Get dataspace of the open dataset.
*/
fspaceid1 = H5Dget_space(dataseti);
VRFY((fspaceid1 >= 0),"file dataspace obtained succeeded");
- start[0] = RFFHSTART0;
+ start[0] = RFFHSTART0;
start[1] = RFFHSTART1+mpi_rank*RFFHCOUNT1/mpi_size;
- block[0] = RFFHBLOCK0;
+ block[0] = RFFHBLOCK0;
block[1] = RFFHBLOCK1;
- stride[0] = RFFHSTRIDE0;
+ stride[0] = RFFHSTRIDE0;
stride[1] = RFFHSTRIDE1;
- count[0] = RFFHCOUNT0;
- count[1] = RFFHCOUNT1/mpi_size;
-
+ count[0] = RFFHCOUNT0;
+ count[1] = RFFHCOUNT1/mpi_size;
+
ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0),"hyperslab selection succeeded");
start[0] = RFSHSTART0;
- start[1] = RFSHSTART1+RFSHCOUNT1*mpi_rank/mpi_size;
- block[0] = RFSHBLOCK0;
- block[1] = RFSHBLOCK1;
- stride[0] = RFSHSTRIDE0;
+ start[1] = RFSHSTART1+RFSHCOUNT1*mpi_rank/mpi_size;
+ block[0] = RFSHBLOCK0;
+ block[1] = RFSHBLOCK1;
+ stride[0] = RFSHSTRIDE0;
stride[1] = RFSHSTRIDE0;
- count[0] = RFSHCOUNT0;
+ count[0] = RFSHCOUNT0;
count[1] = RFSHCOUNT1/mpi_size;
ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_OR, start, stride, count, block);
@@ -679,58 +679,58 @@ void coll_read_test(int chunk_factor)
*/
mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL);
- /*
+ /*
* Select two hyperslabs in memory. Hyperslabs has the same
* size and shape as the selected hyperslabs for the file dataspace.
*/
- start[0] = RMFHSTART0;
+ start[0] = RMFHSTART0;
start[1] = RMFHSTART1+mpi_rank*RMFHCOUNT1/mpi_size;
- block[0] = RMFHBLOCK0;
+ block[0] = RMFHBLOCK0;
block[1] = RMFHBLOCK1;
- stride[0] = RMFHSTRIDE0;
+ stride[0] = RMFHSTRIDE0;
stride[1] = RMFHSTRIDE1;
- count[0] = RMFHCOUNT0;
- count[1] = RMFHCOUNT1/mpi_size;
+ count[0] = RMFHCOUNT0;
+ count[1] = RMFHCOUNT1/mpi_size;
ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0),"hyperslab selection succeeded");
- start[0] = RMSHSTART0;
+ start[0] = RMSHSTART0;
start[1] = RMSHSTART1+mpi_rank*RMSHCOUNT1/mpi_size;
- block[0] = RMSHBLOCK0;
+ block[0] = RMSHBLOCK0;
block[1] = RMSHBLOCK1;
- stride[0] = RMSHSTRIDE0;
+ stride[0] = RMSHSTRIDE0;
stride[1] = RMSHSTRIDE1;
- count[0] = RMSHCOUNT0;
- count[1] = RMSHCOUNT1/mpi_size;
+ count[0] = RMSHCOUNT0;
+ count[1] = RMSHCOUNT1/mpi_size;
ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_OR, start, stride, count, block);
- VRFY((ret >= 0),"hyperslab selection succeeded");
+ VRFY((ret >= 0),"hyperslab selection succeeded");
- /*
+ /*
* Initialize data buffer.
*/
for (i = 0; i < MSPACE_DIM1; i++) {
for (j = 0; j < MSPACE_DIM2; j++)
matrix_out[i][j] = 0;
}
-
+
/*
* Read data back to the buffer matrix_out.
*/
xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0),"");
+ VRFY((xfer_plist >= 0),"");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0),"MPIO data transfer property list succeed");
+ VRFY((ret >= 0),"MPIO data transfer property list succeed");
ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1,
xfer_plist, matrix_out);
- VRFY((ret >= 0),"H5D collecive read succeed");
+ VRFY((ret >= 0),"H5D collective read succeed");
ret = H5Pclose(xfer_plist);
- VRFY((ret >= 0),"");
-
+ VRFY((ret >= 0),"");
+
for (i = 0; i < MSPACE_DIM1; i++) {
for (j = 0; j < MSPACE_DIM2; j++)
matrix_out1[i][j] = 0;
@@ -738,7 +738,7 @@ void coll_read_test(int chunk_factor)
ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1,
H5P_DEFAULT, matrix_out1);
- VRFY((ret >= 0),"H5D independent read succeed");
+ VRFY((ret >= 0),"H5D independent read succeed");
ret = 0;
for (i = 0; i < MSPACE_DIM1; i++){
for (j = 0; j < MSPACE_DIM2; j++){
@@ -746,33 +746,33 @@ void coll_read_test(int chunk_factor)
if(ret < 0) break;
}
}
- VRFY((ret >= 0),"H5D contiguous irregular collective read succeed");
+ VRFY((ret >= 0),"H5D contiguous irregular collective read succeed");
/*
* Close memory file and memory dataspaces.
- */
+ */
ret = H5Sclose(mspaceid);
- VRFY((ret >= 0),"");
+ VRFY((ret >= 0),"");
ret = H5Sclose(fspaceid1);
- VRFY((ret >= 0),"");
-
+ VRFY((ret >= 0),"");
+
/*
* Close dataset.
- */
+ */
ret = H5Dclose(dataseti);
- VRFY((ret >= 0),"");
+ VRFY((ret >= 0),"");
/*
* Close property list
*/
ret = H5Pclose(acc_plist);
- VRFY((ret >= 0),"");
-
+ VRFY((ret >= 0),"");
+
/*
* Close the file.
- */
+ */
ret = H5Fclose(file);
- VRFY((ret >= 0),"");
+ VRFY((ret >= 0),"");
return ;
}
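coll_read_test() mirrors the write test from the reading side: the dataset written independently is read twice through the same irregular selection, once collectively and once with default (independent) transfer properties, and the two buffers must agree. A condensed, hedged sketch (error checking omitted):

    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);

    H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1, dxpl, matrix_out);
    H5Pclose(dxpl);

    H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1, H5P_DEFAULT, matrix_out1);

    ret = 0;
    for (i = 0; i < MSPACE_DIM1 && ret >= 0; i++)
        for (j = 0; j < MSPACE_DIM2; j++)
            if (matrix_out[i][j] != matrix_out1[i][j]) { ret = -1; break; }
    VRFY((ret >= 0), "collective read matches independent read");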
diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c
index a87af91..2f75e51 100644
--- a/testpar/testphdf5.c
+++ b/testpar/testphdf5.c
@@ -70,7 +70,7 @@ void pause_proc(void)
/* mpi variables */
int mpi_size, mpi_rank;
- int mpi_namelen;
+ int mpi_namelen;
char mpi_name[MPI_MAX_PROCESSOR_NAME];
pid = getpid();
@@ -113,7 +113,7 @@ usage(void)
printf("\t-m<n_datasets>"
"\tset number of datasets for the multiple dataset test\n");
printf("\t-n<n_groups>"
- "\tset number of groups for the multiple group test\n");
+ "\tset number of groups for the multiple group test\n");
printf("\t-f <prefix>\tfilename prefix\n");
printf("\t-2\t\tuse Split-file together with MPIO\n");
printf("\t-p\t\tuse combo MPI-POSIX driver\n");
@@ -269,7 +269,7 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type,
if (l_facc_type == FACC_MPIO){
/* set Parallel access with communicator */
- ret = H5Pset_fapl_mpio(ret_pl, comm, info);
+ ret = H5Pset_fapl_mpio(ret_pl, comm, info);
VRFY((ret >= 0), "");
return(ret_pl);
}
@@ -280,7 +280,7 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type,
mpio_pl = H5Pcreate (H5P_FILE_ACCESS);
VRFY((mpio_pl >= 0), "");
/* set Parallel access with communicator */
- ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
+ ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
VRFY((ret >= 0), "");
/* setup file access template */
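create_faccess_plist() branches on l_facc_type: the plain FACC_MPIO branch is shown in full above, while the split-file branch combines the MPI-IO driver with the split driver. A hedged sketch of that second branch (the metadata/raw extension strings are illustrative, not taken from the file):

    /* FACC_MPIO | FACC_SPLIT: metadata and raw data go to separate files,
     * each accessed through an MPI-IO file-access property list */
    hid_t ret_pl  = H5Pcreate(H5P_FILE_ACCESS);
    hid_t mpio_pl = H5Pcreate(H5P_FILE_ACCESS);

    H5Pset_fapl_mpio(mpio_pl, comm, info);
    H5Pset_fapl_split(ret_pl, "-m.h5", mpio_pl, "-r.h5", mpio_pl);
    H5Pclose(mpio_pl);                  /* ret_pl keeps its own copies */
    return ret_pl;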
@@ -353,66 +353,66 @@ int main(int argc, char **argv)
TestInit(argv[0], usage, parse_options);
/* Tests are generally arranged from least to most complexity... */
- AddTest("mpiodup", test_fapl_mpio_dup, NULL,
+ AddTest("mpiodup", test_fapl_mpio_dup, NULL,
"fapl_mpio duplicate", NULL);
- AddTest("posixdup", test_fapl_mpiposix_dup, NULL,
+ AddTest("posixdup", test_fapl_mpiposix_dup, NULL,
"fapl_mpiposix duplicate", NULL);
- AddTest("split", test_split_comm_access, NULL,
+ AddTest("split", test_split_comm_access, NULL,
"dataset using split communicators", PARATESTFILE);
- AddTest("idsetw", dataset_writeInd, NULL,
+ AddTest("idsetw", dataset_writeInd, NULL,
"dataset independent write", PARATESTFILE);
- AddTest("idsetr", dataset_readInd, NULL,
+ AddTest("idsetr", dataset_readInd, NULL,
"dataset independent read", PARATESTFILE);
- AddTest("cdsetw", dataset_writeAll, NULL,
+ AddTest("cdsetw", dataset_writeAll, NULL,
"dataset collective write", PARATESTFILE);
- AddTest("cdsetr", dataset_readAll, NULL,
+ AddTest("cdsetr", dataset_readAll, NULL,
"dataset collective read", PARATESTFILE);
- AddTest("eidsetw", extend_writeInd, NULL,
+ AddTest("eidsetw", extend_writeInd, NULL,
"extendible dataset independent write", PARATESTFILE);
- AddTest("eidsetr", extend_readInd, NULL,
+ AddTest("eidsetr", extend_readInd, NULL,
"extendible dataset independent read", PARATESTFILE);
- AddTest("ecdsetw", extend_writeAll, NULL,
+ AddTest("ecdsetw", extend_writeAll, NULL,
"extendible dataset collective write", PARATESTFILE);
- AddTest("ecdsetr", extend_readAll, NULL,
+ AddTest("ecdsetr", extend_readAll, NULL,
"extendible dataset collective read", PARATESTFILE);
- AddTest("eidsetw2", extend_writeInd2, NULL,
+ AddTest("eidsetw2", extend_writeInd2, NULL,
"extendible dataset independent write #2", PARATESTFILE);
#ifdef H5_HAVE_FILTER_DEFLATE
- AddTest("cmpdsetr", compress_readAll, NULL,
+ AddTest("cmpdsetr", compress_readAll, NULL,
"compressed dataset collective read", PARATESTFILE);
#endif /* H5_HAVE_FILTER_DEFLATE */
ndsets_params.name = PARATESTFILE;
ndsets_params.count = ndatasets;
- AddTest("ndsetw", multiple_dset_write, NULL,
+ AddTest("ndsetw", multiple_dset_write, NULL,
"multiple datasets write", &ndsets_params);
ngroups_params.name = PARATESTFILE;
ngroups_params.count = ngroups;
- AddTest("ngrpw", multiple_group_write, NULL,
+ AddTest("ngrpw", multiple_group_write, NULL,
"multiple groups write", &ngroups_params);
- AddTest("ngrpr", multiple_group_read, NULL,
+ AddTest("ngrpr", multiple_group_read, NULL,
"multiple groups read", &ngroups_params);
- AddTest("compact", compact_dataset, NULL,
+ AddTest("compact", compact_dataset, NULL,
"compact dataset test", PARATESTFILE);
collngroups_params.name = PARATESTFILE;
collngroups_params.count = ngroups;
- AddTest("cngrpw", collective_group_write, NULL,
+ AddTest("cngrpw", collective_group_write, NULL,
"collective group and dataset write", &collngroups_params);
- AddTest("ingrpr", independent_group_read, NULL,
+ AddTest("ingrpr", independent_group_read, NULL,
"independent group and dataset read", &collngroups_params);
/* By default, do not run big dataset. */
- AddTest("-bigdset", big_dataset, NULL,
+ AddTest("-bigdset", big_dataset, NULL,
"big dataset test", PARATESTFILE);
- AddTest("fill", dataset_fillvalue, NULL,
+ AddTest("fill", dataset_fillvalue, NULL,
"dataset fill value", PARATESTFILE);
#if 0
@@ -467,7 +467,7 @@ int main(int argc, char **argv)
AddTest("ccchunkr",
coll_irregular_complex_chunk_read,NULL,
"collective irregular complex chunk read",PARATESTFILE);
-
+
#if 0
if((mpi_size > 3) && MAINPROCESS) {
@@ -498,14 +498,14 @@ int main(int argc, char **argv)
#endif
- AddTest("null", null_dataset, NULL,
+ AddTest("null", null_dataset, NULL,
"null dataset test", PARATESTFILE);
io_mode_confusion_params.name = PARATESTFILE;
io_mode_confusion_params.count = 0; /* value not used */
- AddTest("I/Omodeconf", io_mode_confusion, NULL,
- "I/O mode confusion test -- hangs quickly on failure",
+ AddTest("I/Omodeconf", io_mode_confusion, NULL,
+ "I/O mode confusion test -- hangs quickly on failure",
&io_mode_confusion_params);
/* Display testing information */
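Every entry in the long block above follows the same registration pattern from the shared test framework: a short name, the test function, an optional cleanup function, a one-line description, and a parameter block. A hedged sketch of adding one more test (the AddTest() prototype is inferred from its usage here, and the test name and function are made up; judging by "-bigdset" above, a leading '-' registers a test that is skipped unless explicitly requested):

    static void my_new_test(void);                 /* hypothetical test body */

    AddTest("mynewtst",                            /* short selection name   */
            my_new_test, NULL,                     /* test fn, no cleanup fn */
            "one-line description for the test summary",
            PARATESTFILE);                         /* parameter block        */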
diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h
index b15779e..14e315e 100644
--- a/testpar/testphdf5.h
+++ b/testpar/testphdf5.h
@@ -60,94 +60,94 @@
/*Constants for MPI derived data type generated from span tree */
-#define MSPACE1_RANK 1 /* Rank of the first dataset in memory */
-#define MSPACE1_DIM 27000 /* Dataset size in memory */
-#define FSPACE_RANK 2 /* Dataset rank as it is stored in the file */
-#define FSPACE_DIM1 9 /* Dimension sizes of the dataset as it is stored in the file */
-#define FSPACE_DIM2 3600 /* We will read dataset back from the file to the dataset in memory with these dataspace parameters. */
-#define MSPACE_RANK 2
-#define MSPACE_DIM1 9
-#define MSPACE_DIM2 3600
-#define FHCOUNT0 1 /* Count of the first dimension of the first hyperslab selection*/
-#define FHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/
-#define FHSTRIDE0 4 /* Stride of the first dimension of the first hyperslab selection*/
-#define FHSTRIDE1 3 /* Stride of the second dimension of the first hyperslab selection*/
-#define FHBLOCK0 3 /* Block of the first dimension of the first hyperslab selection*/
-#define FHBLOCK1 2 /* Block of the second dimension of the first hyperslab selection*/
-#define FHSTART0 0 /* start of the first dimension of the first hyperslab selection*/
-#define FHSTART1 1 /* start of the second dimension of the first hyperslab selection*/
-
-#define SHCOUNT0 1 /* Count of the first dimension of the first hyperslab selection*/
-#define SHCOUNT1 1 /* Count of the second dimension of the first hyperslab selection*/
-#define SHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
-#define SHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
-#define SHBLOCK0 3 /* Block of the first dimension of the first hyperslab selection*/
-#define SHBLOCK1 768 /* Block of the second dimension of the first hyperslab selection*/
-#define SHSTART0 4 /* start of the first dimension of the first hyperslab selection*/
-#define SHSTART1 0 /* start of the second dimension of the first hyperslab selection*/
-
-#define MHCOUNT0 6912 /* Count of the first dimension of the first hyperslab selection*/
-#define MHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
-#define MHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
-#define MHSTART0 1 /* start of the first dimension of the first hyperslab selection*/
-
-
-
-#define RFFHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
-#define RFFHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/
-#define RFFHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
-#define RFFHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
-#define RFFHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
-#define RFFHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
-#define RFFHSTART0 1 /* start of the first dimension of the first hyperslab selection*/
-#define RFFHSTART1 2 /* start of the second dimension of the first hyperslab selection*/
-
-
-#define RFSHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
-#define RFSHCOUNT1 1536 /* Count of the second dimension of the first hyperslab selection*/
-#define RFSHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
-#define RFSHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
-#define RFSHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
-#define RFSHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
-#define RFSHSTART0 2 /* start of the first dimension of the first hyperslab selection*/
-#define RFSHSTART1 4 /* start of the second dimension of the first hyperslab selection*/
-
-
-#define RMFHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
-#define RMFHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/
-#define RMFHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
-#define RMFHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
-#define RMFHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
-#define RMFHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
-#define RMFHSTART0 0 /* start of the first dimension of the first hyperslab selection*/
-#define RMFHSTART1 0 /* start of the second dimension of the first hyperslab selection*/
-
-#define RMSHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
-#define RMSHCOUNT1 1536 /* Count of the second dimension of the first hyperslab selection*/
-#define RMSHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
-#define RMSHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
-#define RMSHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
-#define RMSHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
-#define RMSHSTART0 1 /* start of the first dimension of the first hyperslab selection*/
-#define RMSHSTART1 2 /* start of the second dimension of the first hyperslab selection*/
-
-
-#define NPOINTS 4 /* Number of points that will be selected
- and overwritten */
+#define MSPACE1_RANK 1 /* Rank of the first dataset in memory */
+#define MSPACE1_DIM 27000 /* Dataset size in memory */
+#define FSPACE_RANK 2 /* Dataset rank as it is stored in the file */
+#define FSPACE_DIM1 9 /* Dimension sizes of the dataset as it is stored in the file */
+#define FSPACE_DIM2 3600 /* We will read dataset back from the file to the dataset in memory with these dataspace parameters. */
+#define MSPACE_RANK 2
+#define MSPACE_DIM1 9
+#define MSPACE_DIM2 3600
+#define FHCOUNT0 1 /* Count of the first dimension of the first hyperslab selection*/
+#define FHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/
+#define FHSTRIDE0 4 /* Stride of the first dimension of the first hyperslab selection*/
+#define FHSTRIDE1 3 /* Stride of the second dimension of the first hyperslab selection*/
+#define FHBLOCK0 3 /* Block of the first dimension of the first hyperslab selection*/
+#define FHBLOCK1 2 /* Block of the second dimension of the first hyperslab selection*/
+#define FHSTART0 0 /* start of the first dimension of the first hyperslab selection*/
+#define FHSTART1 1 /* start of the second dimension of the first hyperslab selection*/
+
+#define SHCOUNT0 1 /* Count of the first dimension of the first hyperslab selection*/
+#define SHCOUNT1 1 /* Count of the second dimension of the first hyperslab selection*/
+#define SHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
+#define SHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
+#define SHBLOCK0 3 /* Block of the first dimension of the first hyperslab selection*/
+#define SHBLOCK1 768 /* Block of the second dimension of the first hyperslab selection*/
+#define SHSTART0 4 /* start of the first dimension of the first hyperslab selection*/
+#define SHSTART1 0 /* start of the second dimension of the first hyperslab selection*/
+
+#define MHCOUNT0 6912 /* Count of the first dimension of the first hyperslab selection*/
+#define MHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
+#define MHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
+#define MHSTART0 1 /* start of the first dimension of the first hyperslab selection*/
+
+
+
+#define RFFHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
+#define RFFHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/
+#define RFFHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
+#define RFFHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
+#define RFFHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
+#define RFFHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
+#define RFFHSTART0 1 /* start of the first dimension of the first hyperslab selection*/
+#define RFFHSTART1 2 /* start of the second dimension of the first hyperslab selection*/
+
+
+#define RFSHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
+#define RFSHCOUNT1 1536 /* Count of the second dimension of the first hyperslab selection*/
+#define RFSHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
+#define RFSHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
+#define RFSHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
+#define RFSHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
+#define RFSHSTART0 2 /* start of the first dimension of the first hyperslab selection*/
+#define RFSHSTART1 4 /* start of the second dimension of the first hyperslab selection*/
+
+
+#define RMFHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
+#define RMFHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/
+#define RMFHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
+#define RMFHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
+#define RMFHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
+#define RMFHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
+#define RMFHSTART0 0 /* start of the first dimension of the first hyperslab selection*/
+#define RMFHSTART1 0 /* start of the second dimension of the first hyperslab selection*/
+
+#define RMSHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
+#define RMSHCOUNT1 1536 /* Count of the second dimension of the first hyperslab selection*/
+#define RMSHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
+#define RMSHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
+#define RMSHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
+#define RMSHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
+#define RMSHSTART0 1 /* start of the first dimension of the first hyperslab selection*/
+#define RMSHSTART1 2 /* start of the second dimension of the first hyperslab selection*/
+
+
+#define NPOINTS 4 /* Number of points that will be selected
+ and overwritten */
/* Don't erase these lines, they are put here for debugging purposes */
/*
-#define MSPACE1_RANK 1
-#define MSPACE1_DIM 50
-#define MSPACE2_RANK 1
-#define MSPACE2_DIM 4
-#define FSPACE_RANK 2
-#define FSPACE_DIM1 8
-#define FSPACE_DIM2 12
-#define MSPACE_RANK 2
-#define MSPACE_DIM1 8
-#define MSPACE_DIM2 9
-#define NPOINTS 4
+#define MSPACE1_RANK 1
+#define MSPACE1_DIM 50
+#define MSPACE2_RANK 1
+#define MSPACE2_DIM 4
+#define FSPACE_RANK 2
+#define FSPACE_DIM1 8
+#define FSPACE_DIM2 12
+#define MSPACE_RANK 2
+#define MSPACE_DIM1 8
+#define MSPACE_DIM2 9
+#define NPOINTS 4
*/ /* end of debugging macro */
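A worked reading of the span-tree constants defined above (plain arithmetic on the macro values, not something stated in the header):

    /* first  file hyperslab: FHCOUNT0*FHBLOCK0 = 1*3   = 3 rows (0-2)
     *                        FHCOUNT1*FHBLOCK1 = 768*2 = 1536 columns
     *                        -> 3 * 1536 = 4608 elements
     * second file hyperslab: SHCOUNT0*SHBLOCK0 = 1*3   = 3 rows (4-6)
     *                        SHCOUNT1*SHBLOCK1 = 1*768 = 768 columns
     *                        -> 3 * 768  = 2304 elements
     *
     * The two pieces cover disjoint rows, so over the whole dataset
     * (before the per-rank /mpi_size factors in the code) the union holds
     * 4608 + 2304 = 6912 elements -- the same value as MHCOUNT0, the
     * length of the corresponding one-dimensional selection in memory. */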