path: root/testpar/testphdf5.c
author    Albert Cheng <acheng@hdfgroup.org>  1998-04-13 04:35:49 (GMT)
committer Albert Cheng <acheng@hdfgroup.org>  1998-04-13 04:35:49 (GMT)
commit    851b448c9d105dfd821b4fe388b272044d2d8ccd (patch)
tree      77fcba4b741619d17ac1514d6906a7625dee94dd /testpar/testphdf5.c
parent    33a49221fccc5adc7ec9c5a556be4f6cb72f4e34 (diff)
[svn-r344] Added collective access tests to testphdf5.c.
Changed the data file names to *.h5f to avoid potential mixup with the split file convention.
Diffstat (limited to 'testpar/testphdf5.c')
-rw-r--r--  testpar/testphdf5.c | 728
1 file changed, 597 insertions(+), 131 deletions(-)
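
Before the diff itself, a condensed, illustrative sketch of the collective-write pattern the new phdf5writeAll/phdf5readAll tests exercise. It is not part of the commit; it simply strings together the same 1998-era calls that appear in the diff below (H5Pset_mpi, H5Sset_hyperslab, H5Pset_xfer and the H5ACC_INDEPENDENT / H5D_XFER_COLLECTIVE constants belong to that API and have since been replaced in modern HDF5), with error handling reduced to asserts as in the test itself.

/* Illustrative sketch only -- mirrors the pattern added by this commit.
 * Assumes MPI_Init() has already been called by the caller. */
#include <assert.h>
#include <mpi.h>
#include <hdf5.h>

#define DIM1 24    /* divisible by 2, 3, 4, 6, 8, 12, like SPACE1_DIM1 */
#define DIM2 24

void
collective_write_sketch(char *filename)
{
    hid_t acc_tpl, fid, sid, dataset, file_dataspace, mem_dataspace, xfer_plist;
    hsize_t  dims[2] = {DIM1, DIM2};
    hssize_t start[2];
    hsize_t  count[2], stride[2] = {1, 1};
    int data[DIM1][DIM2];
    int mpi_size, mpi_rank, i, j;
    herr_t ret;

    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    /* file access template with parallel MPIO access, as in the tests below */
    acc_tpl = H5Pcreate(H5P_FILE_ACCESS);
    ret = H5Pset_mpi(acc_tpl, MPI_COMM_WORLD, MPI_INFO_NULL, H5ACC_INDEPENDENT);
    assert(ret >= 0);
    fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
    assert(fid >= 0);
    H5Pclose(acc_tpl);

    /* one dataset covering the whole DIM1 x DIM2 grid, created collectively */
    sid = H5Screate_simple(2, dims, NULL);
    dataset = H5Dcreate(fid, "Data1", H5T_NATIVE_INT, sid, H5P_DEFAULT);
    assert(dataset >= 0);

    /* BYROW decomposition: each rank owns DIM1/mpi_size contiguous rows */
    count[0] = DIM1 / mpi_size;      count[1] = DIM2;
    start[0] = mpi_rank * count[0];  start[1] = 0;
    for (i = 0; i < count[0]; i++)
        for (j = 0; j < count[1]; j++)
            data[i][j] = (i + start[0]) * 100 + (j + start[1] + 1);

    file_dataspace = H5Dget_space(dataset);
    H5Sset_hyperslab(file_dataspace, start, count, stride);
    mem_dataspace = H5Screate_simple(2, count, NULL);

    /* the part this commit adds: a collective transfer property list */
    xfer_plist = H5Pcreate(H5P_DATASET_XFER);
    ret = H5Pset_xfer(xfer_plist, H5D_XFER_COLLECTIVE);
    assert(ret >= 0);
    ret = H5Dwrite(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
                   xfer_plist, data);
    assert(ret >= 0);

    H5Pclose(xfer_plist);
    H5Sclose(mem_dataspace);
    H5Sclose(file_dataspace);
    H5Sclose(sid);
    H5Dclose(dataset);
    H5Fclose(fid);
}
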
diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c
index 4fe9c01..2708bab 100644
--- a/testpar/testphdf5.c
+++ b/testpar/testphdf5.c
@@ -27,29 +27,50 @@
#define MESG(x)\
if (verbose) printf("%s\n", x);\
-#ifdef HAVE_PARALLEL
#define MPI_BANNER(mesg)\
{printf("--------------------------------\n");\
- printf("Proc %d: ", mympirank); \
+ printf("Proc %d: ", mpi_rank); \
printf("*** %s\n", mesg);\
printf("--------------------------------\n");}
-#else
-#define MPI_BANNER(mesg)\
- {printf("================================\n");\
- printf("*** %s\n", mesg);\
- printf("================================\n");}
-#endif
-#ifdef HAVE_PARALLEL
#define SYNC(comm)\
{MPI_BANNER("doing a SYNC"); MPI_Barrier(comm); MPI_BANNER("SYNC DONE");}
+/* End of Define some handy debugging shorthands, routines, ... */
+
+/* Constants definitions */
+/* 24 is a multiple of 2, 3, 4, 6, 8, 12. Neat for parallel tests. */
+#define SPACE1_DIM1 24
+#define SPACE1_DIM2 24
+#define SPACE1_RANK 2
+#define DATASETNAME1 "Data1"
+#define DATASETNAME2 "Data2"
+#define DATASETNAME3 "Data3"
+/* hyperslab layout styles */
+#define BYROW 1 /* divide into slabs of rows */
+#define BYCOL 2 /* divide into blocks of columns */
+
+
+/* dataset data type. Ints can be easily dumped in octal. */
+typedef int DATATYPE;
+
+/* global variables */
+int nerrors = 0; /* errors count */
+
+int mpi_size, mpi_rank; /* mpi variables */
+
+/* option flags */
+int verbose = 0; /* verbose, default as no. */
+int doread=1; /* read test */
+int dowrite=1; /* write test */
+
+#ifdef USE_PAUSE
/* pause the process for a moment to allow debugger to attach if desired. */
/* Will pause more if greenlight file is not present but will eventually */
/* continue. */
#include <sys/types.h>
#include <sys/stat.h>
-void pause_proc(MPI_Comm comm, int mympirank, char* processor_name, int namelen,
+void pause_proc(MPI_Comm comm, int mpi_rank, char* mpi_name, int mpi_namelen,
int argc, char **argv)
{
@@ -57,53 +78,69 @@ void pause_proc(MPI_Comm comm, int mympirank, char* processor_name, int namelen,
struct stat statbuf;
char greenlight[] = "go";
int maxloop = 10;
+ int loops = 0;
int time_int = 10;
+#ifdef DISABLED
/* check if a pause interval option is given */
if (--argc > 0 && isdigit(*++argv))
time_int = atoi(*argv);
+#endif
pid = getpid();
- printf("Proc %d (%*s): pid = %d\n",
- mympirank, namelen, processor_name, pid);
- if (mympirank == 0)
- while ((stat(greenlight, &statbuf) == -1) && maxloop-- > 0){
- printf("waiting(%ds) for file %s ...", time_int, greenlight);
+ if (mpi_rank == 0)
+ while ((stat(greenlight, &statbuf) == -1) && loops < maxloop){
+ if (!loops++){
+ printf("Proc %d (%*s, %d): You may attach %d for debugging.\n",
+ mpi_rank, mpi_namelen, mpi_name, pid, pid);
+ }
+ printf("waiting(%ds) for file %s ...\n", time_int, greenlight);
fflush(stdout);
sleep(time_int);
}
MPI_Barrier(comm);
}
-#endif /*HAVE_PARALLEL*/
-/* End of Define some handy debugging shorthands, routines, ... */
-
-/* Constants definitions */
-/* 24 is a multiple of 2, 3, 4, 6, 8, 12. Neat for parallel tests. */
-#define SPACE1_DIM1 8
-#define SPACE1_DIM2 12
-#define SPACE1_RANK 2
-#define DATASETNAME1 "Data1"
-#define DATASETNAME2 "Data2"
-#define DATASETNAME3 "Data3"
-
-/* dataset data type. Int's can be easily octo dumped. */
-typedef int DATATYPE;
-
-/* global variables */
-char *filenames[]={
-#ifdef HAVE_PARALLEL
-"ParaEg1.h5f", "ParaEg2.h5f"
-#else
-"Eg1.h5f", "Eg2.h5f"
-#endif
-};
-
-int nerrors = 0; /* errors count */
+#endif /* USE_PAUSE */
-/* option flags */
-int verbose = 0; /* verbose, default as no. */
-int doread=1; /* read test */
-int dowrite=1; /* write test */
+/*
+ * Setup the dimensions of the hyperslab.
+ * Two modes--by rows or by columns.
+ * Assume dimension rank is 2.
+ */
+void
+slab_set(hssize_t start[], hsize_t count[], hsize_t stride[], int mode)
+{
+ switch (mode){
+ case BYROW:
+ /* Each process takes a slab of rows. */
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = SPACE1_DIM1/mpi_size;
+ count[1] = SPACE1_DIM2;
+ start[0] = mpi_rank*count[0];
+ start[1] = 0;
+ break;
+ case BYCOL:
+ /* Each process takes a block of columns. */
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = SPACE1_DIM1;
+ count[1] = SPACE1_DIM2/mpi_size;
+ start[0] = 0;
+ start[1] = mpi_rank*count[1];
+ break;
+ default:
+ /* Unknown mode. Set it to cover the whole dataset. */
+ printf("unknown slab_set mode (%d)\n", mode);
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = SPACE1_DIM1;
+ count[1] = SPACE1_DIM2;
+ start[0] = 0;
+ start[1] = 0;
+ break;
+ }
+}
/*
@@ -111,7 +148,7 @@ int dowrite=1; /* write test */
* Assume dimension rank is 2 and data is stored contiguous.
*/
void
-dataset_data(int start[], size_t count[], DATATYPE * dataset)
+dataset_fill(hssize_t start[], hsize_t count[], hsize_t stride[], DATATYPE * dataset)
{
DATATYPE *dataptr = dataset;
int i, j;
@@ -119,7 +156,7 @@ dataset_data(int start[], size_t count[], DATATYPE * dataset)
/* put some trivial data in the data_array */
for (i=0; i < count[0]; i++){
for (j=0; j < count[1]; j++){
- *dataptr++ = (i+start[0])*100 + (j+1);
+ *dataptr++ = (i*stride[0]+start[0])*100 + (j*stride[1]+start[1]+1);
}
}
}
@@ -128,14 +165,14 @@ dataset_data(int start[], size_t count[], DATATYPE * dataset)
/*
* Print the content of the dataset.
*/
-void dataset_print(int start[], size_t count[], DATATYPE * dataset)
+void dataset_print(hssize_t start[], hsize_t count[], hsize_t stride[], DATATYPE * dataset)
{
DATATYPE *dataptr = dataset;
int i, j;
/* print the slab read */
for (i=0; i < count[0]; i++){
- printf("Row %d: ", i+start[0]);
+ printf("Row %ld: ", (long)i*stride[0]+start[0]);
for (j=0; j < count[1]; j++){
printf("%03d ", *dataptr++);
}
@@ -147,32 +184,50 @@ void dataset_print(int start[], size_t count[], DATATYPE * dataset)
/*
* Print the content of the dataset.
*/
-int dataset_vrfy(int start[], size_t count[], DATATYPE *dataset, DATATYPE *original)
+int dataset_vrfy(hssize_t start[], hsize_t count[], hsize_t stride[], DATATYPE *dataset, DATATYPE *original)
{
+#define MAX_ERR_REPORT 10 /* Maximum number of errors reported */
DATATYPE *dataptr = dataset;
DATATYPE *originptr = original;
int i, j, nerrors;
+ /* print it if verbose */
+ if (verbose)
+ dataset_print(start, count, stride, dataset);
+
nerrors = 0;
for (i=0; i < count[0]; i++){
for (j=0; j < count[1]; j++){
if (*dataset++ != *original++){
- printf("Dataset Verify failed at [%d][%d]: expect %d, got %d\n",
- i, j, *(dataset-1), *(original-1));
- nerrors++;
+ nerrors++;
+ if (nerrors <= MAX_ERR_REPORT){
+ printf("Dataset Verify failed at [%d][%d](row %d, col %d): expect %d, got %d\n",
+ i, j,
+ (int) i*stride[0]+start[0], (int) j*stride[1]+start[1],
+ *(dataset-1), *(original-1));
}
+ }
}
}
+ if (nerrors > MAX_ERR_REPORT)
+ printf("[more errors ...]\n");
if (nerrors)
printf("%d errors found in dataset_vrfy\n", nerrors);
return(nerrors);
}
-/* Example of using the parallel HDF5 library to create a dataset */
+/*
+ * Example of using the parallel HDF5 library to create two datasets
+ * in one HDF5 file with parallel MPIO access support.
+ * The Datasets are of sizes (number-of-mpi-processes x DIM1) x DIM2.
+ * Each process controls only a slab of size DIM1 x DIM2 within each
+ * dataset.
+ */
+
void
-phdf5writeInd()
+phdf5writeInd(char *filename)
{
hid_t fid1, fid2; /* HDF5 file IDs */
hid_t acc_tpl1; /* File access templates */
@@ -181,52 +236,45 @@ phdf5writeInd()
hid_t mem_dataspace; /* memory dataspace ID */
hid_t dataset1, dataset2; /* Dataset ID */
int rank = SPACE1_RANK; /* Logical rank of dataspace */
- size_t dims1[SPACE1_RANK] = {SPACE1_DIM1,SPACE1_DIM2}; /* dataspace dim sizes */
+ hsize_t dims1[SPACE1_RANK] =
+ {SPACE1_DIM1,SPACE1_DIM2}; /* dataspace dim sizes */
+ hsize_t dimslocal1[SPACE1_RANK] =
+ {SPACE1_DIM1,SPACE1_DIM2}; /* local dataspace dim sizes */
DATATYPE data_array1[SPACE1_DIM1][SPACE1_DIM2]; /* data buffer */
- int start[SPACE1_RANK]; /* for hyperslab setting */
- size_t count[SPACE1_RANK], stride[SPACE1_RANK]; /* for hyperslab setting */
+ hssize_t start[SPACE1_RANK]; /* for hyperslab setting */
+ hsize_t count[SPACE1_RANK], stride[SPACE1_RANK]; /* for hyperslab setting */
herr_t ret; /* Generic return value */
int i, j;
- int numprocs, mympirank;
+ int mpi_size, mpi_rank;
char *fname;
- int color = 0; /* used for MPI_Comm_split */
int mrc; /* mpi return code */
-#ifdef HAVE_PARALLEL
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
- /* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
- MPI_Comm_rank(MPI_COMM_WORLD,&mympirank);
-#else
- numprocs = 1;
- mympirank = 0;
-#endif
+ if (verbose)
+ printf("Independent write test on file %s\n", filename);
-#ifdef NO
- /* split into two new communicators, one contains the originally */
- /* odd rank processes, the other the even ones. */
- color = mympirank%2;
- mrc = MPI_Comm_split (MPI_COMM_WORLD, color, mympirank, &comm);
- assert(mrc==MPI_SUCCESS);
-#endif
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
- /* setup file access template */
+ /* -------------------
+ * START AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template with parallel IO access. */
acc_tpl1 = H5Pcreate (H5P_FILE_ACCESS);
assert(acc_tpl1 != FAIL);
MESG("H5Pcreate access succeed");
-#ifdef HAVE_PARALLEL
/* set Independent Parallel access with communicator */
ret = H5Pset_mpi(acc_tpl1, comm, info, H5ACC_INDEPENDENT);
assert(ret != FAIL);
MESG("H5Pset_mpi succeed");
-#endif
/* create the file collectively */
- fid1=H5Fcreate(filenames[color],H5F_ACC_TRUNC,H5P_DEFAULT,acc_tpl1);
+ fid1=H5Fcreate(filename,H5F_ACC_TRUNC,H5P_DEFAULT,acc_tpl1);
assert(fid1 != FAIL);
MESG("H5Fcreate succeed");
@@ -235,6 +283,10 @@ phdf5writeInd()
assert(ret != FAIL);
+ /* --------------------------
+ * Define the dimensions of the overall datasets
+ * and the slabs local to the MPI process.
+ * ------------------------- */
/* setup dimensionality object */
sid1 = H5Screate_simple (SPACE1_RANK, dims1, NULL);
assert (sid1 != FAIL);
@@ -256,9 +308,9 @@ phdf5writeInd()
/* set up dimensions of the slab this process accesses */
- start[0] = mympirank*SPACE1_DIM1/numprocs;
+ start[0] = mpi_rank*SPACE1_DIM1/mpi_size;
start[1] = 0;
- count[0] = SPACE1_DIM1/numprocs;
+ count[0] = SPACE1_DIM1/mpi_size;
count[1] = SPACE1_DIM2;
stride[0] = 1;
stride[1] =1;
@@ -267,7 +319,7 @@ if (verbose)
start[0], start[1], count[0], count[1], count[0]*count[1]);
/* put some trivial data in the data_array */
- dataset_data(start, count, &data_array1[0][0]);
+ dataset_fill(start, count, stride, &data_array1[0][0]);
MESG("data_array initialized");
/* create a file dataspace independently */
@@ -314,7 +366,7 @@ if (verbose)
/* Example of using the parallel HDF5 library to read a dataset */
void
-phdf5readInd()
+phdf5readInd(char *filename)
{
hid_t fid1, fid2; /* HDF5 file IDs */
hid_t acc_tpl1; /* File access templates */
@@ -323,41 +375,38 @@ phdf5readInd()
hid_t mem_dataspace; /* memory dataspace ID */
hid_t dataset1, dataset2; /* Dataset ID */
int rank = SPACE1_RANK; /* Logical rank of dataspace */
- size_t dims1[] = {SPACE1_DIM1,SPACE1_DIM2}; /* dataspace dim sizes */
+ hsize_t dims1[] = {SPACE1_DIM1,SPACE1_DIM2}; /* dataspace dim sizes */
DATATYPE data_array1[SPACE1_DIM1][SPACE1_DIM2]; /* data buffer */
DATATYPE data_origin1[SPACE1_DIM1][SPACE1_DIM2]; /* expected data buffer */
- int start[SPACE1_RANK]; /* for hyperslab setting */
- size_t count[SPACE1_RANK], stride[SPACE1_RANK]; /* for hyperslab setting */
+ hssize_t start[SPACE1_RANK]; /* for hyperslab setting */
+ hsize_t count[SPACE1_RANK], stride[SPACE1_RANK]; /* for hyperslab setting */
herr_t ret; /* Generic return value */
int i, j;
- int numprocs, mympirank;
-#ifdef HAVE_PARALLEL
+ int mpi_size, mpi_rank;
+
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
+ if (verbose)
+ printf("Independent read test on file %s\n", filename);
+
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
- MPI_Comm_rank(MPI_COMM_WORLD,&mympirank);
-#else
- numprocs = 1;
- mympirank = 0;
-#endif
+ MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
/* setup file access template */
acc_tpl1 = H5Pcreate (H5P_FILE_ACCESS);
assert(acc_tpl1 != FAIL);
-#ifdef HAVE_PARALLEL
/* set Independent Parallel access with communicator */
ret = H5Pset_mpi(acc_tpl1, comm, info, H5ACC_INDEPENDENT);
assert(ret != FAIL);
-#endif
/* open the file collectively */
- fid1=H5Fopen(filenames[0],H5F_ACC_RDWR,acc_tpl1);
+ fid1=H5Fopen(filename,H5F_ACC_RDWR,acc_tpl1);
assert(fid1 != FAIL);
/* Release file-access template */
@@ -374,9 +423,9 @@ phdf5readInd()
/* set up dimensions of the slab this process accesses */
- start[0] = mympirank*SPACE1_DIM1/numprocs;
+ start[0] = mpi_rank*SPACE1_DIM1/mpi_size;
start[1] = 0;
- count[0] = SPACE1_DIM1/numprocs;
+ count[0] = SPACE1_DIM1/mpi_size;
count[1] = SPACE1_DIM2;
stride[0] = 1;
stride[1] =1;
@@ -395,7 +444,7 @@ if (verbose)
assert (mem_dataspace != FAIL);
/* fill dataset with test data */
- dataset_data(start, count, &data_origin1[0][0]);
+ dataset_fill(start, count, stride, &data_origin1[0][0]);
/* read data independently */
ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
@@ -403,7 +452,7 @@ if (verbose)
assert(ret != FAIL);
/* verify the read data with original expected data */
- ret = dataset_vrfy(start, count, &data_array1[0][0], &data_origin1[0][0]);
+ ret = dataset_vrfy(start, count, stride, &data_array1[0][0], &data_origin1[0][0]);
assert(ret != FAIL);
/* read data independently */
@@ -412,7 +461,7 @@ if (verbose)
assert(ret != FAIL);
/* verify the read data with original expected data */
- ret = dataset_vrfy(start, count, &data_array1[0][0], &data_origin1[0][0]);
+ ret = dataset_vrfy(start, count, stride, &data_array1[0][0], &data_origin1[0][0]);
assert(ret == 0);
/* close dataset collectively */
@@ -428,7 +477,401 @@ if (verbose)
H5Fclose(fid1);
}
-#ifdef HAVE_PARALLEL
+
+/*
+ * Example of using the parallel HDF5 library to create two datasets
+ * in one HDF5 file with collective parallel access support.
+ * The Datasets are of sizes (number-of-mpi-processes x DIM1) x DIM2.
+ * Each process controls only a slab of size DIM1 x DIM2 within each
+ * dataset. [Note: not so yet. Datasets are of sizes DIM1xDIM2 and
+ * each process controls a hyperslab within.]
+ */
+
+void
+phdf5writeAll(char *filename)
+{
+ hid_t fid1, fid2; /* HDF5 file IDs */
+ hid_t acc_tpl1; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid1,sid2; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ int rank = SPACE1_RANK; /* Logical rank of dataspace */
+ hsize_t dims1[SPACE1_RANK] =
+ {SPACE1_DIM1,SPACE1_DIM2}; /* dataspace dim sizes */
+ DATATYPE data_array1[SPACE1_DIM1][SPACE1_DIM2]; /* data buffer */
+
+ hssize_t start[SPACE1_RANK]; /* for hyperslab setting */
+ hsize_t count[SPACE1_RANK], stride[SPACE1_RANK]; /* for hyperslab setting */
+
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ if (verbose)
+ printf("Collective write test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+
+ /* -------------------
+ * START AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template with parallel IO access. */
+ acc_tpl1 = H5Pcreate (H5P_FILE_ACCESS);
+ assert(acc_tpl1 != FAIL);
+ MESG("H5Pcreate access succeed");
+ /* set Independent Parallel access with communicator */
+ ret = H5Pset_mpi(acc_tpl1, comm, info, H5ACC_INDEPENDENT);
+ assert(ret != FAIL);
+ MESG("H5Pset_mpi succeed");
+
+ /* create the file collectively */
+ fid1=H5Fcreate(filename,H5F_ACC_TRUNC,H5P_DEFAULT,acc_tpl1);
+ assert(fid1 != FAIL);
+ MESG("H5Fcreate succeed");
+
+ /* Release file-access template */
+ ret=H5Pclose(acc_tpl1);
+ assert(ret != FAIL);
+
+
+ /* --------------------------
+ * Define the dimensions of the overall datasets
+ * and create the dataset
+ * ------------------------- */
+ /* setup dimensionality object */
+ sid1 = H5Screate_simple (SPACE1_RANK, dims1, NULL);
+ assert (sid1 != FAIL);
+ MESG("H5Screate_simple succeed");
+
+
+ /* create a dataset collectively */
+ dataset1 = H5Dcreate(fid1, DATASETNAME1, H5T_NATIVE_INT, sid1, H5P_DEFAULT);
+ assert(dataset1 != FAIL);
+ MESG("H5Dcreate succeed");
+
+ /* create another dataset collectively */
+ dataset2 = H5Dcreate(fid1, DATASETNAME2, H5T_NATIVE_INT, sid1, H5P_DEFAULT);
+ assert(dataset2 != FAIL);
+ MESG("H5Dcreate 2 succeed");
+
+ /*
+ * Set up dimensions of the slab this process accesses.
+ */
+
+ /* Dataset1: each process takes a block of rows. */
+ slab_set(start, count, stride, BYROW);
+if (verbose)
+ printf("start[]=(%d,%d), count[]=(%lu,%lu), total datapoints=%lu\n",
+ start[0], start[1], count[0], count[1], count[0]*count[1]);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset1);
+ assert(file_dataspace != FAIL);
+ MESG("H5Dget_space succeed");
+ ret=H5Sset_hyperslab(file_dataspace, start, count, stride);
+ assert(ret != FAIL);
+ MESG("H5Sset_hyperslab succeed");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple (SPACE1_RANK, count, NULL);
+ assert (mem_dataspace != FAIL);
+
+ /* fill the local slab with some trivial data */
+ dataset_fill(start, count, stride, &data_array1[0][0]);
+ MESG("data_array initialized");
+ if (verbose){
+ MESG("data_array created");
+ dataset_print(start, count, stride, &data_array1[0][0]);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ assert(xfer_plist != FAIL);
+ ret=H5Pset_xfer(xfer_plist, H5D_XFER_COLLECTIVE);
+ assert(ret != FAIL);
+ MESG("H5Pcreate xfer succeed");
+
+ /* write data collectively */
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ assert(ret != FAIL);
+ MESG("H5Dwrite succeed");
+
+ /* release all temporary handles. */
+ /* Could have used them for dataset2 but it is cleaner */
+ /* to create them again.*/
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /* Dataset2: each process takes a block of columns. */
+ slab_set(start, count, stride, BYCOL);
+if (verbose)
+ printf("start[]=(%d,%d), count[]=(%lu,%lu), total datapoints=%lu\n",
+ start[0], start[1], count[0], count[1], count[0]*count[1]);
+
+ /* put some trivial data in the data_array */
+ dataset_fill(start, count, stride, &data_array1[0][0]);
+ MESG("data_array initialized");
+ if (verbose){
+ MESG("data_array created");
+ dataset_print(start, count, stride, &data_array1[0][0]);
+ }
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset1);
+ assert(file_dataspace != FAIL);
+ MESG("H5Dget_space succeed");
+ ret=H5Sset_hyperslab(file_dataspace, start, count, stride);
+ assert(ret != FAIL);
+ MESG("H5Sset_hyperslab succeed");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple (SPACE1_RANK, count, NULL);
+ assert (mem_dataspace != FAIL);
+
+ /* fill the local slab with some trivial data */
+ dataset_fill(start, count, stride, &data_array1[0][0]);
+ MESG("data_array initialized");
+ if (verbose){
+ MESG("data_array created");
+ dataset_print(start, count, stride, &data_array1[0][0]);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ assert(xfer_plist != FAIL);
+ ret=H5Pset_xfer(xfer_plist, H5D_XFER_COLLECTIVE);
+ assert(ret != FAIL);
+ MESG("H5Pcreate xfer succeed");
+
+ /* write data collectively */
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ assert(ret != FAIL);
+ MESG("H5Dwrite succeed");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+
+ /*
+ * All writes completed. Close datasets collectively
+ */
+ ret=H5Dclose(dataset1);
+ assert(ret != FAIL);
+ MESG("H5Dclose1 succeed");
+ ret=H5Dclose(dataset2);
+ assert(ret != FAIL);
+ MESG("H5Dclose2 succeed");
+
+ /* release all IDs created */
+ H5Sclose(sid1);
+
+ /* close the file collectively */
+ H5Fclose(fid1);
+}
+
+/*
+ * Example of using the parallel HDF5 library to read two datasets
+ * in one HDF5 file with collective parallel access support.
+ * The Datasets are of sizes (number-of-mpi-processes x DIM1) x DIM2.
+ * Each process controls only a slab of size DIM1 x DIM2 within each
+ * dataset. [Note: not so yet. Datasets are of sizes DIM1xDIM2 and
+ * each process controls a hyperslab within.]
+ */
+
+void
+phdf5readAll(char *filename)
+{
+ hid_t fid1, fid2; /* HDF5 file IDs */
+ hid_t acc_tpl1; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid1,sid2; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ int rank = SPACE1_RANK; /* Logical rank of dataspace */
+ hsize_t dims1[] = {SPACE1_DIM1,SPACE1_DIM2}; /* dataspace dim sizes */
+ DATATYPE data_array1[SPACE1_DIM1][SPACE1_DIM2]; /* data buffer */
+ DATATYPE data_origin1[SPACE1_DIM1][SPACE1_DIM2]; /* expected data buffer */
+
+ hssize_t start[SPACE1_RANK]; /* for hyperslab setting */
+ hsize_t count[SPACE1_RANK], stride[SPACE1_RANK]; /* for hyperslab setting */
+
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ if (verbose)
+ printf("Collective read test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+
+ /* -------------------
+ * OPEN AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template with parallel IO access. */
+ acc_tpl1 = H5Pcreate (H5P_FILE_ACCESS);
+ assert(acc_tpl1 != FAIL);
+ MESG("H5Pcreate access succeed");
+ /* set Independent Parallel access with communicator */
+ ret = H5Pset_mpi(acc_tpl1, comm, info, H5ACC_INDEPENDENT);
+ assert(ret != FAIL);
+ MESG("H5Pset_mpi succeed");
+
+ /* open the file collectively */
+ fid1=H5Fopen(filename,H5F_ACC_RDWR,acc_tpl1);
+ assert(fid1 != FAIL);
+ MESG("H5Fopen succeed");
+
+ /* Release file-access template */
+ ret=H5Pclose(acc_tpl1);
+ assert(ret != FAIL);
+
+
+ /* --------------------------
+ * Open the datasets in it
+ * ------------------------- */
+ /* open the dataset1 collectively */
+ dataset1 = H5Dopen(fid1, DATASETNAME1);
+ assert(dataset1 != FAIL);
+ MESG("H5Dopen succeed");
+
+ /* open another dataset collectively */
+ dataset2 = H5Dopen(fid1, DATASETNAME2);
+ assert(dataset2 != FAIL);
+ MESG("H5Dopen 2 succeed");
+
+ /*
+ * Set up dimensions of the slab this process accesses.
+ */
+
+ /* Dataset1: each process takes a block of columns. */
+ slab_set(start, count, stride, BYCOL);
+if (verbose)
+ printf("start[]=(%d,%d), count[]=(%lu,%lu), total datapoints=%lu\n",
+ start[0], start[1], count[0], count[1], count[0]*count[1]);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset1);
+ assert(file_dataspace != FAIL);
+ MESG("H5Dget_space succeed");
+ ret=H5Sset_hyperslab(file_dataspace, start, count, stride);
+ assert(ret != FAIL);
+ MESG("H5Sset_hyperslab succeed");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple (SPACE1_RANK, count, NULL);
+ assert (mem_dataspace != FAIL);
+
+ /* fill dataset with test data */
+ dataset_fill(start, count, stride, &data_origin1[0][0]);
+ MESG("data_array initialized");
+ if (verbose){
+ MESG("data_array created");
+ dataset_print(start, count, stride, &data_array1[0][0]);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ assert(xfer_plist != FAIL);
+ ret=H5Pset_xfer(xfer_plist, H5D_XFER_COLLECTIVE);
+ assert(ret != FAIL);
+ MESG("H5Pcreate xfer succeed");
+
+ /* read data collectively */
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ assert(ret != FAIL);
+ MESG("H5Dread succeed");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, &data_array1[0][0], &data_origin1[0][0]);
+ assert(ret != FAIL);
+
+ /* release all temporary handles. */
+ /* Could have used them for dataset2 but it is cleaner */
+ /* to create them again.*/
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /* Dataset2: each process takes a block of rows. */
+ slab_set(start, count, stride, BYROW);
+if (verbose)
+ printf("start[]=(%d,%d), count[]=(%lu,%lu), total datapoints=%lu\n",
+ start[0], start[1], count[0], count[1], count[0]*count[1]);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset1);
+ assert(file_dataspace != FAIL);
+ MESG("H5Dget_space succeed");
+ ret=H5Sset_hyperslab(file_dataspace, start, count, stride);
+ assert(ret != FAIL);
+ MESG("H5Sset_hyperslab succeed");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple (SPACE1_RANK, count, NULL);
+ assert (mem_dataspace != FAIL);
+
+ /* fill dataset with test data */
+ dataset_fill(start, count, stride, &data_origin1[0][0]);
+ MESG("data_array initialized");
+ if (verbose){
+ MESG("data_array created");
+ dataset_print(start, count, stride, &data_array1[0][0]);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ assert(xfer_plist != FAIL);
+ ret=H5Pset_xfer(xfer_plist, H5D_XFER_COLLECTIVE);
+ assert(ret != FAIL);
+ MESG("H5Pcreate xfer succeed");
+
+ /* read data collectively */
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ assert(ret != FAIL);
+ MESG("H5Dread succeed");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, &data_array1[0][0], &data_origin1[0][0]);
+ assert(ret != FAIL);
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+
+ /*
+ * All reads completed. Close datasets collectively
+ */
+ ret=H5Dclose(dataset1);
+ assert(ret != FAIL);
+ MESG("H5Dclose1 succeed");
+ ret=H5Dclose(dataset2);
+ assert(ret != FAIL);
+ MESG("H5Dclose2 succeed");
+
+ /* close the file collectively */
+ H5Fclose(fid1);
+}
+
/*
* test file access by communicator besides COMM_WORLD.
* Split COMM_WORLD into two, one (even_comm) contains the original
@@ -441,9 +884,9 @@ if (verbose)
* sooner or later due to barrier mixed up.
*/
void
-test_split_comm_access()
+test_split_comm_access(char *filenames[])
{
- int numprocs, myrank;
+ int mpi_size, myrank;
MPI_Comm comm;
MPI_Info info = MPI_INFO_NULL;
int color, mrc;
@@ -452,8 +895,12 @@ test_split_comm_access()
hid_t acc_tpl; /* File access properties */
herr_t ret; /* generic return value */
+ if (verbose)
+ printf("Independent write test on file %s %s\n",
+ filenames[0], filenames[1]);
+
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
+ MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
color = myrank%2;
mrc = MPI_Comm_split (MPI_COMM_WORLD, color, myrank, &comm);
@@ -492,8 +939,10 @@ test_split_comm_access()
assert(mrc==MPI_SUCCESS);
}
}
-#endif
+/*
+ * Show command usage
+ */
void
usage()
{
@@ -506,22 +955,11 @@ usage()
}
-main(int argc, char **argv)
-{
- int numprocs, mympirank, namelen;
- char processor_name[MPI_MAX_PROCESSOR_NAME];
-
-#ifdef HAVE_PARALLEL
- MPI_Init(&argc,&argv);
- MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
- MPI_Comm_rank(MPI_COMM_WORLD,&mympirank);
- MPI_Get_processor_name(processor_name,&namelen);
-#ifdef USE_PAUSE
- pause_proc(MPI_COMM_WORLD, mympirank, processor_name, namelen, argc, argv);
-#endif
-#endif
-
- /* parse option */
+/*
+ * parse the command line options
+ */
+int
+parse_options(int argc, char **argv){
while (--argc){
if (**(++argv) != '-'){
break;
@@ -535,23 +973,53 @@ main(int argc, char **argv)
break;
default: usage();
nerrors++;
- goto finish;
+ return(1);
}
}
}
+ return(0);
+}
+
+
+main(int argc, char **argv)
+{
+ char *filenames[]={ "ParaEg1.h5f", "ParaEg2.h5f" };
+ int mpi_namelen;
+ char mpi_name[MPI_MAX_PROCESSOR_NAME];
+
+ MPI_Init(&argc,&argv);
+ MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Get_processor_name(mpi_name,&mpi_namelen);
+ /* Make sure datasets can be divided into equal chunks by the processes */
+ if ((SPACE1_DIM1 % mpi_size) || (SPACE1_DIM2 % mpi_size)){
+ printf("DIM1(%d) and DIM2(%d) must be multiples of processes (%d)\n",
+ SPACE1_DIM1, SPACE1_DIM2, mpi_size);
+ nerrors++;
+ goto finish;
+ }
+
+#ifdef USE_PAUSE
+ pause_proc(MPI_COMM_WORLD, mpi_rank, mpi_name, mpi_namelen, argc, argv);
+#endif
+
+ if (parse_options(argc, argv) != 0)
+ goto finish;
if (dowrite){
-#ifdef HAVE_PARALLEL
MPI_BANNER("testing PHDF5 dataset using split communicators...");
- test_split_comm_access();
-#endif
+ test_split_comm_access(filenames);
MPI_BANNER("testing PHDF5 dataset independent write...");
- phdf5writeInd();
+ phdf5writeInd(filenames[0]);
+ MPI_BANNER("testing PHDF5 dataset collective write...");
+ phdf5writeAll(filenames[1]);
}
if (doread){
MPI_BANNER("testing PHDF5 dataset independent read...");
- phdf5readInd();
+ phdf5readInd(filenames[0]);
+ MPI_BANNER("testing PHDF5 dataset collective read...");
+ phdf5readAll(filenames[1]);
}
if (!(dowrite || doread)){
@@ -560,7 +1028,7 @@ main(int argc, char **argv)
}
finish:
- if (mympirank == 0){ /* only process 0 reports */
+ if (mpi_rank == 0){ /* only process 0 reports */
if (nerrors)
printf("***PHDF5 tests detected %d errors***\n", nerrors);
else{
@@ -569,9 +1037,7 @@ finish:
printf("===================================\n");
}
}
-#ifdef HAVE_PARALLEL
MPI_Finalize();
-#endif
return(nerrors);
}
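
A note on running the updated test (the invocation below is an assumption about the build, not part of the commit): the new main() insists that the MPI process count divide both SPACE1_DIM1 and SPACE1_DIM2 (both 24), so a launch such as mpirun -np 4 testphdf5 should pass the check, while -np 5 would be rejected with the "must be multiples of processes" message.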