author     Christian Chilan <chilan@hdfgroup.org>    2009-04-17 22:04:15 (GMT)
committer  Christian Chilan <chilan@hdfgroup.org>    2009-04-17 22:04:15 (GMT)
commit     cf8b28c95bacc9d50943306698a571c3339a5ae5 (patch)
tree       e399c7b758cce5c10a284ae0640ae791416e12ba /testpar
parent     16ae3358576928bcb40871405f45122a033a57fd (diff)
[svn-r16784] Adapted parallel tests to run with an arbitrary number of processors.
Tested on jam and abe.
Diffstat (limited to 'testpar')
-rw-r--r--  testpar/t_chunk_alloc.c | 190
-rw-r--r--  testpar/t_coll_chunk.c  |  18
-rw-r--r--  testpar/t_span_tree.c   | 115
-rw-r--r--  testpar/testphdf5.c     |  15
-rw-r--r--  testpar/testphdf5.h     |   6
5 files changed, 178 insertions(+), 166 deletions(-)
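
The common thread in the hunks below is that every fixed dataset size becomes a per-process factor multiplied by the size of MPI_COMM_WORLD, so the work divides evenly for any processor count. A minimal MPI-only sketch of that sizing rule follows; the CHUNK_FACTOR and CHUNK_SIZE values are copied from the t_chunk_alloc.c hunks for illustration and the program is not part of the test suite:

    #include <mpi.h>
    #include <stdio.h>

    /* Illustrative per-process factors, mirroring the patch. */
    #define CHUNK_FACTOR 200
    #define CHUNK_SIZE   1000

    int main(int argc, char **argv)
    {
        int  mpi_size, mpi_rank;
        long nchunks, nelems;

        MPI_Init(&argc, &argv);
        MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
        MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

        /* Problem size is derived from the communicator size instead of
         * a fixed constant, so any number of ranks fits evenly. */
        nchunks = (long)CHUNK_FACTOR * mpi_size;
        nelems  = nchunks * CHUNK_SIZE;

        if (mpi_rank == 0)
            printf("%d ranks -> %ld chunks (%ld elements)\n",
                   mpi_size, nchunks, nelems);

        MPI_Finalize();
        return 0;
    }
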
diff --git a/testpar/t_chunk_alloc.c b/testpar/t_chunk_alloc.c
index 9d3ef4b..3802298 100644
--- a/testpar/t_chunk_alloc.c
+++ b/testpar/t_chunk_alloc.c
@@ -13,7 +13,7 @@
* access to either file, you may request a copy from help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-/*
+/*
* This verifies if the storage space allocation methods are compatible between
* serial and parallel modes.
*
@@ -24,9 +24,9 @@
#include "testphdf5.h"
static int mpi_size, mpi_rank;
-#define DATASETNAME "ExtendibleArray"
-#define CHUNKSIZE 1000 /* #elements per chunk */
-#define DSETCHUNKS 20000
+#define DSET_NAME "ExtendibleArray"
+#define CHUNK_SIZE 1000 /* #elements per chunk */
+#define CHUNK_FACTOR 200 /* default dataset size in terms of chunks */
#define CLOSE 1
#define NO_CLOSE 0
@@ -78,29 +78,28 @@ typedef enum access_ {
/*
- * This creates a dataset serially with 'nchunks' chunks, each of CHUNKSIZE
+ * This creates a dataset serially with chunks, each of CHUNK_SIZE
* elements. The allocation time is set to H5D_ALLOC_TIME_EARLY. Another
* routine will open this in parallel for extension test.
*/
-void
-create_chunked_dataset(const char *filename, int nchunks, write_type write_pattern)
+static void
+create_chunked_dataset(const char *filename, int chunk_factor, write_type write_pattern)
{
hid_t file_id, dataset; /* handles */
- hid_t dataspace,memspace;
+ hid_t dataspace,memspace;
hid_t cparms;
hsize_t dims[1];
hsize_t maxdims[1] = {H5S_UNLIMITED};
-
- hsize_t chunk_dims[1] ={CHUNKSIZE};
+
+ hsize_t chunk_dims[1] ={CHUNK_SIZE};
hsize_t count[1];
hsize_t stride[1];
hsize_t block[1];
hsize_t offset[1]; /* Selection offset within dataspace */
/* Variables used in reading data back */
- char buffer[CHUNKSIZE];
- int i;
-
- herr_t hrc;
+ char buffer[CHUNK_SIZE];
+ long nchunks;
+ herr_t hrc;
MPI_Offset filesize, /* actual file size */
est_filesize; /* estimated file size */
@@ -111,61 +110,49 @@ create_chunked_dataset(const char *filename, int nchunks, write_type write_patte
/* Only MAINPROCESS should create the file. Others just wait. */
if (MAINPROCESS){
-
- dims[0]=nchunks*CHUNKSIZE;
+ nchunks=chunk_factor*mpi_size;
+ dims[0]=nchunks*CHUNK_SIZE;
/* Create the data space with unlimited dimensions. */
- dataspace = H5Screate_simple (1, dims, maxdims);
+ dataspace = H5Screate_simple (1, dims, maxdims);
VRFY((dataspace >= 0), "");
memspace = H5Screate_simple(1, chunk_dims, NULL);
VRFY((memspace >= 0), "");
-
+
/* Create a new file. If file exists its contents will be overwritten. */
file_id = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT,
H5P_DEFAULT);
VRFY((file_id >= 0), "H5Fcreate");
/* Modify dataset creation properties, i.e. enable chunking */
- cparms = H5Pcreate (H5P_DATASET_CREATE);
+ cparms = H5Pcreate(H5P_DATASET_CREATE);
VRFY((cparms >= 0), "");
hrc = H5Pset_alloc_time(cparms, H5D_ALLOC_TIME_EARLY);
VRFY((hrc >= 0), "");
- hrc = H5Pset_chunk ( cparms, 1, chunk_dims);
+ hrc = H5Pset_chunk(cparms, 1, chunk_dims);
VRFY((hrc >= 0), "");
/* Create a new dataset within the file using cparms creation properties. */
- dataset = H5Dcreate (file_id, DATASETNAME, H5T_NATIVE_UCHAR, dataspace, cparms);
+ dataset = H5Dcreate(file_id, DSET_NAME, H5T_NATIVE_UCHAR, dataspace, cparms);
VRFY((dataset >= 0), "");
- switch (write_pattern) {
-
- /* writes only the second to last chunk */
- case sec_last:
+ if(write_pattern == sec_last) {
+ HDmemset(buffer, 100, CHUNK_SIZE);
- memset(buffer, 100, CHUNKSIZE);
+ count[0] = 1;
+ stride[0] = 1;
+ block[0] = chunk_dims[0];
+ offset[0] = (nchunks-2)*chunk_dims[0];
- count[0] = 1;
- stride[0] = 1;
- block[0] = chunk_dims[0];
- offset[0] = (nchunks-2)*chunk_dims[0];
+ hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);
+ VRFY((hrc >= 0), "");
- hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);
- VRFY((hrc >= 0), "");
-
- /* Write sec_last chunk */
- hrc = H5Dwrite(dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer);
- VRFY((hrc >= 0), "H5Dwrite");
-
- break;
-
-
- /* doesn't write anything */
- case none:
-
- break;
- }
+ /* Write sec_last chunk */
+ hrc = H5Dwrite(dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer);
+ VRFY((hrc >= 0), "H5Dwrite");
+ } /* end if */
/* Close resources */
hrc = H5Dclose (dataset);
@@ -187,7 +174,7 @@ create_chunked_dataset(const char *filename, int nchunks, write_type write_patte
/* verify file size */
filesize = get_filesize(filename);
- est_filesize = nchunks*CHUNKSIZE*sizeof(unsigned char);
+ est_filesize = nchunks * CHUNK_SIZE * sizeof(unsigned char);
VRFY((filesize >= est_filesize), "file size check");
}
@@ -198,43 +185,46 @@ create_chunked_dataset(const char *filename, int nchunks, write_type write_patte
*/
MPI_Barrier(MPI_COMM_WORLD);
-}
+}
/*
* This program performs three different types of parallel access. It writes on
- * the entire dataset, it extends the dataset to nchunks*CHUNKSIZE, and it only
+ * the entire dataset, it extends the dataset to nchunks*CHUNK_SIZE, and it only
* opens the dataset. At the end, it verifies the size of the dataset to be
- * consistent with argument 'nchunks'.
+ * consistent with argument 'chunk_factor'.
*/
-void
-parallel_access_dataset(const char *filename, int nchunks, access_type action, hid_t *file_id, hid_t *dataset)
+static void
+parallel_access_dataset(const char *filename, int chunk_factor, access_type action, hid_t *file_id, hid_t *dataset)
{
/* HDF5 gubbins */
hid_t memspace, dataspace; /* HDF5 file identifier */
hid_t access_plist; /* HDF5 ID for file access property list */
herr_t hrc; /* HDF5 return code */
hsize_t size[1];
- hsize_t dim_size;
- hsize_t chunk_dims[1] ={CHUNKSIZE};
+ hsize_t chunk_dims[1] ={CHUNK_SIZE};
hsize_t count[1];
hsize_t stride[1];
hsize_t block[1];
hsize_t offset[1]; /* Selection offset within dataspace */
+ hsize_t dims[1];
+ hsize_t maxdims[1];
+
/* Variables used in reading data back */
- char buffer[CHUNKSIZE];
+ char buffer[CHUNK_SIZE];
int i;
-
+ long nchunks;
/* MPI Gubbins */
MPI_Offset filesize, /* actual file size */
est_filesize; /* estimated file size */
- int mpierr;
/* Initialize MPI */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ nchunks=chunk_factor*mpi_size;
+
/* Set up MPIO file access property lists */
access_plist = H5Pcreate(H5P_FILE_ACCESS);
VRFY((access_plist >= 0), "");
@@ -250,29 +240,28 @@ parallel_access_dataset(const char *filename, int nchunks, access_type action, h
/* Open dataset*/
if (*dataset<0){
- *dataset = H5Dopen(*file_id, DATASETNAME);
+ *dataset = H5Dopen(*file_id, DSET_NAME);
VRFY((*dataset >= 0), "");
}
memspace = H5Screate_simple(1, chunk_dims, NULL);
VRFY((memspace >= 0), "");
- dataspace = H5Dget_space(*dataset);
+ dataspace = H5Dget_space(*dataset);
VRFY((dataspace >= 0), "");
- size[0] = nchunks*CHUNKSIZE;
+ size[0] = nchunks*CHUNK_SIZE;
switch (action) {
/* all chunks are written by all the processes in an interleaved way*/
case write_all:
- memset(buffer, mpi_rank+1, CHUNKSIZE);
+ memset(buffer, mpi_rank+1, CHUNK_SIZE);
count[0] = 1;
stride[0] = 1;
block[0] = chunk_dims[0];
- for (i=0; i<(nchunks+mpi_size-1)/mpi_size; i++){
- if (i*mpi_size+mpi_rank < nchunks){
+ for (i=0; i<nchunks/mpi_size; i++){
offset[0] = (i*mpi_size+mpi_rank)*chunk_dims[0];
hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);
@@ -281,21 +270,24 @@ parallel_access_dataset(const char *filename, int nchunks, access_type action, h
/* Write the buffer out */
hrc = H5Dwrite(*dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer);
VRFY((hrc >= 0), "H5Dwrite");
- }
-
}
break;
/* only extends the dataset */
case extend_only:
- /* Extend dataset*/
- hrc = H5Dextend(*dataset, size);
+ /* check if new size is larger than old size */
+ hrc = H5Sget_simple_extent_dims(dataspace, dims, maxdims);
VRFY((hrc >= 0), "");
+ /* Extend dataset*/
+ if (size[0] > dims[0]) {
+ hrc = H5Dextend(*dataset, size);
+ VRFY((hrc >= 0), "");
+ }
break;
- /* only opens the dataset */
+ /* only opens the *dataset */
case open_only:
break;
@@ -307,10 +299,10 @@ parallel_access_dataset(const char *filename, int nchunks, access_type action, h
*dataset = -1;
hrc = H5Sclose (dataspace);
- VRFY((hrc >= 0), "");
+ VRFY((hrc >= 0), "");
- hrc = H5Sclose (memspace);
- VRFY((hrc >= 0), "");
+ hrc = H5Sclose (memspace);
+ VRFY((hrc >= 0), "");
hrc = H5Fclose(*file_id);
VRFY((hrc >= 0), "");
@@ -318,7 +310,7 @@ parallel_access_dataset(const char *filename, int nchunks, access_type action, h
/* verify file size */
filesize = get_filesize(filename);
- est_filesize = nchunks*CHUNKSIZE*sizeof(unsigned char);
+ est_filesize = nchunks*CHUNK_SIZE*sizeof(unsigned char);
VRFY((filesize >= est_filesize), "file size check");
/* Can close some plists */
@@ -340,32 +332,30 @@ parallel_access_dataset(const char *filename, int nchunks, access_type action, h
* 3. it returns correct values when the whole dataset has been written in an
* interleaved pattern.
*/
-void verify_data(const char *filename, int nchunks, write_type write_pattern, int close, hid_t *file_id, hid_t *dataset)
+static void
+verify_data(const char *filename, int chunk_factor, write_type write_pattern, int close, hid_t *file_id, hid_t *dataset)
{
/* HDF5 gubbins */
hid_t dataspace, memspace; /* HDF5 file identifier */
hid_t access_plist; /* HDF5 ID for file access property list */
herr_t hrc; /* HDF5 return code */
- hsize_t chunk_dims[1] ={CHUNKSIZE};
+ hsize_t chunk_dims[1] ={CHUNK_SIZE};
hsize_t count[1];
hsize_t stride[1];
hsize_t block[1];
hsize_t offset[1]; /* Selection offset within dataspace */
/* Variables used in reading data back */
- char buffer[CHUNKSIZE];
+ char buffer[CHUNK_SIZE];
int value, i;
- int index, current;
-
- /* MPI Gubbins */
- MPI_Offset filesize, /* actual file size */
- est_filesize; /* estimated file size */
- int mpierr;
-
+ int index;
+ long nchunks;
/* Initialize MPI */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ nchunks=chunk_factor*mpi_size;
+
/* Set up MPIO file access property lists */
access_plist = H5Pcreate(H5P_FILE_ACCESS);
VRFY((access_plist >= 0), "");
@@ -381,23 +371,23 @@ void verify_data(const char *filename, int nchunks, write_type write_pattern, in
/* Open dataset*/
if (*dataset<0){
- *dataset = H5Dopen(*file_id, DATASETNAME);
+ *dataset = H5Dopen(*file_id, DSET_NAME);
VRFY((*dataset >= 0), "");
}
memspace = H5Screate_simple(1, chunk_dims, NULL);
VRFY((memspace >= 0), "");
- dataspace = H5Dget_space(*dataset);
+ dataspace = H5Dget_space(*dataset);
VRFY((dataspace >= 0), "");
/* all processes check all chunks. */
count[0] = 1;
stride[0] = 1;
block[0] = chunk_dims[0];
- for (i=0; i<nchunks; i++){
+ for (i=0; i<nchunks; i++){
/* reset buffer values */
- memset(buffer, -1, CHUNKSIZE);
+ memset(buffer, -1, CHUNK_SIZE);
offset[0] = i*chunk_dims[0];
@@ -417,16 +407,15 @@ void verify_data(const char *filename, int nchunks, write_type write_pattern, in
value = 0;
break;
case sec_last:
- if (i==(nchunks-2))
+ if (i==nchunks-2)
value = 100;
else
value = 0;
}
/* verify content of the chunk */
- for (index = 0; index < CHUNKSIZE; index++)
+ for (index = 0; index < CHUNK_SIZE; index++)
VRFY((buffer[index] == value), "data verification");
-
}
hrc = H5Sclose (dataspace);
@@ -484,29 +473,34 @@ test_chunk_alloc(void)
hid_t file_id, dataset;
file_id = dataset = -1;
-
+
+ /* Initialize MPI */
+ MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+
filename = GetTestParameters();
if (VERBOSE_MED)
printf("Extend Chunked allocation test on file %s\n", filename);
/* Case 1 */
/* Create chunked dataset without writing anything.*/
- create_chunked_dataset(filename, DSETCHUNKS, none);
+ create_chunked_dataset(filename, CHUNK_FACTOR, none);
/* reopen dataset in parallel and check for file size */
- parallel_access_dataset(filename, DSETCHUNKS, open_only, &file_id, &dataset);
+ parallel_access_dataset(filename, CHUNK_FACTOR, open_only, &file_id, &dataset);
/* reopen dataset in parallel, read and verify the data */
- verify_data(filename, DSETCHUNKS, none, CLOSE, &file_id, &dataset);
+ verify_data(filename, CHUNK_FACTOR, none, CLOSE, &file_id, &dataset);
/* Case 2 sometimes fails. See bug 281 and 636. Skip it for now, need to fix it later. */
if (VERBOSE_LO){
printf("Started Case 2\n");
+
/* Case 2 */
/* Create chunked dataset without writing anything */
create_chunked_dataset(filename, 20, none);
/* reopen dataset in parallel and only extend it */
- parallel_access_dataset(filename, DSETCHUNKS, extend_only, &file_id, &dataset);
+ parallel_access_dataset(filename, CHUNK_FACTOR, extend_only, &file_id, &dataset);
/* reopen dataset in parallel, read and verify the data */
- verify_data(filename, DSETCHUNKS, none, CLOSE, &file_id, &dataset);
+ verify_data(filename, CHUNK_FACTOR, none, CLOSE, &file_id, &dataset);
printf("Finished Case 2\n");
} else {
if (MAINPROCESS)
@@ -515,12 +509,12 @@ if (MAINPROCESS)
/* Case 3 */
/* Create chunked dataset and write in the second to last chunk */
- create_chunked_dataset(filename, DSETCHUNKS, sec_last);
+ create_chunked_dataset(filename, CHUNK_FACTOR, sec_last);
/* Reopen dataset in parallel, read and verify the data. The file and dataset are not closed*/
- verify_data(filename, DSETCHUNKS, sec_last, NO_CLOSE, &file_id, &dataset);
+ verify_data(filename, CHUNK_FACTOR, sec_last, NO_CLOSE, &file_id, &dataset);
/* All processes write in all the chunks in a interleaved way */
- parallel_access_dataset(filename, DSETCHUNKS, write_all, &file_id, &dataset);
+ parallel_access_dataset(filename, CHUNK_FACTOR, write_all, &file_id, &dataset);
/* reopen dataset in parallel, read and verify the data */
- verify_data(filename, DSETCHUNKS, all, CLOSE, &file_id, &dataset);
+ verify_data(filename, CHUNK_FACTOR, all, CLOSE, &file_id, &dataset);
}
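
The create_chunked_dataset() changes above amount to sizing an extendible, early-allocated chunked dataset from nchunks = chunk_factor * mpi_size. Below is a minimal serial sketch of that pattern, assuming the 1.6-style five-argument H5Dcreate used throughout this patch; it is illustrative, not the test itself:

    #include "hdf5.h"

    /* Illustrative chunk size; the test derives nchunks from mpi_size. */
    #define CHUNK_SIZE 1000

    static void create_early_alloc_dataset(const char *filename, long nchunks)
    {
        hsize_t dims[1]       = {(hsize_t)nchunks * CHUNK_SIZE};
        hsize_t maxdims[1]    = {H5S_UNLIMITED};
        hsize_t chunk_dims[1] = {CHUNK_SIZE};
        hid_t   file, space, dcpl, dset;

        /* Extendible 1-D dataspace sized for nchunks full chunks. */
        space = H5Screate_simple(1, dims, maxdims);

        file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);

        /* Chunked layout with early allocation, so every chunk is
         * allocated at create time and the file size can be checked
         * immediately afterwards. */
        dcpl = H5Pcreate(H5P_DATASET_CREATE);
        H5Pset_chunk(dcpl, 1, chunk_dims);
        H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY);

        /* 1.6-compatible H5Dcreate signature, matching the test code above. */
        dset = H5Dcreate(file, "ExtendibleArray", H5T_NATIVE_UCHAR, space, dcpl);

        H5Dclose(dset);
        H5Pclose(dcpl);
        H5Sclose(space);
        H5Fclose(file);
    }

    int main(void)
    {
        create_early_alloc_dataset("chunk_alloc_sketch.h5", 4);
        return 0;
    }
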
diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c
index 93be771..76178bc 100644
--- a/testpar/t_coll_chunk.c
+++ b/testpar/t_coll_chunk.c
@@ -136,7 +136,7 @@ coll_chunktest(const char* filename,int chunk_factor,int select_factor) {
/* setup dimensionality object */
- dims[0] = SPACE_DIM1;
+ dims[0] = SPACE_DIM1*mpi_size;
dims[1] = SPACE_DIM2;
/* each process takes a slab of rows
@@ -151,7 +151,7 @@ coll_chunktest(const char* filename,int chunk_factor,int select_factor) {
*/
/* allocate memory for data buffer */
- data_array1 = (int *)malloc(SPACE_DIM1*SPACE_DIM2*sizeof(int));
+ data_array1 = (int *)malloc(dims[0]*dims[1]*sizeof(int));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
/* set up dimensions of the slab this process accesses */
@@ -164,7 +164,7 @@ coll_chunktest(const char* filename,int chunk_factor,int select_factor) {
VRFY((crp_plist >= 0),"");
/* test1: chunk size is equal to dataset size */
- chunk_dims[0] = SPACE_DIM1/chunk_factor;
+ chunk_dims[0] = dims[0]/chunk_factor;
/* to decrease the testing time, maintain bigger chunk size */
if(chunk_factor >2) chunk_dims[1] = SPACE_DIM2/2;
@@ -221,11 +221,11 @@ coll_chunktest(const char* filename,int chunk_factor,int select_factor) {
/* Using read to verify the data inside the dataset is correct */
/* allocate memory for data buffer */
- data_array1 = (int *)malloc(SPACE_DIM1*SPACE_DIM2*sizeof(int));
+ data_array1 = (int *)malloc(dims[0]*dims[1]*sizeof(int));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
/* allocate memory for data buffer */
- data_origin1 = (int *)malloc(SPACE_DIM1*SPACE_DIM2*sizeof(int));
+ data_origin1 = (int *)malloc(dims[0]*dims[1]*sizeof(int));
VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded");
acc_plist = create_faccess_plist(comm, info, facc_type, use_gpfs);
@@ -296,7 +296,7 @@ ccslab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[],
block[1] = 1;
stride[0] = 1;
stride[1] = 1;
- count[0] = SPACE_DIM1/mpi_size;
+ count[0] = SPACE_DIM1;
count[1] = SPACE_DIM2;
start[0] = mpi_rank*count[0];
start[1] = 0;
@@ -309,16 +309,16 @@ ccslab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[],
block[1] = 1;
stride[0] = 3;
stride[1] = 3;
- count[0] = (SPACE_DIM1/mpi_size)/(stride[0]*block[0]);
+ count[0] = (SPACE_DIM1)/(stride[0]*block[0]);
count[1] = (SPACE_DIM2)/(stride[1]*block[1]);
- start[0] = SPACE_DIM1/mpi_size*mpi_rank;
+ start[0] = SPACE_DIM1*mpi_rank;
start[1] = 0;
if (VERBOSE_MED) printf("slab_set BYROW_DISCONT\n");
break;
default:
/* Unknown mode. Set it to cover the whole dataset. */
printf("unknown slab_set mode (%d)\n", mode);
- block[0] = SPACE_DIM1;
+ block[0] = SPACE_DIM1*mpi_size;
block[1] = SPACE_DIM2;
stride[0] = block[0];
stride[1] = block[1];
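
The t_coll_chunk.c hunks keep the per-process slab at SPACE_DIM1 rows and grow the file dataspace to SPACE_DIM1*mpi_size, rather than dividing a fixed dimension by mpi_size. A sketch of the BYROW_CONT selection arithmetic under that convention (constants copied from the new testphdf5.h values; the program structure is illustrative only):

    #include "hdf5.h"
    #include <mpi.h>

    /* Per-process slab size, as defined in the updated testphdf5.h. */
    #define SPACE_DIM1 24
    #define SPACE_DIM2 4

    int main(int argc, char **argv)
    {
        int     mpi_size, mpi_rank;
        hsize_t dims[2], start[2], count[2], stride[2], block[2];
        hid_t   fspace;

        MPI_Init(&argc, &argv);
        MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
        MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

        /* File dataspace scales with the number of ranks. */
        dims[0] = (hsize_t)SPACE_DIM1 * mpi_size;
        dims[1] = SPACE_DIM2;
        fspace  = H5Screate_simple(2, dims, NULL);

        /* Each rank selects SPACE_DIM1 contiguous rows of the dataset. */
        block[0]  = 1;           block[1]  = 1;
        stride[0] = 1;           stride[1] = 1;
        count[0]  = SPACE_DIM1;  count[1]  = SPACE_DIM2;
        start[0]  = (hsize_t)mpi_rank * count[0];
        start[1]  = 0;

        H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, stride, count, block);

        H5Sclose(fspace);
        MPI_Finalize();
        return 0;
    }
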
diff --git a/testpar/t_span_tree.c b/testpar/t_span_tree.c
index d456963..2c330ba 100644
--- a/testpar/t_span_tree.c
+++ b/testpar/t_span_tree.c
@@ -209,8 +209,10 @@ void coll_write_test(int chunk_factor)
hbool_t use_gpfs = FALSE;
hid_t file, datasetc,dataseti; /* File and dataset identifiers */
hid_t mspaceid1, mspaceid, fspaceid,fspaceid1; /* Dataspace identifiers */
+ hsize_t mdim1[1],fsdim[2],mdim[2];
hid_t plist; /* Dataset property list identifier */
+#if 0
hsize_t mdim1[] = {MSPACE1_DIM}; /* Dimension size of the first dataset
(in memory) */
@@ -220,6 +222,7 @@ void coll_write_test(int chunk_factor)
dataset in memory when we
read selection from the
dataset on the disk */
+#endif
hsize_t start[2]; /* Start of hyperslab */
hsize_t stride[2]; /* Stride of hyperslab */
@@ -231,10 +234,14 @@ void coll_write_test(int chunk_factor)
unsigned i,j;
int fillvalue = 0; /* Fill value for the dataset */
+#if 0
int matrix_out[MSPACE_DIM1][MSPACE_DIM2];
int matrix_out1[MSPACE_DIM1][MSPACE_DIM2]; /* Buffer to read from the
dataset */
int vector[MSPACE1_DIM];
+#endif
+
+ int *matrix_out, *matrix_out1, *vector;
int mpi_size,mpi_rank;
@@ -252,8 +259,19 @@ void coll_write_test(int chunk_factor)
/*
* Buffers' initialization.
*/
- vector[0] = vector[MSPACE1_DIM - 1] = -1;
- for (i = 1; i < MSPACE1_DIM - 1; i++) vector[i] = i;
+ mdim1[0] = MSPACE1_DIM *mpi_size;
+ mdim[0] = MSPACE_DIM1;
+ mdim[1] = MSPACE_DIM2*mpi_size;
+ fsdim[0] = FSPACE_DIM1;
+ fsdim[1] = FSPACE_DIM2*mpi_size;
+
+ vector = (int*)HDmalloc(sizeof(int)*mdim1[0]*mpi_size);
+ matrix_out = (int*)HDmalloc(sizeof(int)*mdim[0]*mdim[1]*mpi_size);
+ matrix_out1 = (int*)HDmalloc(sizeof(int)*mdim[0]*mdim[1]*mpi_size);
+
+ HDmemset(vector,0,sizeof(int)*mdim1[0]*mpi_size);
+ vector[0] = vector[MSPACE1_DIM*mpi_size - 1] = -1;
+ for (i = 1; i < MSPACE1_DIM*mpi_size - 1; i++) vector[i] = i;
#if 0
acc_plist = H5Pcreate(H5P_FILE_ACCESS);
@@ -283,8 +301,8 @@ void coll_write_test(int chunk_factor)
if(chunk_factor != 0) {
- chunk_dims[0] = FSPACE_DIM1/chunk_factor;
- chunk_dims[1] = FSPACE_DIM2/chunk_factor;
+ chunk_dims[0] = fsdim[0]/chunk_factor;
+ chunk_dims[1] = fsdim[1]/chunk_factor;
ret = H5Pset_chunk(plist, 2, chunk_dims);
VRFY((ret >= 0),"chunk creation property list succeeded");
}
@@ -310,11 +328,11 @@ void coll_write_test(int chunk_factor)
*/
start[0] = FHSTART0;
- start[1] = FHSTART1+mpi_rank*FHSTRIDE1*FHCOUNT1/mpi_size;
+ start[1] = FHSTART1+mpi_rank*FHSTRIDE1*FHCOUNT1;
stride[0] = FHSTRIDE0;
stride[1] = FHSTRIDE1;
count[0] = FHCOUNT0;
- count[1] = FHCOUNT1/mpi_size;
+ count[1] = FHCOUNT1;
block[0] = FHBLOCK0;
block[1] = FHBLOCK1;
@@ -327,13 +345,13 @@ void coll_write_test(int chunk_factor)
*/
start[0] = SHSTART0;
- start[1] = SHSTART1+SHCOUNT1*SHBLOCK1*mpi_rank/mpi_size;
+ start[1] = SHSTART1+SHCOUNT1*SHBLOCK1*mpi_rank;
stride[0] = SHSTRIDE0;
stride[1] = SHSTRIDE1;
count[0] = SHCOUNT0;
count[1] = SHCOUNT1;
block[0] = SHBLOCK0;
- block[1] = SHBLOCK1/mpi_size;
+ block[1] = SHBLOCK1;
ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_OR, start, stride, count, block);
VRFY((ret >= 0),"hyperslab selection succeeded");
@@ -351,7 +369,7 @@ void coll_write_test(int chunk_factor)
*/
start[0] = MHSTART0;
stride[0] = MHSTRIDE0;
- count[0] = MHCOUNT0/mpi_size;
+ count[0] = MHCOUNT0;
block[0] = MHBLOCK0;
ret = H5Sselect_hyperslab(mspaceid1, H5S_SELECT_SET, start, stride, count, block);
@@ -440,13 +458,13 @@ void coll_write_test(int chunk_factor)
start[0] = RFFHSTART0;
- start[1] = RFFHSTART1+mpi_rank*RFFHCOUNT1/mpi_size;
+ start[1] = RFFHSTART1+mpi_rank*RFFHCOUNT1;
block[0] = RFFHBLOCK0;
block[1] = RFFHBLOCK1;
stride[0] = RFFHSTRIDE0;
stride[1] = RFFHSTRIDE1;
count[0] = RFFHCOUNT0;
- count[1] = RFFHCOUNT1/mpi_size;
+ count[1] = RFFHCOUNT1;
ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_SET, start, stride, count, block);
@@ -456,13 +474,13 @@ void coll_write_test(int chunk_factor)
/*start[0] = RFSHSTART0+mpi_rank*RFSHCOUNT1/mpi_size; */
start[0] = RFSHSTART0;
- start[1] = RFSHSTART1+RFSHCOUNT1*mpi_rank/mpi_size;
+ start[1] = RFSHSTART1+RFSHCOUNT1*mpi_rank;
block[0] = RFSHBLOCK0;
block[1] = RFSHBLOCK1;
stride[0] = RFSHSTRIDE0;
stride[1] = RFSHSTRIDE0;
count[0] = RFSHCOUNT0;
- count[1] = RFSHCOUNT1/mpi_size;
+ count[1] = RFSHCOUNT1;
ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_OR, start, stride, count, block);
VRFY((ret >= 0),"hyperslab selection succeeded");
ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_OR, start, stride, count, block);
@@ -481,24 +499,24 @@ void coll_write_test(int chunk_factor)
start[0] = RMFHSTART0;
- start[1] = RMFHSTART1+mpi_rank*RMFHCOUNT1/mpi_size;
+ start[1] = RMFHSTART1+mpi_rank*RMFHCOUNT1;
block[0] = RMFHBLOCK0;
block[1] = RMFHBLOCK1;
stride[0] = RMFHSTRIDE0;
stride[1] = RMFHSTRIDE1;
count[0] = RMFHCOUNT0;
- count[1] = RMFHCOUNT1/mpi_size;
+ count[1] = RMFHCOUNT1;
ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0),"hyperslab selection succeeded");
start[0] = RMSHSTART0;
- start[1] = RMSHSTART1+mpi_rank*RMSHCOUNT1/mpi_size;
+ start[1] = RMSHSTART1+mpi_rank*RMSHCOUNT1;
block[0] = RMSHBLOCK0;
block[1] = RMSHBLOCK1;
stride[0] = RMSHSTRIDE0;
stride[1] = RMSHSTRIDE1;
count[0] = RMSHCOUNT0;
- count[1] = RMSHCOUNT1/mpi_size;
+ count[1] = RMSHCOUNT1;
ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_OR, start, stride, count, block);
@@ -507,10 +525,8 @@ void coll_write_test(int chunk_factor)
/*
* Initialize data buffer.
*/
- for (i = 0; i < MSPACE_DIM1; i++) {
- for (j = 0; j < MSPACE_DIM2; j++)
- matrix_out[i][j] = 0;
- }
+ HDmemset(matrix_out,0,sizeof(int)*MSPACE_DIM1*MSPACE_DIM2*mpi_size);
+ HDmemset(matrix_out1,0,sizeof(int)*MSPACE_DIM1*MSPACE_DIM2*mpi_size);
/*
* Read data back to the buffer matrix_out.
@@ -521,21 +537,17 @@ void coll_write_test(int chunk_factor)
VRFY((ret >= 0),"H5D independent read succeed");
- for (i = 0; i < MSPACE_DIM1; i++) {
- for (j = 0; j < MSPACE_DIM2; j++)
- matrix_out1[i][j] = 0;
- }
ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid,
H5P_DEFAULT, matrix_out1);
VRFY((ret >= 0),"H5D independent read succeed");
ret = 0;
- for (i = 0; i < MSPACE_DIM1; i++){
- for (j = 0; j < MSPACE_DIM2; j++){
- if(matrix_out[i][j]!=matrix_out1[i][j]) ret = -1;
+
+ for (i = 0; i < MSPACE_DIM1*MSPACE_DIM2*mpi_size; i++){
+ if(matrix_out[i]!=matrix_out1[i]) ret = -1;
if(ret < 0) break;
}
- }
+
VRFY((ret >= 0),"H5D contiguous irregular collective write succeed");
/*
@@ -582,11 +594,13 @@ void coll_read_test(int chunk_factor)
hbool_t use_gpfs = FALSE;
/* Dimension sizes of the dataset (on disk) */
+#if 0
hsize_t mdim[] = {MSPACE_DIM1, MSPACE_DIM2}; /* Dimension sizes of the
dataset in memory when we
read selection from the
dataset on the disk */
-
+#endif
+ hsize_t mdim[2];
hsize_t start[2]; /* Start of hyperslab */
hsize_t stride[2]; /* Stride of hyperslab */
hsize_t count[2]; /* Block count */
@@ -595,10 +609,13 @@ void coll_read_test(int chunk_factor)
herr_t ret;
unsigned i,j;
+ int *matrix_out;
+ int *matrix_out1;
+#if 0
int matrix_out[MSPACE_DIM1][MSPACE_DIM2];
int matrix_out1[MSPACE_DIM1][MSPACE_DIM2]; /* Buffer to read from the
dataset */
-
+#endif
int mpi_size,mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
@@ -615,6 +632,10 @@ void coll_read_test(int chunk_factor)
/*
* Buffers' initialization.
*/
+ mdim[0] = MSPACE_DIM1;
+ mdim[1] = MSPACE_DIM2*mpi_size;
+ matrix_out =(int*)HDmalloc(sizeof(int)*MSPACE_DIM1*MSPACE_DIM2*mpi_size);
+ matrix_out1=(int*)HDmalloc(sizeof(int)*MSPACE_DIM1*MSPACE_DIM2*mpi_size);
/*
* Open the file.
@@ -650,26 +671,26 @@ void coll_read_test(int chunk_factor)
VRFY((fspaceid1 >= 0),"file dataspace obtained succeeded");
start[0] = RFFHSTART0;
- start[1] = RFFHSTART1+mpi_rank*RFFHCOUNT1/mpi_size;
+ start[1] = RFFHSTART1+mpi_rank*RFFHCOUNT1;
block[0] = RFFHBLOCK0;
block[1] = RFFHBLOCK1;
stride[0] = RFFHSTRIDE0;
stride[1] = RFFHSTRIDE1;
count[0] = RFFHCOUNT0;
- count[1] = RFFHCOUNT1/mpi_size;
+ count[1] = RFFHCOUNT1;
ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0),"hyperslab selection succeeded");
start[0] = RFSHSTART0;
- start[1] = RFSHSTART1+RFSHCOUNT1*mpi_rank/mpi_size;
+ start[1] = RFSHSTART1+RFSHCOUNT1*mpi_rank;
block[0] = RFSHBLOCK0;
block[1] = RFSHBLOCK1;
stride[0] = RFSHSTRIDE0;
stride[1] = RFSHSTRIDE0;
count[0] = RFSHCOUNT0;
- count[1] = RFSHCOUNT1/mpi_size;
+ count[1] = RFSHCOUNT1;
ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_OR, start, stride, count, block);
VRFY((ret >= 0),"hyperslab selection succeeded");
@@ -686,34 +707,32 @@ void coll_read_test(int chunk_factor)
*/
start[0] = RMFHSTART0;
- start[1] = RMFHSTART1+mpi_rank*RMFHCOUNT1/mpi_size;
+ start[1] = RMFHSTART1+mpi_rank*RMFHCOUNT1;
block[0] = RMFHBLOCK0;
block[1] = RMFHBLOCK1;
stride[0] = RMFHSTRIDE0;
stride[1] = RMFHSTRIDE1;
count[0] = RMFHCOUNT0;
- count[1] = RMFHCOUNT1/mpi_size;
+ count[1] = RMFHCOUNT1;
ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0),"hyperslab selection succeeded");
start[0] = RMSHSTART0;
- start[1] = RMSHSTART1+mpi_rank*RMSHCOUNT1/mpi_size;
+ start[1] = RMSHSTART1+mpi_rank*RMSHCOUNT1;
block[0] = RMSHBLOCK0;
block[1] = RMSHBLOCK1;
stride[0] = RMSHSTRIDE0;
stride[1] = RMSHSTRIDE1;
count[0] = RMSHCOUNT0;
- count[1] = RMSHCOUNT1/mpi_size;
+ count[1] = RMSHCOUNT1;
ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_OR, start, stride, count, block);
VRFY((ret >= 0),"hyperslab selection succeeded");
/*
* Initialize data buffer.
*/
- for (i = 0; i < MSPACE_DIM1; i++) {
- for (j = 0; j < MSPACE_DIM2; j++)
- matrix_out[i][j] = 0;
- }
+ HDmemset(matrix_out,0,sizeof(int)*MSPACE_DIM1*MSPACE_DIM2*mpi_size);
+ HDmemset(matrix_out1,0,sizeof(int)*MSPACE_DIM1*MSPACE_DIM2*mpi_size);
/*
* Read data back to the buffer matrix_out.
@@ -732,20 +751,14 @@ void coll_read_test(int chunk_factor)
ret = H5Pclose(xfer_plist);
VRFY((ret >= 0),"");
- for (i = 0; i < MSPACE_DIM1; i++) {
- for (j = 0; j < MSPACE_DIM2; j++)
- matrix_out1[i][j] = 0;
- }
ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1,
H5P_DEFAULT, matrix_out1);
VRFY((ret >= 0),"H5D independent read succeed");
ret = 0;
- for (i = 0; i < MSPACE_DIM1; i++){
- for (j = 0; j < MSPACE_DIM2; j++){
- if(matrix_out[i][j]!=matrix_out1[i][j])ret = -1;
+ for (i = 0; i < MSPACE_DIM1*MSPACE_DIM2*mpi_size; i++){
+ if(matrix_out[i]!=matrix_out1[i])ret = -1;
if(ret < 0) break;
- }
}
VRFY((ret >= 0),"H5D contiguous irregular collective read succeed");
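
In t_span_tree.c the fixed matrix_out[MSPACE_DIM1][MSPACE_DIM2] stack arrays become heap buffers whose length scales with mpi_size, and the verification loop compares a single flat index. A small standalone sketch of that buffer handling (sizes illustrative; the real code fills the buffers with H5Dread and uses the HDmalloc/HDmemset wrappers):

    #include <stdlib.h>
    #include <string.h>

    /* Illustrative sizes; the test takes these from testphdf5.h. */
    #define MSPACE_DIM1 8
    #define MSPACE_DIM2 9

    /* Heap buffers sized by the rank count replace fixed 2-D stack
     * arrays; the element comparison runs over one flat index. */
    static int buffers_match(int mpi_size)
    {
        size_t nelem = (size_t)MSPACE_DIM1 * MSPACE_DIM2 * mpi_size;
        int   *a = malloc(nelem * sizeof(int));
        int   *b = malloc(nelem * sizeof(int));
        int    ret = 0;
        size_t i;

        if (!a || !b) { free(a); free(b); return -1; }

        memset(a, 0, nelem * sizeof(int));
        memset(b, 0, nelem * sizeof(int));

        /* ... in the test, two H5Dread() calls would fill a and b here ... */

        for (i = 0; i < nelem; i++)
            if (a[i] != b[i]) { ret = -1; break; }

        free(a);
        free(b);
        return ret;
    }

    int main(void)
    {
        return buffers_match(2) == 0 ? 0 : 1;
    }
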
diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c
index 4d4a03f..f33b99b 100644
--- a/testpar/testphdf5.c
+++ b/testpar/testphdf5.c
@@ -24,8 +24,8 @@
#endif /* !PATH_MAX */
/* global variables */
-int dim0 = DIM0;
-int dim1 = DIM1;
+int dim0;
+int dim1;
int chunkdim0;
int chunkdim1;
int nerrors = 0; /* errors count */
@@ -118,8 +118,8 @@ usage(void)
printf("\t-f <prefix>\tfilename prefix\n");
printf("\t-2\t\tuse Split-file together with MPIO\n");
printf("\t-p\t\tuse combo MPI-POSIX driver\n");
- printf("\t-d <dim0> <dim1>\tdataset dimensions. Defaults (%d,%d)\n",
- DIM0, DIM1);
+ printf("\t-d <factor0> <factor1>\tdataset dimensions factors. Defaults (%d,%d)\n",
+ ROW_FACTOR, COL_FACTOR);
printf("\t-c <dim0> <dim1>\tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n");
printf("\n");
}
@@ -180,9 +180,9 @@ parse_options(int argc, char **argv)
nerrors++;
return(1);
}
- dim0 = atoi(*(++argv));
+ dim0 = atoi(*(++argv))*mpi_size;
argc--;
- dim1 = atoi(*(++argv));
+ dim1 = atoi(*(++argv))*mpi_size;
/* set default chunkdim sizes too */
chunkdim0 = (dim0+9)/10;
chunkdim1 = (dim1+9)/10;
@@ -324,6 +324,9 @@ int main(int argc, char **argv)
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ dim0 = ROW_FACTOR*mpi_size;
+ dim1 = COL_FACTOR*mpi_size;
+
if (MAINPROCESS){
printf("===================================\n");
printf("PHDF5 TESTS START\n");
diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h
index 4a6ba2b..ecfacd7 100644
--- a/testpar/testphdf5.h
+++ b/testpar/testphdf5.h
@@ -93,6 +93,8 @@
/* Constants definitions */
#define DIM0 600 /* Default dataset sizes. */
#define DIM1 1200 /* Values are from a monitor pixel sizes */
+#define ROW_FACTOR 8 /* Nominal row factor for dataset size */
+#define COL_FACTOR 16 /* Nominal column factor for dataset size */
#define RANK 2
#define DATASETNAME1 "Data1"
#define DATASETNAME2 "Data2"
@@ -114,8 +116,8 @@
#define FACC_MPIPOSIX 0x8 /* MPIPOSIX */
/*Constants for collective chunk definitions */
-#define SPACE_DIM1 5760
-#define SPACE_DIM2 3
+#define SPACE_DIM1 24
+#define SPACE_DIM2 4
#define BYROW_CONT 1
#define BYROW_DISCONT 2
#define DSET_COLLECTIVE_CHUNK_NAME "coll_chunk_name"