path: root/testpar
author     Quincey Koziol <koziol@hdfgroup.org>  2009-04-21 22:46:38 (GMT)
committer  Quincey Koziol <koziol@hdfgroup.org>  2009-04-21 22:46:38 (GMT)
commit     5bb857476f99118cda0e54ea522f52933a582747 (patch)
tree       741024b69250fe9166091aa8f28bc78c35d3fb64 /testpar
parent     35bbc743d4cf77d6aa8af2acf5578db02e5129ca (diff)
download   hdf5-5bb857476f99118cda0e54ea522f52933a582747.zip
           hdf5-5bb857476f99118cda0e54ea522f52933a582747.tar.gz
           hdf5-5bb857476f99118cda0e54ea522f52933a582747.tar.bz2
[svn-r16825] Description:
    Bring revisions 16636:16821 from trunk to revise_chunks branch

Tested on:
    FreeBSD/32 6.3 (duty)
    Mac OS X/32 10.5.6 (amazon)
    (h5committest not needed on this branch)
Diffstat (limited to 'testpar')
-rw-r--r--  testpar/Makefile.in         |   8
-rw-r--r--  testpar/t_chunk_alloc.c     | 114
-rw-r--r--  testpar/t_posix_compliant.c |   4
-rw-r--r--  testpar/testphdf5.c         |  17
-rw-r--r--  testpar/testphdf5.h         |   2
5 files changed, 81 insertions(+), 64 deletions(-)
diff --git a/testpar/Makefile.in b/testpar/Makefile.in
index 9a275df..abfdf19 100644
--- a/testpar/Makefile.in
+++ b/testpar/Makefile.in
@@ -1,4 +1,4 @@
-# Makefile.in generated by automake 1.10.1 from Makefile.am.
+# Makefile.in generated by automake 1.10.2 from Makefile.am.
# @configure_input@
# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
@@ -402,8 +402,8 @@ $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir
@for dep in $?; do \
case '$(am__configure_deps)' in \
*$$dep*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
- && exit 0; \
+ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+ && { if test -f $@; then exit 0; else break; fi; }; \
exit 1;; \
esac; \
done; \
@@ -508,7 +508,7 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
unique=`for i in $$list; do \
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
done | \
- $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \
+ $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
END { if (nonempty) { for (i in files) print i; }; }'`; \
mkid -fID $$unique
tags: TAGS
diff --git a/testpar/t_chunk_alloc.c b/testpar/t_chunk_alloc.c
index a0cf0e2..4c581f6 100644
--- a/testpar/t_chunk_alloc.c
+++ b/testpar/t_chunk_alloc.c
@@ -24,9 +24,9 @@
#include "testphdf5.h"
static int mpi_size, mpi_rank;
-#define DATASETNAME "ExtendibleArray"
-#define CHUNKSIZE 1000 /* #elements per chunk */
-#define DSETCHUNKS 20000
+#define DSET_NAME "ExtendibleArray"
+#define CHUNK_SIZE 1000 /* #elements per chunk */
+#define CHUNK_FACTOR 200 /* default dataset size in terms of chunks */
#define CLOSE 1
#define NO_CLOSE 0
@@ -78,12 +78,12 @@ typedef enum access_ {
/*
- * This creates a dataset serially with 'nchunks' chunks, each of CHUNKSIZE
+ * This creates a dataset serially with chunks, each of CHUNK_SIZE
* elements. The allocation time is set to H5D_ALLOC_TIME_EARLY. Another
* routine will open this in parallel for extension test.
*/
static void
-create_chunked_dataset(const char *filename, int nchunks, write_type write_pattern)
+create_chunked_dataset(const char *filename, int chunk_factor, write_type write_pattern)
{
hid_t file_id, dataset; /* handles */
hid_t dataspace,memspace;
@@ -91,14 +91,14 @@ create_chunked_dataset(const char *filename, int nchunks, write_type write_patte
hsize_t dims[1];
hsize_t maxdims[1] = {H5S_UNLIMITED};
- hsize_t chunk_dims[1] ={CHUNKSIZE};
+ hsize_t chunk_dims[1] ={CHUNK_SIZE};
hsize_t count[1];
hsize_t stride[1];
hsize_t block[1];
hsize_t offset[1]; /* Selection offset within dataspace */
/* Variables used in reading data back */
- char buffer[CHUNKSIZE];
-
+ char buffer[CHUNK_SIZE];
+ long nchunks;
herr_t hrc;
MPI_Offset filesize, /* actual file size */
@@ -110,8 +110,8 @@ create_chunked_dataset(const char *filename, int nchunks, write_type write_patte
/* Only MAINPROCESS should create the file. Others just wait. */
if (MAINPROCESS){
-
- dims[0]=nchunks*CHUNKSIZE;
+ nchunks=chunk_factor*mpi_size;
+ dims[0]=nchunks*CHUNK_SIZE;
/* Create the data space with unlimited dimensions. */
dataspace = H5Screate_simple (1, dims, maxdims);
VRFY((dataspace >= 0), "");
@@ -135,11 +135,11 @@ create_chunked_dataset(const char *filename, int nchunks, write_type write_patte
VRFY((hrc >= 0), "");
/* Create a new dataset within the file using cparms creation properties. */
- dataset = H5Dcreate2(file_id, DATASETNAME, H5T_NATIVE_UCHAR, dataspace, H5P_DEFAULT, cparms, H5P_DEFAULT);
+ dataset = H5Dcreate2(file_id, DSET_NAME, H5T_NATIVE_UCHAR, dataspace, H5P_DEFAULT, cparms, H5P_DEFAULT);
VRFY((dataset >= 0), "");
if(write_pattern == sec_last) {
- HDmemset(buffer, 100, CHUNKSIZE);
+ HDmemset(buffer, 100, CHUNK_SIZE);
count[0] = 1;
stride[0] = 1;
@@ -174,7 +174,7 @@ create_chunked_dataset(const char *filename, int nchunks, write_type write_patte
/* verify file size */
filesize = get_filesize(filename);
- est_filesize = nchunks * CHUNKSIZE * sizeof(unsigned char);
+ est_filesize = nchunks * CHUNK_SIZE * sizeof(unsigned char);
VRFY((filesize >= est_filesize), "file size check");
}
@@ -190,12 +190,12 @@ create_chunked_dataset(const char *filename, int nchunks, write_type write_patte
/*
* This program performs three different types of parallel access. It writes on
- * the entire dataset, it extends the dataset to nchunks*CHUNKSIZE, and it only
+ * the entire dataset, it extends the dataset to nchunks*CHUNK_SIZE, and it only
* opens the dataset. At the end, it verifies the size of the dataset to be
- * consistent with argument 'nchunks'.
+ * consistent with argument 'chunk_factor'.
*/
static void
-parallel_access_dataset(const char *filename, int nchunks, access_type action, hid_t *file_id, hid_t *dataset)
+parallel_access_dataset(const char *filename, int chunk_factor, access_type action, hid_t *file_id, hid_t *dataset)
{
/* HDF5 gubbins */
hid_t memspace, dataspace; /* HDF5 file identifier */
@@ -203,15 +203,18 @@ parallel_access_dataset(const char *filename, int nchunks, access_type action, h
herr_t hrc; /* HDF5 return code */
hsize_t size[1];
- hsize_t chunk_dims[1] ={CHUNKSIZE};
+ hsize_t chunk_dims[1] ={CHUNK_SIZE};
hsize_t count[1];
hsize_t stride[1];
hsize_t block[1];
hsize_t offset[1]; /* Selection offset within dataspace */
+ hsize_t dims[1];
+ hsize_t maxdims[1];
+
/* Variables used in reading data back */
- char buffer[CHUNKSIZE];
+ char buffer[CHUNK_SIZE];
int i;
-
+ long nchunks;
/* MPI Gubbins */
MPI_Offset filesize, /* actual file size */
est_filesize; /* estimated file size */
@@ -220,6 +223,8 @@ parallel_access_dataset(const char *filename, int nchunks, access_type action, h
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ nchunks=chunk_factor*mpi_size;
+
/* Set up MPIO file access property lists */
access_plist = H5Pcreate(H5P_FILE_ACCESS);
VRFY((access_plist >= 0), "");
@@ -235,7 +240,7 @@ parallel_access_dataset(const char *filename, int nchunks, access_type action, h
/* Open dataset*/
if (*dataset<0){
- *dataset = H5Dopen2(*file_id, DATASETNAME, H5P_DEFAULT);
+ *dataset = H5Dopen2(*file_id, DSET_NAME, H5P_DEFAULT);
VRFY((*dataset >= 0), "");
}
@@ -245,19 +250,18 @@ parallel_access_dataset(const char *filename, int nchunks, access_type action, h
dataspace = H5Dget_space(*dataset);
VRFY((dataspace >= 0), "");
- size[0] = nchunks*CHUNKSIZE;
+ size[0] = nchunks*CHUNK_SIZE;
switch (action) {
/* all chunks are written by all the processes in an interleaved way*/
case write_all:
- memset(buffer, mpi_rank+1, CHUNKSIZE);
+ memset(buffer, mpi_rank+1, CHUNK_SIZE);
count[0] = 1;
stride[0] = 1;
block[0] = chunk_dims[0];
- for (i=0; i<(nchunks+mpi_size-1)/mpi_size; i++){
- if (i*mpi_size+mpi_rank < nchunks){
+ for (i=0; i<nchunks/mpi_size; i++){
offset[0] = (i*mpi_size+mpi_rank)*chunk_dims[0];
hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);
@@ -266,21 +270,24 @@ parallel_access_dataset(const char *filename, int nchunks, access_type action, h
/* Write the buffer out */
hrc = H5Dwrite(*dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer);
VRFY((hrc >= 0), "H5Dwrite");
- }
-
}
break;
/* only extends the dataset */
case extend_only:
- /* Extend dataset*/
- hrc = H5Dset_extent(*dataset, size);
+ /* check if new size is larger than old size */
+ hrc = H5Sget_simple_extent_dims(dataspace, dims, maxdims);
VRFY((hrc >= 0), "");
+ /* Extend dataset*/
+ if (size[0] > dims[0]) {
+ hrc = H5Dset_extent(*dataset, size);
+ VRFY((hrc >= 0), "");
+ }
break;
- /* only opens the dataset */
+ /* only opens the *dataset */
case open_only:
break;
@@ -292,10 +299,10 @@ parallel_access_dataset(const char *filename, int nchunks, access_type action, h
*dataset = -1;
hrc = H5Sclose (dataspace);
- VRFY((hrc >= 0), "");
+ VRFY((hrc >= 0), "");
- hrc = H5Sclose (memspace);
- VRFY((hrc >= 0), "");
+ hrc = H5Sclose (memspace);
+ VRFY((hrc >= 0), "");
hrc = H5Fclose(*file_id);
VRFY((hrc >= 0), "");
@@ -303,7 +310,7 @@ parallel_access_dataset(const char *filename, int nchunks, access_type action, h
/* verify file size */
filesize = get_filesize(filename);
- est_filesize = nchunks*CHUNKSIZE*sizeof(unsigned char);
+ est_filesize = nchunks*CHUNK_SIZE*sizeof(unsigned char);
VRFY((filesize >= est_filesize), "file size check");
/* Can close some plists */
@@ -326,27 +333,29 @@ parallel_access_dataset(const char *filename, int nchunks, access_type action, h
* interleaved pattern.
*/
static void
-verify_data(const char *filename, int nchunks, write_type write_pattern, int close, hid_t *file_id, hid_t *dataset)
+verify_data(const char *filename, int chunk_factor, write_type write_pattern, int close, hid_t *file_id, hid_t *dataset)
{
/* HDF5 gubbins */
hid_t dataspace, memspace; /* HDF5 file identifier */
hid_t access_plist; /* HDF5 ID for file access property list */
herr_t hrc; /* HDF5 return code */
- hsize_t chunk_dims[1] ={CHUNKSIZE};
+ hsize_t chunk_dims[1] ={CHUNK_SIZE};
hsize_t count[1];
hsize_t stride[1];
hsize_t block[1];
hsize_t offset[1]; /* Selection offset within dataspace */
/* Variables used in reading data back */
- char buffer[CHUNKSIZE];
+ char buffer[CHUNK_SIZE];
int value, i;
int index;
-
+ long nchunks;
/* Initialize MPI */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ nchunks=chunk_factor*mpi_size;
+
/* Set up MPIO file access property lists */
access_plist = H5Pcreate(H5P_FILE_ACCESS);
VRFY((access_plist >= 0), "");
@@ -362,7 +371,7 @@ verify_data(const char *filename, int nchunks, write_type write_pattern, int clo
/* Open dataset*/
if (*dataset<0){
- *dataset = H5Dopen2(*file_id, DATASETNAME, H5P_DEFAULT);
+ *dataset = H5Dopen2(*file_id, DSET_NAME, H5P_DEFAULT);
VRFY((*dataset >= 0), "");
}
@@ -378,7 +387,7 @@ verify_data(const char *filename, int nchunks, write_type write_pattern, int clo
block[0] = chunk_dims[0];
for (i=0; i<nchunks; i++){
/* reset buffer values */
- memset(buffer, -1, CHUNKSIZE);
+ memset(buffer, -1, CHUNK_SIZE);
offset[0] = i*chunk_dims[0];
@@ -398,16 +407,15 @@ verify_data(const char *filename, int nchunks, write_type write_pattern, int clo
value = 0;
break;
case sec_last:
- if (i==(nchunks-2))
+ if (i==nchunks-2)
value = 100;
else
value = 0;
}
/* verify content of the chunk */
- for (index = 0; index < CHUNKSIZE; index++)
+ for (index = 0; index < CHUNK_SIZE; index++)
VRFY((buffer[index] == value), "data verification");
-
}
hrc = H5Sclose (dataspace);
@@ -465,6 +473,10 @@ test_chunk_alloc(void)
hid_t file_id, dataset;
file_id = dataset = -1;
+
+ /* Initialize MPI */
+ MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
filename = GetTestParameters();
if (VERBOSE_MED)
@@ -472,28 +484,28 @@ test_chunk_alloc(void)
/* Case 1 */
/* Create chunked dataset without writing anything.*/
- create_chunked_dataset(filename, DSETCHUNKS, none);
+ create_chunked_dataset(filename, CHUNK_FACTOR, none);
/* reopen dataset in parallel and check for file size */
- parallel_access_dataset(filename, DSETCHUNKS, open_only, &file_id, &dataset);
+ parallel_access_dataset(filename, CHUNK_FACTOR, open_only, &file_id, &dataset);
/* reopen dataset in parallel, read and verify the data */
- verify_data(filename, DSETCHUNKS, none, CLOSE, &file_id, &dataset);
+ verify_data(filename, CHUNK_FACTOR, none, CLOSE, &file_id, &dataset);
/* Case 2 */
/* Create chunked dataset without writing anything */
create_chunked_dataset(filename, 20, none);
/* reopen dataset in parallel and only extend it */
- parallel_access_dataset(filename, DSETCHUNKS, extend_only, &file_id, &dataset);
+ parallel_access_dataset(filename, CHUNK_FACTOR, extend_only, &file_id, &dataset);
/* reopen dataset in parallel, read and verify the data */
- verify_data(filename, DSETCHUNKS, none, CLOSE, &file_id, &dataset);
+ verify_data(filename, CHUNK_FACTOR, none, CLOSE, &file_id, &dataset);
/* Case 3 */
/* Create chunked dataset and write in the second to last chunk */
- create_chunked_dataset(filename, DSETCHUNKS, sec_last);
+ create_chunked_dataset(filename, CHUNK_FACTOR, sec_last);
/* Reopen dataset in parallel, read and verify the data. The file and dataset are not closed*/
- verify_data(filename, DSETCHUNKS, sec_last, NO_CLOSE, &file_id, &dataset);
+ verify_data(filename, CHUNK_FACTOR, sec_last, NO_CLOSE, &file_id, &dataset);
/* All processes write in all the chunks in a interleaved way */
- parallel_access_dataset(filename, DSETCHUNKS, write_all, &file_id, &dataset);
+ parallel_access_dataset(filename, CHUNK_FACTOR, write_all, &file_id, &dataset);
/* reopen dataset in parallel, read and verify the data */
- verify_data(filename, DSETCHUNKS, all, CLOSE, &file_id, &dataset);
+ verify_data(filename, CHUNK_FACTOR, all, CLOSE, &file_id, &dataset);
}
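
Note (not part of the patch): the CHUNK_FACTOR change above replaces the fixed
DSETCHUNKS count with a dataset holding chunk_factor * mpi_size chunks, so the
amount of data scales with the number of MPI ranks. A minimal standalone sketch
of that idea follows; CHUNK_SIZE, CHUNK_FACTOR and the dataset name mirror the
macros in the diff, while the helper name create_scaled_dataset is invented for
illustration and error handling is reduced to a bare check.

#include <stdio.h>
#include <stdlib.h>
#include "hdf5.h"

#define CHUNK_SIZE   1000   /* elements per chunk */
#define CHUNK_FACTOR 200    /* chunks created per MPI rank */

static void
create_scaled_dataset(const char *filename, int mpi_size)
{
    long    nchunks       = (long)CHUNK_FACTOR * mpi_size;   /* scales with rank count */
    hsize_t dims[1]       = { (hsize_t)nchunks * CHUNK_SIZE };
    hsize_t maxdims[1]    = { H5S_UNLIMITED };
    hsize_t chunk_dims[1] = { CHUNK_SIZE };

    hid_t dataspace = H5Screate_simple(1, dims, maxdims);
    hid_t dcpl      = H5Pcreate(H5P_DATASET_CREATE);

    /* 1-D chunking plus early allocation: every chunk is allocated at create
     * time, which is what the parallel extension test relies on. */
    if (H5Pset_chunk(dcpl, 1, chunk_dims) < 0 ||
        H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY) < 0) {
        fprintf(stderr, "dataset creation property list setup failed\n");
        exit(EXIT_FAILURE);
    }

    hid_t file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    hid_t dataset = H5Dcreate2(file_id, "ExtendibleArray", H5T_NATIVE_UCHAR,
                               dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);

    H5Dclose(dataset);
    H5Pclose(dcpl);
    H5Sclose(dataspace);
    H5Fclose(file_id);
}
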
diff --git a/testpar/t_posix_compliant.c b/testpar/t_posix_compliant.c
index 1be3e99..af0e6bc 100644
--- a/testpar/t_posix_compliant.c
+++ b/testpar/t_posix_compliant.c
@@ -819,11 +819,11 @@ main(int argc, char* argv[])
if(write_size == 0)
{
- lb = 1024;
+ lb = 16*numprocs*sizeof(int);
/* 1MB MPIO-IO overlapping is failing in copper. Lower it now pending
permenant fix for copper.*/
/* ub = 1024*1024;*/
- ub = 1024*512;
+ ub = lb*128;
inc = 4;
}
else
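
Note (not part of the patch): the bounds for the write-size sweep above are now
derived from the number of MPI processes instead of fixed byte counts, so the
per-process data volume stays comparable as more ranks are added. A small sketch
of the new bound computation, using the same variable names as the diff; the
main() wrapper is only for illustration.

#include <stdio.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    int numprocs, rank;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* 16 ints per process at the low end, 128x that at the high end */
    long lb = 16L * numprocs * (long)sizeof(int);
    long ub = lb * 128;

    if (rank == 0)
        printf("write-size sweep bounds: lb=%ld bytes, ub=%ld bytes for %d procs\n",
               lb, ub, numprocs);

    MPI_Finalize();
    return 0;
}
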
diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c
index 114ef49..175d159 100644
--- a/testpar/testphdf5.c
+++ b/testpar/testphdf5.c
@@ -24,8 +24,8 @@
#endif /* !PATH_MAX */
/* global variables */
-int dim0 = DIM0;
-int dim1 = DIM1;
+int dim0;
+int dim1;
int chunkdim0;
int chunkdim1;
int nerrors = 0; /* errors count */
@@ -119,8 +119,8 @@ usage(void)
printf("\t-f <prefix>\tfilename prefix\n");
printf("\t-2\t\tuse Split-file together with MPIO\n");
printf("\t-p\t\tuse combo MPI-POSIX driver\n");
- printf("\t-d <dim0> <dim1>\tdataset dimensions. Defaults (%d,%d)\n",
- DIM0, DIM1);
+ printf("\t-d <factor0> <factor1>\tdataset dimensions factors. Defaults (%d,%d)\n",
+ ROW_FACTOR, COL_FACTOR);
printf("\t-c <dim0> <dim1>\tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n");
printf("\n");
}
@@ -138,6 +138,7 @@ parse_options(int argc, char **argv)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* setup default chunk-size. Make sure sizes are > 0 */
+
chunkdim0 = (dim0+9)/10;
chunkdim1 = (dim1+9)/10;
@@ -184,9 +185,9 @@ parse_options(int argc, char **argv)
nerrors++;
return(1);
}
- dim0 = atoi(*(++argv));
+ dim0 = atoi(*(++argv))*mpi_size;
argc--;
- dim1 = atoi(*(++argv));
+ dim1 = atoi(*(++argv))*mpi_size;
/* set default chunkdim sizes too */
chunkdim0 = (dim0+9)/10;
chunkdim1 = (dim1+9)/10;
@@ -325,6 +326,9 @@ int main(int argc, char **argv)
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ dim0 = ROW_FACTOR*mpi_size;
+ dim1 = COL_FACTOR*mpi_size;
+
if (MAINPROCESS){
printf("===================================\n");
printf("PHDF5 TESTS START\n");
@@ -367,7 +371,6 @@ int main(int argc, char **argv)
"extendible dataset independent write #2", PARATESTFILE);
AddTest("selnone", none_selection_chunk, NULL,
"chunked dataset with none-selection", PARATESTFILE);
-
AddTest("calloc", test_chunk_alloc, NULL,
"parallel extend Chunked allocation on serial file", PARATESTFILE);
AddTest("fltread", test_filter_read, NULL,
diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h
index 81ad0f5..24c4432 100644
--- a/testpar/testphdf5.h
+++ b/testpar/testphdf5.h
@@ -36,6 +36,8 @@ enum H5TEST_COLL_CHUNK_API {API_NONE=0,API_LINK_HARD,
/* Constants definitions */
#define DIM0 600 /* Default dataset sizes. */
#define DIM1 1200 /* Values are from a monitor pixel sizes */
+#define ROW_FACTOR 8 /* Nominal row factor for dataset size */
+#define COL_FACTOR 16 /* Nominal column factor for dataset size */
#define RANK 2
#define DATASETNAME1 "Data1"
#define DATASETNAME2 "Data2"
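
Note (not part of the patch): the new ROW_FACTOR/COL_FACTOR macros let testphdf5.c
derive its default dataset dimensions from the MPI rank count (dim0 = ROW_FACTOR *
mpi_size, dim1 = COL_FACTOR * mpi_size) instead of the fixed monitor-sized
DIM0/DIM1 defaults. A minimal standalone sketch of that computation; the printing
main() is invented for illustration.

#include <stdio.h>
#include <mpi.h>

#define ROW_FACTOR 8    /* nominal row factor for dataset size */
#define COL_FACTOR 16   /* nominal column factor for dataset size */

int main(int argc, char *argv[])
{
    int mpi_size, mpi_rank;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    /* Default dataset dimensions now grow with the process count, so the
     * per-rank share of the dataset stays roughly constant. */
    int dim0 = ROW_FACTOR * mpi_size;
    int dim1 = COL_FACTOR * mpi_size;

    if (mpi_rank == 0)
        printf("default dataset dims: %d x %d for %d ranks\n", dim0, dim1, mpi_size);

    MPI_Finalize();
    return 0;
}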