author     Quincey Koziol <koziol@hdfgroup.org>    2004-10-04 20:29:31 (GMT)
committer  Quincey Koziol <koziol@hdfgroup.org>    2004-10-04 20:29:31 (GMT)
commit     74a448d084ac1e09be38fdf80c7b7298e3957ea4 (patch)
tree       0f47590955318bf799c5c6d7cefeecc66fc9bebe /testpar
parent     e2f3ab0ab01044d515eb74b64d24a569d06e3d86 (diff)
download   hdf5-74a448d084ac1e09be38fdf80c7b7298e3957ea4.zip
           hdf5-74a448d084ac1e09be38fdf80c7b7298e3957ea4.tar.gz
           hdf5-74a448d084ac1e09be38fdf80c7b7298e3957ea4.tar.bz2
[svn-r9358] Purpose:
Bug fix
Description:
Relax restrictions on parallel I/O to allow compressed, chunked datasets
to be read in parallel (collective access will be degraded to independent
access, but the data will still be retrieved).
Platforms tested:
FreeBSD 4.10 (sleipnir) w/parallel
Solaris 2.7 (arabica)
IRIX64 6.5 (modi4)
h5committest
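For illustration, a minimal sketch (not part of this patch) of what the relaxed
restriction permits: every rank opens the file with an MPI-IO file access
property list and requests a collective read of a compressed, chunked dataset;
the library quietly falls back to independent I/O for the filtered chunks. The
file name "eg_file.h5", the dataset name "compressed_data", and the buffer size
are placeholders, error checking is omitted, and the two-argument H5Dopen()
signature of the 1.6-era API is used, matching the test code in this patch.

    #include <mpi.h>
    #include "hdf5.h"

    static void read_compressed_dataset(MPI_Comm comm)
    {
        hid_t fapl, fid, dxpl, dset;
        int   buf[600];     /* assumed large enough for the placeholder dataset */

        /* Open the file for parallel access through the MPI-IO driver */
        fapl = H5Pcreate(H5P_FILE_ACCESS);
        H5Pset_fapl_mpio(fapl, comm, MPI_INFO_NULL);
        fid = H5Fopen("eg_file.h5", H5F_ACC_RDONLY, fapl);

        /* Open the compressed, chunked dataset (1.6-era H5Dopen signature) */
        dset = H5Dopen(fid, "compressed_data");

        /* Request collective transfer; with this patch the read succeeds,
         * degraded internally to independent access for the filtered chunks */
        dxpl = H5Pcreate(H5P_DATASET_XFER);
        H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
        H5Dread(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, buf);

        H5Pclose(dxpl);
        H5Dclose(dset);
        H5Pclose(fapl);
        H5Fclose(fid);
    }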
Diffstat (limited to 'testpar')
-rw-r--r--  testpar/t_coll_chunk.c    11
-rw-r--r--  testpar/t_dset.c         194
-rw-r--r--  testpar/t_file.c           2
-rw-r--r--  testpar/t_mdset.c          2
-rw-r--r--  testpar/t_ph5basic.c       1
-rw-r--r--  testpar/testphdf5.c        9
-rw-r--r--  testpar/testphdf5.h        5
7 files changed, 189 insertions, 35 deletions
diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c
index 494ae9b..1dc3c71 100644
--- a/testpar/t_coll_chunk.c
+++ b/testpar/t_coll_chunk.c
@@ -80,7 +80,7 @@ coll_chunk3(void)
   char *filename;
   int mpi_size;
   MPI_Comm comm = MPI_COMM_WORLD;
-  MPI_Comm_size(comm,&mpi_size);
+  MPI_Comm_size(comm,&mpi_size);
   filename = (char *) GetTestParameters();
   coll_chunktest(filename,mpi_size,BYROW_CONT);
@@ -92,7 +92,6 @@ coll_chunk4(void)
   char *filename;
   int mpi_size;
-  MPI_Comm comm = MPI_COMM_WORLD;
   MPI_Comm_size(comm,&mpi_size);
   filename = (char *) GetTestParameters();
@@ -352,16 +351,10 @@ ccslab_set(int mpi_rank, int mpi_size, hssize_t start[], hsize_t count[],
       /* Each process takes several disjoint blocks. */
       block[0] = 1;
       block[1] = 1;
-      /*
-      stride[0] = 3;
-      stride[1] = 6;
-      count[0] = 2;
-      count[1] = 3;
-      */
       stride[0] = 3;
       stride[1] = 3;
       count[0] = (SPACE_DIM1/mpi_size)/(stride[0]*block[0]);
-      count[1] =(SPACE_DIM2)/(stride[1]*block[1]);
+      count[1] = (SPACE_DIM2)/(stride[1]*block[1]);
       start[0] = SPACE_DIM1/mpi_size*mpi_rank;
       start[1] = 0;
       if (VERBOSE_MED) printf("slab_set BYROW_DISCONT\n");
diff --git a/testpar/t_dset.c b/testpar/t_dset.c
index 5ff0a6e..77cfdc4 100644
--- a/testpar/t_dset.c
+++ b/testpar/t_dset.c
@@ -232,7 +232,7 @@ dataset_writeInd(void)
    hbool_t use_gpfs = FALSE;           /* Use GPFS hints */
    hsize_t dims[RANK];                 /* dataset dim sizes */
    DATATYPE *data_array1 = NULL;       /* data buffer */
-   char *filename;
+   const char *filename;
    hssize_t start[RANK];               /* for hyperslab setting */
    hsize_t count[RANK], stride[RANK];  /* for hyperslab setting */
@@ -244,7 +244,7 @@ dataset_writeInd(void)
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Info info = MPI_INFO_NULL;
-   filename = (char *) GetTestParameters();
+   filename = GetTestParameters();
    if (VERBOSE_MED)
        printf("Independent write test on file %s\n", filename);
@@ -378,7 +378,7 @@ dataset_readInd(void)
    hbool_t use_gpfs = FALSE;           /* Use GPFS hints */
    DATATYPE *data_array1 = NULL;       /* data buffer */
    DATATYPE *data_origin1 = NULL;      /* expected data buffer */
-   char *filename;
+   const char *filename;
    hssize_t start[RANK];               /* for hyperslab setting */
    hsize_t count[RANK], stride[RANK];  /* for hyperslab setting */
@@ -390,7 +390,7 @@ dataset_readInd(void)
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Info info = MPI_INFO_NULL;
-   filename = (char *) GetTestParameters();
+   filename = GetTestParameters();
    if (VERBOSE_MED)
        printf("Independent read test on file %s\n", filename);
@@ -504,7 +504,7 @@ dataset_writeAll(void)
    hbool_t use_gpfs = FALSE;           /* Use GPFS hints */
    hsize_t dims[RANK];                 /* dataset dim sizes */
    DATATYPE *data_array1 = NULL;       /* data buffer */
-   char *filename;
+   const char *filename;
    hssize_t start[RANK];               /* for hyperslab setting */
    hsize_t count[RANK], stride[RANK];  /* for hyperslab setting */
@@ -516,7 +516,7 @@ dataset_writeAll(void)
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Info info = MPI_INFO_NULL;
-   filename = (char *) GetTestParameters();
+   filename = GetTestParameters();
    if (VERBOSE_MED)
        printf("Collective write test on file %s\n", filename);
@@ -863,7 +863,7 @@ dataset_readAll(void)
    hbool_t use_gpfs = FALSE;           /* Use GPFS hints */
    DATATYPE *data_array1 = NULL;       /* data buffer */
    DATATYPE *data_origin1 = NULL;      /* expected data buffer */
-   char *filename;
+   const char *filename;
    hssize_t start[RANK];               /* for hyperslab setting */
    hsize_t count[RANK], stride[RANK];  /* for hyperslab setting */
@@ -875,7 +875,7 @@ dataset_readAll(void)
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Info info = MPI_INFO_NULL;
-   filename = (char *) GetTestParameters();
+   filename = GetTestParameters();
    if (VERBOSE_MED)
        printf("Collective read test on file %s\n", filename);
@@ -1084,7 +1084,7 @@ extend_writeInd(void)
    hid_t mem_dataspace;        /* memory dataspace ID */
    hid_t dataset1, dataset2;   /* Dataset ID */
    hbool_t use_gpfs = FALSE;   /* Use GPFS hints */
-   char *filename;
+   const char *filename;
    hsize_t dims[RANK];         /* dataset dim sizes */
    hsize_t max_dims[RANK] = {H5S_UNLIMITED, H5S_UNLIMITED};   /* dataset maximum dim sizes */
@@ -1103,7 +1103,7 @@ extend_writeInd(void)
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Info info = MPI_INFO_NULL;
-   filename = (char *) GetTestParameters();
+   filename = GetTestParameters();
    if (VERBOSE_MED)
        printf("Extend independent write test on file %s\n", filename);
@@ -1308,7 +1308,7 @@ extend_writeInd(void)
 void
 extend_writeInd2(void)
 {
-    char *filename;
+    const char *filename;
     hid_t fid;                  /* HDF5 file ID */
     hid_t fapl;                 /* File access templates */
     hid_t fs;                   /* File dataspace ID */
@@ -1327,7 +1327,7 @@ extend_writeInd2(void)
     int i;                      /* Local index variable */
     herr_t ret;                 /* Generic return value */
-    filename = (char *) GetTestParameters();
+    filename = GetTestParameters();
     if (VERBOSE_MED)
         printf("Extend independent write test #2 on file %s\n", filename);
@@ -1481,7 +1481,7 @@ extend_readInd(void)
    DATATYPE *data_array1 = NULL;       /* data buffer */
    DATATYPE *data_array2 = NULL;       /* data buffer */
    DATATYPE *data_origin1 = NULL;      /* expected data buffer */
-   char *filename;
+   const char *filename;
    hssize_t start[RANK];               /* for hyperslab setting */
    hsize_t count[RANK], stride[RANK];  /* for hyperslab setting */
@@ -1493,7 +1493,7 @@ extend_readInd(void)
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Info info = MPI_INFO_NULL;
-   filename = (char *) GetTestParameters();
+   filename = GetTestParameters();
    if (VERBOSE_MED)
        printf("Extend independent read test on file %s\n", filename);
@@ -1658,7 +1658,7 @@ extend_writeAll(void)
    hid_t mem_dataspace;        /* memory dataspace ID */
    hid_t dataset1, dataset2;   /* Dataset ID */
    hbool_t use_gpfs = FALSE;   /* Use GPFS hints */
-   char *filename;
+   const char *filename;
    hsize_t dims[RANK];         /* dataset dim sizes */
    hsize_t max_dims[RANK] = {H5S_UNLIMITED, H5S_UNLIMITED};   /* dataset maximum dim sizes */
@@ -1677,7 +1677,7 @@ extend_writeAll(void)
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Info info = MPI_INFO_NULL;
-   filename = (char *) GetTestParameters();
+   filename = GetTestParameters();
    if (VERBOSE_MED)
        printf("Extend independent write test on file %s\n", filename);
@@ -1899,7 +1899,7 @@ extend_readAll(void)
    hid_t mem_dataspace;        /* memory dataspace ID */
    hid_t dataset1, dataset2;   /* Dataset ID */
    hbool_t use_gpfs = FALSE;   /* Use GPFS hints */
-   char *filename;
+   const char *filename;
    hsize_t dims[RANK];         /* dataset dim sizes */
    DATATYPE *data_array1 = NULL;   /* data buffer */
    DATATYPE *data_array2 = NULL;   /* data buffer */
@@ -1915,7 +1915,7 @@ extend_readAll(void)
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Info info = MPI_INFO_NULL;
-   filename = (char *) GetTestParameters();
+   filename = GetTestParameters();
    if (VERBOSE_MED)
        printf("Extend independent read test on file %s\n", filename);
@@ -2070,3 +2070,161 @@ extend_readAll(void)
     if (data_array2) free(data_array2);
     if (data_origin1) free(data_origin1);
 }
+
+/*
+ * Example of using the parallel HDF5 library to read a compressed
+ * dataset in an HDF5 file with collective parallel access support.
+ */
+
+#ifdef H5_HAVE_FILTER_DEFLATE
+void
+compress_readAll(void)
+{
+    hid_t fid;                  /* HDF5 file ID */
+    hid_t acc_tpl;              /* File access templates */
+    hid_t dcpl;                 /* Dataset creation property list */
+    hid_t xfer_plist;           /* Dataset transfer properties list */
+    hid_t dataspace;            /* Dataspace ID */
+    hid_t dataset;              /* Dataset ID */
+    int rank=1;                 /* Dataspace rank */
+    hsize_t dim=dim0;           /* Dataspace dimensions */
+    unsigned u;                 /* Local index variable */
+    hbool_t use_gpfs = FALSE;   /* Use GPFS hints */
+    DATATYPE *data_read = NULL; /* data buffer */
+    DATATYPE *data_orig = NULL; /* expected data buffer */
+    const char *filename;
+    MPI_Comm comm = MPI_COMM_WORLD;
+    MPI_Info info = MPI_INFO_NULL;
+    int mpi_size, mpi_rank;
+    herr_t ret;                 /* Generic return value */
+
+    filename = GetTestParameters();
+    if (VERBOSE_MED)
+        printf("Collective chunked dataset read test on file %s\n", filename);
+
+    /* Retrieve MPI parameters */
+    MPI_Comm_size(comm,&mpi_size);
+    MPI_Comm_rank(comm,&mpi_rank);
+
+    /* Allocate data buffer */
+    data_orig = (DATATYPE *)HDmalloc((size_t)dim*sizeof(DATATYPE));
+    VRFY((data_orig != NULL), "data_origin1 malloc succeeded");
+    data_read = (DATATYPE *)HDmalloc((size_t)dim*sizeof(DATATYPE));
+    VRFY((data_read != NULL), "data_array1 malloc succeeded");
+
+    /* Initialize data buffers */
+    for(u=0; u<dim;u++)
+        data_orig[u]=u;
+
+    /* Process zero creates the file with a compressed, chunked dataset */
+    if(mpi_rank==0) {
+        hsize_t chunk_dim;      /* Chunk dimensions */
+
+        /* Create the file */
+        fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+        VRFY((fid > 0), "H5Fcreate succeeded");
+
+        /* Create property list for chunking and compression */
+        dcpl = H5Pcreate(H5P_DATASET_CREATE);
+        VRFY((dcpl > 0), "H5Pcreate succeeded");
+
+        ret=H5Pset_layout(dcpl, H5D_CHUNKED);
+        VRFY((ret >= 0), "H5Pset_layout succeeded");
+
+        /* Use eight chunks */
+        chunk_dim=dim/8;
+        ret=H5Pset_chunk(dcpl, rank, &chunk_dim);
+        VRFY((ret >= 0), "H5Pset_chunk succeeded");
+
+        ret=H5Pset_deflate(dcpl, 9);
+        VRFY((ret >= 0), "H5Pset_deflate succeeded");
+
+        /* Create dataspace */
+        dataspace = H5Screate_simple(rank, &dim, NULL);
+        VRFY((dataspace > 0), "H5Screate_simple succeeded");
+
+        /* Create dataset */
+        dataset = H5Dcreate(fid, "compressed_data", H5T_NATIVE_INT, dataspace, dcpl);
+        VRFY((dataset > 0), "H5Screate_simple succeeded");
+
+        /* Write compressed data */
+        ret=H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data_orig);
+        VRFY((ret >= 0), "H5Dwrite succeeded");
+
+        /* Close objects */
+        ret=H5Pclose(dcpl);
+        VRFY((ret >= 0), "H5Pclose succeeded");
+        ret=H5Sclose(dataspace);
+        VRFY((ret >= 0), "H5Sclose succeeded");
+        ret=H5Dclose(dataset);
+        VRFY((ret >= 0), "H5Dclose succeeded");
+        ret=H5Fclose(fid);
+        VRFY((ret >= 0), "H5Fclose succeeded");
+    }
+
+    /* Wait for file to be created */
+    MPI_Barrier(comm);
+
+    /* -------------------
+     * OPEN AN HDF5 FILE
+     * -------------------*/
+
+    /* setup file access template */
+    acc_tpl = create_faccess_plist(comm, info, facc_type, use_gpfs);
+    VRFY((acc_tpl >= 0), "");
+
+    /* open the file collectively */
+    fid=H5Fopen(filename,H5F_ACC_RDWR,acc_tpl);
+    VRFY((fid > 0), "H5Fopen succeeded");
+
+    /* Release file-access template */
+    ret=H5Pclose(acc_tpl);
+    VRFY((ret >= 0), "H5Pclose succeeded");
+
+    /* Open dataset with compressed chunks */
+    dataset = H5Dopen(fid, "compressed_data");
+    VRFY((dataset > 0), "H5Dopen succeeded");
+
+    /* Try reading & writing data */
+    if(dataset>0) {
+        /* Create dataset transfer property list */
+        xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+        VRFY((xfer_plist > 0), "H5Pcreate succeeded");
+
+        ret=H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+        VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+        /* Try reading the data */
+        ret=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
+        VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+        /* Verify data read */
+        for(u=0; u<dim; u++)
+            if(data_orig[u]!=data_read[u]) {
+                printf("Line #%d: written!=retrieved: data_orig[%u]=%d, data_read[%u]=%d\n",__LINE__,
+                    (unsigned)u,data_orig[u],(unsigned)u,data_read[u]);
+                nerrors++;
+            }
+
+        /* Writing to the compressed, chunked dataset in parallel should fail */
+        H5E_BEGIN_TRY {
+            ret=H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
+        } H5E_END_TRY;
+        VRFY((ret < 0), "H5Dwrite failed");
+
+        ret=H5Pclose(xfer_plist);
+        VRFY((ret >= 0), "H5Pclose succeeded");
+        ret=H5Dclose(dataset);
+        VRFY((ret >= 0), "H5Dclose succeeded");
+    } /* end if */
+
+    ret=H5Fclose(fid);
+    VRFY((ret >= 0), "H5Fclose succeeded");
+
+    /* release data buffers */
+    if (data_read) HDfree(data_read);
+    if (data_orig) HDfree(data_orig);
+}
+#endif /* H5_HAVE_FILTER_DEFLATE */
diff --git a/testpar/t_file.c b/testpar/t_file.c
index c83a1af..6fcde32 100644
--- a/testpar/t_file.c
+++ b/testpar/t_file.c
@@ -12,8 +12,6 @@
  * access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu. *
  * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-/* $Id$ */
-
 /*
  * Parallel tests for file operations
  */
diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c
index 6d8bd60..b23a26e 100644
--- a/testpar/t_mdset.c
+++ b/testpar/t_mdset.c
@@ -12,8 +12,6 @@
  * access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu. *
  * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-/* $Id$ */
-
 #include "testphdf5.h"
 #define DIM 2
diff --git a/testpar/t_ph5basic.c b/testpar/t_ph5basic.c
index c373017..1649c13 100644
--- a/testpar/t_ph5basic.c
+++ b/testpar/t_ph5basic.c
@@ -11,7 +11,6 @@
  * http://hdf.ncsa.uiuc.edu/HDF5/doc/Copyright.html. If you do not have      *
  * access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu. *
  * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-/* $Id$ */
 /*
  * Test parallel HDF5 basic components
diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c
index dbe3ecc..0f6624e 100644
--- a/testpar/testphdf5.c
+++ b/testpar/testphdf5.c
@@ -33,7 +33,7 @@
 int ngroups = 512;          /* number of groups to create in root
                             * group. */
 int facc_type = FACC_MPIO;  /*Test file access type */
-H5E_auto_stack_t old_func;  /* previous error handler */
+H5E_auto_t old_func;        /* previous error handler */
 void *old_client_data;      /* previous error handler arg.*/
 /* other option flags */
@@ -381,6 +381,11 @@ int main(int argc, char **argv)
     AddTest("eidsetw2", extend_writeInd2, NULL,
             "extendible dataset independent write #2", PARATESTFILE);
+#ifdef H5_HAVE_FILTER_DEFLATE
+    AddTest("cmpdsetr", compress_readAll, NULL,
+            "compressed dataset collective read", PARATESTFILE);
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
     ndsets_params.name = PARATESTFILE;
     ndsets_params.count = ndatasets;
     AddTest("ndsetw", multiple_dset_write, NULL,
@@ -428,6 +433,7 @@ int main(int argc, char **argv)
         AddTest("cchunk4", coll_chunk4,NULL,
                 "collective to independent chunk io",PARATESTFILE);
     }
+
     AddTest("null", null_dataset, NULL,
             "null dataset test", PARATESTFILE);
@@ -438,7 +444,6 @@ int main(int argc, char **argv)
             "I/O mode confusion test -- hangs quickly on failure",
             &io_mode_confusion_params);
-
     /* Display testing information */
     TestInfo(argv[0]);
diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h
index 877e88c..b66d196 100644
--- a/testpar/testphdf5.h
+++ b/testpar/testphdf5.h
@@ -133,7 +133,7 @@ typedef int DATATYPE;
 extern int dim0, dim1;              /*Dataset dimensions */
 extern int chunkdim0, chunkdim1;    /*Chunk dimensions */
 extern int nerrors;                 /*errors count */
-extern H5E_auto_stack_t old_func;   /* previous error handler */
+extern H5E_auto_t old_func;         /* previous error handler */
 extern void *old_client_data;       /*previous error handler arg.*/
 extern int facc_type;               /*Test file access type */
@@ -164,6 +164,9 @@ void coll_chunk2(void);
 void coll_chunk3(void);
 void coll_chunk4(void);
 void io_mode_confusion(void);
+#ifdef H5_HAVE_FILTER_DEFLATE
+void compress_readAll(void);
+#endif /* H5_HAVE_FILTER_DEFLATE */
 /* commonly used prototypes */
 hid_t create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type,
                            hbool_t use_gpfs);