summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--  release_docs/RELEASE.txt |   2
-rw-r--r--  src/H5D.c                |   4
-rw-r--r--  src/H5Dio.c              |   2
-rw-r--r--  src/H5Distore.c          |  28
-rw-r--r--  src/H5Dmpio.c            |  15
-rw-r--r--  src/H5Dpkg.h             |   4
-rw-r--r--  testpar/t_coll_chunk.c   |  15
-rw-r--r--  testpar/t_dset.c         | 194
-rw-r--r--  testpar/t_file.c         |   2
-rw-r--r--  testpar/t_mdset.c        | 291
-rw-r--r--  testpar/t_ph5basic.c     |   1
-rw-r--r--  testpar/testphdf5.c      |  15
-rw-r--r--  testpar/testphdf5.h      |   6
13 files changed, 515 insertions(+), 64 deletions(-)
diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt
index af1637f..ad67b36 100644
--- a/release_docs/RELEASE.txt
+++ b/release_docs/RELEASE.txt
@@ -46,6 +46,8 @@ New Features
Parallel Library:
-----------------
+ - Allow compressed, chunked datasets to be read in parallel.
+ QAK - 2004/10/04
Tools:
------
diff --git a/src/H5D.c b/src/H5D.c
index 5b2fccc..ea9e613 100644
--- a/src/H5D.c
+++ b/src/H5D.c
@@ -2537,10 +2537,6 @@ H5D_open_oid(const H5G_entry_t *ent, hid_t dxpl_id)
if(H5P_set(plist, H5D_CRT_DATA_PIPELINE_NAME, &pline) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set pipeline")
- /* If MPI VFD is used, no filter support yet. */
- if(IS_H5FD_MPI(dataset->ent.file) && pline.nused > 0)
- HGOTO_ERROR (H5E_DATASET, H5E_UNSUPPORTED, NULL, "Parallel IO does not support filters yet")
-
/*
* Get the raw data layout info. It's actually stored in two locations:
* the storage message of the dataset (dataset->storage) and certain
diff --git a/src/H5Dio.c b/src/H5Dio.c
index 9d5226f..d61ea11 100644
--- a/src/H5Dio.c
+++ b/src/H5Dio.c
@@ -3266,7 +3266,7 @@ H5D_ioinfo_init(H5D_t *dset, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
/*
* Check if we can set direct MPI-IO read/write functions
*/
- opt=H5D_mpio_opt_possible(dset->ent.file,mem_space,file_space,flags,&dset->shared->layout);
+ opt=H5D_mpio_opt_possible(dset,mem_space,file_space,flags);
if(opt==FAIL)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, NULL, "invalid check for direct IO dataspace ");
diff --git a/src/H5Distore.c b/src/H5Distore.c
index 9d991d3..3673cad 100644
--- a/src/H5Distore.c
+++ b/src/H5Distore.c
@@ -1922,17 +1922,11 @@ HDfprintf(stderr,"%s: buf=%p\n",FUNC,buf);
* writing to other elements in the same chunk. Do a direct
* read-through of only the elements requested.
*/
- if ((dset->shared->layout.u.chunk.size>dset->shared->cache.chunk.nbytes && dset->shared->dcpl_cache.pline.nused==0 && chunk_addr!=HADDR_UNDEF)
- || (IS_H5FD_MPI(dset->ent.file) && (H5F_ACC_RDWR & H5F_get_intent(dset->ent.file)))) {
+ if (dset->shared->dcpl_cache.pline.nused==0 && ((dset->shared->layout.u.chunk.size>dset->shared->cache.chunk.nbytes && chunk_addr!=HADDR_UNDEF)
+ || (IS_H5FD_MPI(dset->ent.file) && (H5F_ACC_RDWR & H5F_get_intent(dset->ent.file))))) {
H5D_io_info_t chk_io_info; /* Temporary I/O info object */
H5D_storage_t chk_store; /* Chunk storage information */
-#ifdef H5_HAVE_PARALLEL
- /* Additional sanity check when operating in parallel */
- if (chunk_addr==HADDR_UNDEF || dset->shared->dcpl_cache.pline.nused>0)
- HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "unable to locate raw data chunk");
-#endif /* H5_HAVE_PARALLEL */
-
/* Set up the storage information for the chunk */
chk_store.contig.dset_addr=chunk_addr;
chk_store.contig.dset_size=(hsize_t)dset->shared->layout.u.chunk.size;
@@ -2125,17 +2119,21 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
* writing to other elements in the same chunk. Do a direct
* write-through of only the elements requested.
*/
- if ((dset->shared->layout.u.chunk.size>dset->shared->cache.chunk.nbytes && dset->shared->dcpl_cache.pline.nused==0 && chunk_addr!=HADDR_UNDEF)
- || (IS_H5FD_MPI(dset->ent.file) && (H5F_ACC_RDWR & H5F_get_intent(dset->ent.file)))) {
- H5D_io_info_t chk_io_info; /* Temporary I/O info object */
- H5D_storage_t chk_store; /* Chunk storage information */
-
#ifdef H5_HAVE_PARALLEL
- /* Additional sanity check when operating in parallel */
- if (chunk_addr==HADDR_UNDEF || dset->shared->dcpl_cache.pline.nused>0)
+ /* Additional sanity checks when operating in parallel */
+ if(IS_H5FD_MPI(dset->ent.file)) {
+ if (chunk_addr==HADDR_UNDEF)
HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "unable to locate raw data chunk");
+ if (dset->shared->dcpl_cache.pline.nused>0)
+ HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "cannot write to chunked storage with filters in parallel");
+ } /* end if */
#endif /* H5_HAVE_PARALLEL */
+ if (dset->shared->dcpl_cache.pline.nused==0 && ((dset->shared->layout.u.chunk.size>dset->shared->cache.chunk.nbytes && chunk_addr!=HADDR_UNDEF)
+ || (IS_H5FD_MPI(dset->ent.file) && (H5F_ACC_RDWR & H5F_get_intent(dset->ent.file))))) {
+ H5D_io_info_t chk_io_info; /* Temporary I/O info object */
+ H5D_storage_t chk_store; /* Chunk storage information */
+
/* Set up the storage information for the chunk */
chk_store.contig.dset_addr=chunk_addr;
chk_store.contig.dset_size=(hsize_t)dset->shared->layout.u.chunk.size;
diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c
index 37a49ca..b96d32e 100644
--- a/src/H5Dmpio.c
+++ b/src/H5Dmpio.c
@@ -67,7 +67,7 @@ H5D_mpio_spaces_xfer(H5D_io_info_t *io_info, size_t elmt_size,
*-------------------------------------------------------------------------
*/
htri_t
-H5D_mpio_opt_possible( const H5F_t *file, const H5S_t *mem_space, const H5S_t *file_space, const unsigned flags,const H5O_layout_t *layout)
+H5D_mpio_opt_possible( const H5D_t *dset, const H5S_t *mem_space, const H5S_t *file_space, const unsigned flags)
{
htri_t c1,c2; /* Flags whether a selection is optimizable */
htri_t ret_value=TRUE;
@@ -75,6 +75,7 @@ H5D_mpio_opt_possible( const H5F_t *file, const H5S_t *mem_space, const H5S_t *f
FUNC_ENTER_NOAPI(H5D_mpio_opt_possible, FAIL);
/* Check args */
+ assert(dset);
assert(mem_space);
assert(file_space);
@@ -120,10 +121,14 @@ H5D_mpio_opt_possible( const H5F_t *file, const H5S_t *mem_space, const H5S_t *f
int mpi_code; /* MPI return code */
unsigned u; /* Local index variable */
+ /* Disallow collective I/O if there are any I/O filters on chunks */
+ if(dset->shared->dcpl_cache.pline.nused>0)
+ HGOTO_DONE(FALSE)
+
/* Getting MPI communicator and rank */
- if((comm = H5F_mpi_get_comm(file))==MPI_COMM_NULL)
+ if((comm = H5F_mpi_get_comm(dset->ent.file))==MPI_COMM_NULL)
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't retrieve MPI communicator")
- if((mpi_rank = H5F_mpi_get_rank(file))<0)
+ if((mpi_rank = H5F_mpi_get_rank(dset->ent.file))<0)
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't retrieve MPI rank")
/* Currently collective chunking storage
@@ -154,8 +159,8 @@ H5D_mpio_opt_possible( const H5F_t *file, const H5S_t *mem_space, const H5S_t *f
if(H5S_SELECT_BOUNDS(file_space,startf,endf)==FAIL)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE,FAIL, "invalid check for single selection blocks");
- for(u=0; u < layout->u.chunk.ndims; u++)
- chunk_dim[u] = layout->u.chunk.dim[u];
+ for(u=0; u < dset->shared->layout.u.chunk.ndims; u++)
+ chunk_dim[u] = dset->shared->layout.u.chunk.dim[u];
/* Case 1: check whether all hyperslab in this process is inside one chunk.
Note: we don't handle when starting point is less than zero since that may cover
diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h
index a5bbeaf..52664f3 100644
--- a/src/H5Dpkg.h
+++ b/src/H5Dpkg.h
@@ -290,8 +290,8 @@ H5_DLL herr_t H5D_mpio_spaces_write(H5D_io_info_t *io_info,
/* MPI-IO function to check if a direct I/O transfer is possible between
* memory and the file */
-H5_DLL htri_t H5D_mpio_opt_possible(const H5F_t *file, const H5S_t *mem_space,
- const H5S_t *file_space, const unsigned flags, const H5O_layout_t *layout);
+H5_DLL htri_t H5D_mpio_opt_possible(const H5D_t *dset, const H5S_t *mem_space,
+ const H5S_t *file_space, const unsigned flags);
#endif /* H5_HAVE_PARALLEL */
/* Testing functions */
diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c
index 680940f..c7d4bef 100644
--- a/testpar/t_coll_chunk.c
+++ b/testpar/t_coll_chunk.c
@@ -93,7 +93,7 @@ coll_chunk4(void)
char *filename;
int mpi_size;
MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Comm_size(comm,&mpi_size);
+ MPI_Comm_size(comm,&mpi_size);
filename = (char *) GetTestParameters();
coll_chunktest(filename,mpi_size*2,BYROW_DISCONT);
@@ -339,17 +339,8 @@ ccslab_set(int mpi_rank, int mpi_size, hssize_t start[], hsize_t count[],
break;
case BYROW_DISCONT:
/* Each process takes several disjoint blocks. */
- /*
- block[0] = 2;
- block[1] = 2;
- stride[0] = 3;
- stride[1] = 6;
- count[0] = 2;
- count[1] = 3;
- */
-
- block[0] = 1;
- block[1] = 1;
+ block[0] = 1;
+ block[1] = 1;
stride[0] = 3;
stride[1] = 3;
count[0] = (SPACE_DIM1/mpi_size)/(stride[0]*block[0]);
diff --git a/testpar/t_dset.c b/testpar/t_dset.c
index baa1b51..b45fdb3 100644
--- a/testpar/t_dset.c
+++ b/testpar/t_dset.c
@@ -232,7 +232,7 @@ dataset_writeInd(void)
hbool_t use_gpfs = FALSE; /* Use GPFS hints */
hsize_t dims[RANK]; /* dataset dim sizes */
DATATYPE *data_array1 = NULL; /* data buffer */
- char *filename;
+ const char *filename;
hssize_t start[RANK]; /* for hyperslab setting */
hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
@@ -244,7 +244,7 @@ dataset_writeInd(void)
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
- filename = (char *) GetTestParameters();
+ filename = GetTestParameters();
if (VERBOSE_MED)
printf("Independent write test on file %s\n", filename);
@@ -378,7 +378,7 @@ dataset_readInd(void)
hbool_t use_gpfs = FALSE; /* Use GPFS hints */
DATATYPE *data_array1 = NULL; /* data buffer */
DATATYPE *data_origin1 = NULL; /* expected data buffer */
- char *filename;
+ const char *filename;
hssize_t start[RANK]; /* for hyperslab setting */
hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
@@ -390,7 +390,7 @@ dataset_readInd(void)
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
- filename = (char *) GetTestParameters();
+ filename = GetTestParameters();
if (VERBOSE_MED)
printf("Independent read test on file %s\n", filename);
@@ -504,7 +504,7 @@ dataset_writeAll(void)
hbool_t use_gpfs = FALSE; /* Use GPFS hints */
hsize_t dims[RANK]; /* dataset dim sizes */
DATATYPE *data_array1 = NULL; /* data buffer */
- char *filename;
+ const char *filename;
hssize_t start[RANK]; /* for hyperslab setting */
hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
@@ -516,7 +516,7 @@ dataset_writeAll(void)
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
- filename = (char *) GetTestParameters();
+ filename = GetTestParameters();
if (VERBOSE_MED)
printf("Collective write test on file %s\n", filename);
@@ -863,7 +863,7 @@ dataset_readAll(void)
hbool_t use_gpfs = FALSE; /* Use GPFS hints */
DATATYPE *data_array1 = NULL; /* data buffer */
DATATYPE *data_origin1 = NULL; /* expected data buffer */
- char *filename;
+ const char *filename;
hssize_t start[RANK]; /* for hyperslab setting */
hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
@@ -875,7 +875,7 @@ dataset_readAll(void)
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
- filename = (char *) GetTestParameters();
+ filename = GetTestParameters();
if (VERBOSE_MED)
printf("Collective read test on file %s\n", filename);
@@ -1084,7 +1084,7 @@ extend_writeInd(void)
hid_t mem_dataspace; /* memory dataspace ID */
hid_t dataset1, dataset2; /* Dataset ID */
hbool_t use_gpfs = FALSE; /* Use GPFS hints */
- char *filename;
+ const char *filename;
hsize_t dims[RANK]; /* dataset dim sizes */
hsize_t max_dims[RANK] =
{H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
@@ -1103,7 +1103,7 @@ extend_writeInd(void)
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
- filename = (char *) GetTestParameters();
+ filename = GetTestParameters();
if (VERBOSE_MED)
printf("Extend independent write test on file %s\n", filename);
@@ -1312,7 +1312,7 @@ extend_writeInd(void)
void
extend_writeInd2(void)
{
- char *filename;
+ const char *filename;
hid_t fid; /* HDF5 file ID */
hid_t fapl; /* File access templates */
hid_t fs; /* File dataspace ID */
@@ -1331,7 +1331,7 @@ extend_writeInd2(void)
int i; /* Local index variable */
herr_t ret; /* Generic return value */
- filename = (char *) GetTestParameters();
+ filename = GetTestParameters();
if (VERBOSE_MED)
printf("Extend independent write test #2 on file %s\n", filename);
@@ -1485,7 +1485,7 @@ extend_readInd(void)
DATATYPE *data_array1 = NULL; /* data buffer */
DATATYPE *data_array2 = NULL; /* data buffer */
DATATYPE *data_origin1 = NULL; /* expected data buffer */
- char *filename;
+ const char *filename;
hssize_t start[RANK]; /* for hyperslab setting */
hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
@@ -1497,7 +1497,7 @@ extend_readInd(void)
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
- filename = (char *) GetTestParameters();
+ filename = GetTestParameters();
if (VERBOSE_MED)
printf("Extend independent read test on file %s\n", filename);
@@ -1662,7 +1662,7 @@ extend_writeAll(void)
hid_t mem_dataspace; /* memory dataspace ID */
hid_t dataset1, dataset2; /* Dataset ID */
hbool_t use_gpfs = FALSE; /* Use GPFS hints */
- char *filename;
+ const char *filename;
hsize_t dims[RANK]; /* dataset dim sizes */
hsize_t max_dims[RANK] =
{H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
@@ -1681,7 +1681,7 @@ extend_writeAll(void)
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
- filename = (char *) GetTestParameters();
+ filename = GetTestParameters();
if (VERBOSE_MED)
printf("Extend independent write test on file %s\n", filename);
@@ -1907,7 +1907,7 @@ extend_readAll(void)
hid_t mem_dataspace; /* memory dataspace ID */
hid_t dataset1, dataset2; /* Dataset ID */
hbool_t use_gpfs = FALSE; /* Use GPFS hints */
- char *filename;
+ const char *filename;
hsize_t dims[RANK]; /* dataset dim sizes */
DATATYPE *data_array1 = NULL; /* data buffer */
DATATYPE *data_array2 = NULL; /* data buffer */
@@ -1923,7 +1923,7 @@ extend_readAll(void)
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
- filename = (char *) GetTestParameters();
+ filename = GetTestParameters();
if (VERBOSE_MED)
printf("Extend independent read test on file %s\n", filename);
@@ -2078,3 +2078,161 @@ extend_readAll(void)
if (data_array2) free(data_array2);
if (data_origin1) free(data_origin1);
}
+
+/*
+ * Example of using the parallel HDF5 library to read a compressed
+ * dataset in an HDF5 file with collective parallel access support.
+ */
+
+#ifdef H5_HAVE_FILTER_DEFLATE
+void
+compress_readAll(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t dcpl; /* Dataset creation property list */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t dataspace; /* Dataspace ID */
+ hid_t dataset; /* Dataset ID */
+ int rank=1; /* Dataspace rank */
+ hsize_t dim=dim0; /* Dataspace dimensions */
+ unsigned u; /* Local index variable */
+ hbool_t use_gpfs = FALSE; /* Use GPFS hints */
+ DATATYPE *data_read = NULL; /* data buffer */
+ DATATYPE *data_orig = NULL; /* expected data buffer */
+ const char *filename;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+ int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+
+ filename = GetTestParameters();
+ if (VERBOSE_MED)
+ printf("Collective chunked dataset read test on file %s\n", filename);
+
+ /* Retrieve MPI parameters */
+ MPI_Comm_size(comm,&mpi_size);
+ MPI_Comm_rank(comm,&mpi_rank);
+
+ /* Allocate data buffer */
+ data_orig = (DATATYPE *)HDmalloc((size_t)dim*sizeof(DATATYPE));
+ VRFY((data_orig != NULL), "data_orig malloc succeeded");
+ data_read = (DATATYPE *)HDmalloc((size_t)dim*sizeof(DATATYPE));
+ VRFY((data_read != NULL), "data_read malloc succeeded");
+
+ /* Initialize data buffers */
+ for(u=0; u<dim;u++)
+ data_orig[u]=u;
+
+ /* Process zero creates the file with a compressed, chunked dataset */
+ if(mpi_rank==0) {
+ hsize_t chunk_dim; /* Chunk dimensions */
+
+ /* Create the file */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((fid > 0), "H5Fcreate succeeded");
+
+ /* Create property list for chunking and compression */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl > 0), "H5Pcreate succeeded");
+
+ ret=H5Pset_layout(dcpl, H5D_CHUNKED);
+ VRFY((ret >= 0), "H5Pset_layout succeeded");
+
+ /* Use eight chunks */
+ chunk_dim=dim/8;
+ ret=H5Pset_chunk(dcpl, rank, &chunk_dim);
+ VRFY((ret >= 0), "H5Pset_chunk succeeded");
+
+ ret=H5Pset_deflate(dcpl, 9);
+ VRFY((ret >= 0), "H5Pset_deflate succeeded");
+
+ /* Create dataspace */
+ dataspace = H5Screate_simple(rank, &dim, NULL);
+ VRFY((dataspace > 0), "H5Screate_simple succeeded");
+
+ /* Create dataset */
+ dataset = H5Dcreate(fid, "compressed_data", H5T_NATIVE_INT, dataspace, dcpl);
+ VRFY((dataset > 0), "H5Dcreate succeeded");
+
+ /* Write compressed data */
+ ret=H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data_orig);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* Close objects */
+ ret=H5Pclose(dcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret=H5Sclose(dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret=H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret=H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ }
+
+ /* Wait for file to be created */
+ MPI_Barrier(comm);
+
+ /* -------------------
+ * OPEN AN HDF5 FILE
+ * -------------------*/
+
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type, use_gpfs);
+ VRFY((acc_tpl >= 0), "");
+
+ /* open the file collectively */
+ fid=H5Fopen(filename,H5F_ACC_RDWR,acc_tpl);
+ VRFY((fid > 0), "H5Fopen succeeded");
+
+ /* Release file-access template */
+ ret=H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+
+ /* Open dataset with compressed chunks */
+ dataset = H5Dopen(fid, "compressed_data");
+ VRFY((dataset > 0), "H5Dopen succeeded");
+
+ /* Try reading & writing data */
+ if(dataset>0) {
+ /* Create dataset transfer property list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist > 0), "H5Pcreate succeeded");
+
+ ret=H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ /* Try reading the data */
+ ret=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
+ VRFY((ret >= 0), "H5Dread succeeded");
+
+ /* Verify data read */
+ for(u=0; u<dim; u++)
+ if(data_orig[u]!=data_read[u]) {
+ printf("Line #%d: written!=retrieved: data_orig[%u]=%d, data_read[%u]=%d\n",__LINE__,
+ (unsigned)u,data_orig[u],(unsigned)u,data_read[u]);
+ nerrors++;
+ }
+
+ /* Writing to the compressed, chunked dataset in parallel should fail */
+ H5E_BEGIN_TRY {
+ ret=H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
+ } H5E_END_TRY;
+ VRFY((ret < 0), "H5Dwrite failed");
+
+ ret=H5Pclose(xfer_plist);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret=H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ } /* end if */
+
+ ret=H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+
+ /* release data buffers */
+ if (data_read) HDfree(data_read);
+ if (data_orig) HDfree(data_orig);
+}
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
diff --git a/testpar/t_file.c b/testpar/t_file.c
index c83a1af..6fcde32 100644
--- a/testpar/t_file.c
+++ b/testpar/t_file.c
@@ -12,8 +12,6 @@
* access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-/* $Id$ */
-
/*
* Parallel tests for file operations
*/
diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c
index e437b22..191a1f8 100644
--- a/testpar/t_mdset.c
+++ b/testpar/t_mdset.c
@@ -12,8 +12,6 @@
* access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-/* $Id$ */
-
#include "testphdf5.h"
#define DIM 2
@@ -1291,6 +1289,295 @@ void get_slab(hssize_t chunk_origin[],
count[0] = count[1] = 1;
}
+/*
+ * This function is based on bug demonstration code provided by Thomas
+ * Guignon (thomas.guignon@ifp.fr), and is intended to verify the
+ * correctness of my fix for that bug.
+ *
+ * In essence, the bug appeared when at least one process attempted to
+ * write a point selection -- for which collective I/O is not supported,
+ * and at least one other attempted to write some other type of selection
+ * for which collective I/O is supported.
+ *
+ * Since the processes did not compare notes before performing the I/O,
+ * some would attempt collective I/O while others performed independent
+ * I/O. A hang resulted.
+ *
+ * This function reproduces this situation. At present the test hangs
+ * on failure.
+ * JRM - 9/13/04
+ *
+ * Changes: None.
+ */
+
+#define N 4
+
+void io_mode_confusion(void)
+{
+ /*
+ * HDF5 APIs definitions
+ */
+
+ const int rank = 1;
+ const char *dataset_name = "IntArray";
+
+ hid_t file_id, dset_id; /* file and dataset identifiers */
+ hid_t filespace, memspace; /* file and memory dataspace */
+ /* identifiers */
+ hsize_t dimsf[1]; /* dataset dimensions */
+ int data[N] = {1}; /* data buffer to write */
+ hssize_t select[N] = {0L,1L,2L,3L};
+ hssize_t start[1];
+ hsize_t stride[1];
+ hsize_t count[1];
+ hsize_t block[1];
+ hid_t plist_id; /* property list identifier */
+ int i;
+ herr_t status;
+
+
+ /*
+ * MPI variables
+ */
+
+ int mpi_size, mpi_rank;
+
+
+ /*
+ * test bed related variables
+ */
+
+ char * fcn_name = "io_mode_confusion";
+ const hbool_t verbose = FALSE;
+ H5Ptest_param_t * pt;
+ char * filename;
+
+
+ pt = (H5Ptest_param_t *) GetTestParameters();
+ filename = pt->name;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /*
+ * Set up file access property list with parallel I/O access
+ */
+
+ if ( verbose )
+ HDfprintf(stdout, "%0d:%s: Setting up property list.\n",
+ mpi_rank, fcn_name);
+
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+
+ VRFY((plist_id != -1), "H5Pcreate() failed");
+
+ status = H5Pset_fapl_mpio(plist_id, MPI_COMM_WORLD, MPI_INFO_NULL);
+
+ VRFY(( status >= 0 ), "H5Pset_fapl_mpio() failed");
+
+
+ /*
+ * Create a new file collectively and release property list identifier.
+ */
+
+ if ( verbose )
+ HDfprintf(stdout, "%0d:%s: Creating new file.\n", mpi_rank, fcn_name);
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
+
+ VRFY(( file_id >= 0 ), "H5Fcreate() failed");
+
+ status = H5Pclose(plist_id);
+
+ VRFY(( status >= 0 ), "H5Pclose() failed");
+
+
+ /*
+ * Create the dataspace for the dataset.
+ */
+
+ if ( verbose )
+ HDfprintf(stdout, "%0d:%s: Creating the dataspace for the dataset.\n",
+ mpi_rank, fcn_name);
+
+ dimsf[0] = N;
+
+ filespace = H5Screate_simple(rank, dimsf, NULL);
+
+ VRFY(( filespace >= 0 ), "H5Screate_simple() failed.");
+
+
+ /*
+ * Create the dataset with default properties and close filespace.
+ */
+
+ if ( verbose )
+ HDfprintf(stdout,
+ "%0d:%s: Creating the dataset, and closing filespace.\n",
+ mpi_rank, fcn_name);
+
+ dset_id = H5Dcreate(file_id, dataset_name, H5T_NATIVE_INT, filespace,
+ H5P_DEFAULT);
+
+ VRFY(( dset_id >= 0 ), "H5Dcreate() failed");
+
+ status = H5Sclose(filespace);
+
+ VRFY(( status >= 0 ), "H5Sclose() failed");
+
+
+ if ( verbose )
+ HDfprintf(stdout, "%0d:%s: Calling H5Screate_simple().\n",
+ mpi_rank, fcn_name);
+
+ memspace = H5Screate_simple(rank, dimsf, NULL);
+
+ VRFY(( memspace >= 0 ), "H5Screate_simple() failed.");
+
+
+ if( mpi_rank == 0 ) {
+
+ if ( verbose )
+ HDfprintf(stdout, "%0d:%s: Calling H5Sselect_all(memspace).\n",
+ mpi_rank, fcn_name);
+
+ status = H5Sselect_all(memspace);
+
+ VRFY(( status >= 0 ), "H5Sselect_all() failed");
+
+ } else {
+
+ if ( verbose )
+ HDfprintf(stdout, "%0d:%s: Calling H5Sselect_none(memspace).\n",
+ mpi_rank, fcn_name);
+
+ status = H5Sselect_none(memspace);
+
+ VRFY(( status >= 0 ), "H5Sselect_none() failed");
+
+ }
+
+
+ if ( verbose )
+ HDfprintf(stdout, "%0d:%s: Calling MPI_Barrier().\n",
+ mpi_rank, fcn_name);
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+
+ if ( verbose )
+ HDfprintf(stdout, "%0d:%s: Calling H5Dget_space().\n",
+ mpi_rank, fcn_name);
+
+ filespace = H5Dget_space(dset_id);
+
+ VRFY(( filespace >= 0 ), "H5Dget_space() failed");
+
+
+ start[0] = 0L;
+ stride[0] = 1;
+ count[0] = 1;
+ block[0] = N;
+
+ if ( mpi_rank == 0 ) {
+
+ /* select all */
+
+ if ( verbose )
+ HDfprintf(stdout,
+ "%0d:%s: Calling H5Sselect_elements() -- set up hang?\n",
+ mpi_rank, fcn_name);
+
+ status = H5Sselect_elements(filespace, H5S_SELECT_SET, N,
+ (const hssize_t **)&select);
+
+ VRFY(( status >= 0 ), "H5Sselect_elements() failed");
+
+ } else {
+
+ /* select nothing */
+
+ if ( verbose )
+ HDfprintf(stdout, "%0d:%s: Calling H5Sselect_none().\n",
+ mpi_rank, fcn_name);
+
+ status = H5Sselect_none(filespace);
+
+ VRFY(( status >= 0 ), "H5Sselect_none() failed");
+
+ }
+
+
+ if ( verbose )
+ HDfprintf(stdout, "%0d:%s: Calling MPI_Barrier().\n",
+ mpi_rank, fcn_name);
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+
+ if ( verbose )
+ HDfprintf(stdout, "%0d:%s: Calling H5Pcreate().\n", mpi_rank, fcn_name);
+
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+
+ VRFY(( plist_id != -1 ), "H5Pcreate() failed");
+
+
+ if ( verbose )
+ HDfprintf(stdout, "%0d:%s: Calling H5Pset_dxpl_mpio().\n",
+ mpi_rank, fcn_name);
+
+ status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
+
+ VRFY(( status >= 0 ), "H5Pset_dxpl_mpio() failed");
+
+
+ if ( verbose )
+ HDfprintf(stdout, "%0d:%s: Calling H5Dwrite() -- hang here?.\n",
+ mpi_rank, fcn_name);
+
+ status = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace, filespace,
+ plist_id, data);
+
+ if ( verbose )
+ HDfprintf(stdout, "%0d:%s: Returned from H5Dwrite(), status=%d.\n",
+ mpi_rank, fcn_name, status);
+
+ VRFY(( status >= 0 ), "H5Dwrite() failed");
+
+ /*
+ * Close/release resources.
+ */
+
+ if ( verbose )
+ HDfprintf(stdout, "%0d:%s: Cleaning up from test.\n",
+ mpi_rank, fcn_name);
+
+ status = H5Dclose(dset_id);
+ VRFY(( status >= 0 ), "H5Dclose() failed");
+
+ status = H5Sclose(filespace);
+ VRFY(( status >= 0 ), "H5Dclose() failed");
+
+ status = H5Sclose(memspace);
+ VRFY(( status >= 0 ), "H5Sclose() failed");
+
+ status = H5Pclose(plist_id);
+ VRFY(( status >= 0 ), "H5Pclose() failed");
+
+ status = H5Fclose(file_id);
+ VRFY(( status >= 0 ), "H5Fclose() failed");
+
+
+ if ( verbose )
+ HDfprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name);
+
+ return;
+
+} /* io_mode_confusion() */
+
+#undef N
+
/*=============================================================================
* End of t_mdset.c
*===========================================================================*/
diff --git a/testpar/t_ph5basic.c b/testpar/t_ph5basic.c
index 85b5924..0c92c46 100644
--- a/testpar/t_ph5basic.c
+++ b/testpar/t_ph5basic.c
@@ -11,7 +11,6 @@
* http://hdf.ncsa.uiuc.edu/HDF5/doc/Copyright.html. If you do not have *
* access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-/* $Id$ */
/*
* Test parallel HDF5 basic components
diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c
index 1db6e9a..d8fb74b 100644
--- a/testpar/testphdf5.c
+++ b/testpar/testphdf5.c
@@ -41,7 +41,7 @@ void *old_client_data; /* previous error handler arg.*/
/* FILENAME and filenames must have the same number of names.
* Use PARATESTFILE in general and use a separated filename only if the file
* created in one test is accessed by a different test.
- * filenames[0] is reserved as the file name for PARATESTFILE.
+ * FILENAME[0] is reserved as the file name for PARATESTFILE.
*/
#define PARATESTFILE filenames[0]
const char *FILENAME[2]={
@@ -334,6 +334,7 @@ int main(int argc, char **argv)
int mpi_size, mpi_rank; /* mpi variables */
H5Ptest_param_t ndsets_params, ngroups_params;
H5Ptest_param_t collngroups_params;
+ H5Ptest_param_t io_mode_confusion_params;
/* Un-buffer the stdout and stderr */
setbuf(stderr, NULL);
@@ -384,6 +385,11 @@ int main(int argc, char **argv)
AddTest("eidsetw2", extend_writeInd2, NULL,
"extendible dataset independent write #2", PARATESTFILE);
+#ifdef H5_HAVE_FILTER_DEFLATE
+ AddTest("cmpdsetr", compress_readAll, NULL,
+ "compressed dataset collective read", PARATESTFILE);
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
ndsets_params.name = PARATESTFILE;
ndsets_params.count = ndatasets;
AddTest("ndsetw", multiple_dset_write, NULL,
@@ -432,6 +438,13 @@ int main(int argc, char **argv)
"collective to independent chunk io",PARATESTFILE);
}
+ io_mode_confusion_params.name = PARATESTFILE;
+ io_mode_confusion_params.count = 0; /* value not used */
+
+ AddTest("I/Omodeconf", io_mode_confusion, NULL,
+ "I/O mode confusion test -- hangs quickly on failure",
+ &io_mode_confusion_params);
+
/* Display testing information */
TestInfo(argv[0]);
diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h
index b295317..a01c62e 100644
--- a/testpar/testphdf5.h
+++ b/testpar/testphdf5.h
@@ -133,7 +133,7 @@ typedef int DATATYPE;
extern int dim0, dim1; /*Dataset dimensions */
extern int chunkdim0, chunkdim1; /*Chunk dimensions */
extern int nerrors; /*errors count */
-extern H5E_auto_t old_func; /* previous error handler */
+extern H5E_auto_t old_func; /* previous error handler */
extern void *old_client_data; /*previous error handler arg.*/
extern int facc_type; /*Test file access type */
@@ -162,6 +162,10 @@ void coll_chunk1(void);
void coll_chunk2(void);
void coll_chunk3(void);
void coll_chunk4(void);
+void io_mode_confusion(void);
+#ifdef H5_HAVE_FILTER_DEFLATE
+void compress_readAll(void);
+#endif /* H5_HAVE_FILTER_DEFLATE */
/* commonly used prototypes */
hid_t create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type, hbool_t use_gpfs);