author     Quincey Koziol <koziol@hdfgroup.org>    2004-10-04 20:29:31 (GMT)
committer  Quincey Koziol <koziol@hdfgroup.org>    2004-10-04 20:29:31 (GMT)
commit     74a448d084ac1e09be38fdf80c7b7298e3957ea4 (patch)
tree       0f47590955318bf799c5c6d7cefeecc66fc9bebe /src/H5Dmpio.c
parent     e2f3ab0ab01044d515eb74b64d24a569d06e3d86 (diff)
[svn-r9358] Purpose:
Bug fix
Description:
Relax restrictions on parallel I/O to allow compressed, chunked datasets
to be read in parallel (collective access will be degraded to independent
access, but the data will still be retrieved). A usage sketch follows the
platform list below.
Platforms tested:
FreeBSD 4.10 (sleipnir) w/parallel
Solaris 2.7 (arabica)
IRIX64 6.5 (modi4)
h5committest
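For context, a minimal sketch (not part of the commit) of the access pattern this fix enables: every rank requests a collective read of a chunked, compressed dataset, and the library now serves the request by falling back to independent I/O internally instead of failing. The file name "example.h5" and dataset name "compressed_chunked" are placeholders, the dataset is assumed small enough to fit the buffer, and most error checking is omitted; the two-argument H5Dopen is the 1.6-era API.

#include <assert.h>
#include <mpi.h>
#include "hdf5.h"

int
main(int argc, char *argv[])
{
    hid_t  fapl, file, dset, dxpl;
    double buf[100];    /* the placeholder dataset is assumed to hold at most 100 doubles */

    MPI_Init(&argc, &argv);

    /* Open the file for parallel access with the MPI-IO file driver */
    fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
    file = H5Fopen("example.h5", H5F_ACC_RDONLY, fapl);

    /* "compressed_chunked" is assumed to be a chunked dataset with a gzip filter */
    dset = H5Dopen(file, "compressed_chunked");

    /* Request collective I/O; for a filtered dataset the library now
     * degrades this to independent I/O instead of failing the read */
    dxpl = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
    assert(H5Dread(dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, dxpl, buf) >= 0);

    H5Pclose(dxpl);
    H5Dclose(dset);
    H5Fclose(file);
    H5Pclose(fapl);
    MPI_Finalize();
    return 0;
}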
Diffstat (limited to 'src/H5Dmpio.c')
-rw-r--r--   src/H5Dmpio.c   |   15
1 file changed, 10 insertions, 5 deletions
diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c
index 032dea9..08c6a48 100644
--- a/src/H5Dmpio.c
+++ b/src/H5Dmpio.c
@@ -63,7 +63,7 @@ H5D_mpio_spaces_xfer(H5D_io_info_t *io_info, size_t elmt_size,
  *-------------------------------------------------------------------------
  */
 htri_t
-H5D_mpio_opt_possible( const H5F_t *file, const H5S_t *mem_space, const H5S_t *file_space, const unsigned flags,const H5O_layout_t *layout)
+H5D_mpio_opt_possible( const H5D_t *dset, const H5S_t *mem_space, const H5S_t *file_space, const unsigned flags)
 {
     htri_t c1,c2;               /* Flags whether a selection is optimizable */
     htri_t ret_value=TRUE;
@@ -71,6 +71,7 @@ H5D_mpio_opt_possible( const H5F_t *file, const H5S_t *mem_space, const H5S_t *f
     FUNC_ENTER_NOAPI(H5D_mpio_opt_possible, FAIL);
 
     /* Check args */
+    assert(dset);
     assert(mem_space);
     assert(file_space);
 
@@ -116,10 +117,14 @@ H5D_mpio_opt_possible( const H5F_t *file, const H5S_t *mem_space, const H5S_t *f
         int mpi_code;               /* MPI return code */
         unsigned u;                 /* Local index variable */
 
+        /* Disallow collective I/O if there are any I/O filters on chunks */
+        if(dset->shared->dcpl_cache.pline.nused>0)
+            HGOTO_DONE(FALSE)
+
         /* Getting MPI communicator and rank */
-        if((comm = H5F_mpi_get_comm(file))==MPI_COMM_NULL)
+        if((comm = H5F_mpi_get_comm(dset->ent.file))==MPI_COMM_NULL)
             HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't retrieve MPI communicator")
-        if((mpi_rank = H5F_mpi_get_rank(file))<0)
+        if((mpi_rank = H5F_mpi_get_rank(dset->ent.file))<0)
             HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't retrieve MPI rank")
 
         /* Currently collective chunking storage
@@ -150,8 +155,8 @@ H5D_mpio_opt_possible( const H5F_t *file, const H5S_t *mem_space, const H5S_t *f
         if(H5S_SELECT_BOUNDS(file_space,startf,endf)==FAIL)
             HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE,FAIL, "invalid check for single selection blocks");
 
-        for(u=0; u < layout->u.chunk.ndims; u++)
-            chunk_dim[u] = layout->u.chunk.dim[u];
+        for(u=0; u < dset->shared->layout.u.chunk.ndims; u++)
+            chunk_dim[u] = dset->shared->layout.u.chunk.dim[u];
 
         /* Case 1: check whether all hyperslab in this process is inside one chunk.
           Note: we don't handle when starting point is less than zero since that may cover
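For illustration, a hedged sketch of how an application could test, from user code, roughly the same condition the patch checks internally (dcpl_cache.pline.nused > 0): count the filters on the dataset's creation property list. A nonzero count means a collective request on that dataset is now served with independent I/O rather than rejected. The helper name and the assumption of an already-open dataset handle are illustrative, not part of the commit.

#include <stdio.h>
#include "hdf5.h"

/* Report whether a dataset's chunks pass through any I/O filters (compression,
 * shuffle, ...).  This mirrors the patch's internal pline.nused check: a
 * nonzero filter count means a collective request on this dataset is degraded
 * to independent access.  "dset" is assumed to be a valid, already-open
 * dataset handle. */
static void
report_filter_status(hid_t dset)
{
    hid_t dcpl     = H5Dget_create_plist(dset);
    int   nfilters = H5Pget_nfilters(dcpl);

    if(nfilters > 0)
        printf("dataset has %d I/O filter(s): collective I/O degrades to independent\n", nfilters);
    else
        printf("dataset is unfiltered: collective I/O can be used directly\n");

    H5Pclose(dcpl);
}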