From fb7987e708eb08d7cf7675a2aff50668a4941a55 Mon Sep 17 00:00:00 2001
From: Albert Cheng
Date: Mon, 27 Nov 2000 13:25:18 -0500
Subject: [svn-r3003] Purpose:

Bug fix

Description:
    The optimized MPI-IO calls, H5S_mpio_spaces_write/H5S_mpio_spaces_read,
    are now used for collective data transfer only, since they call
    H5FD_mpio_setup, which eventually calls MPI_File_set_view from
    H5FD_mpio_read or H5FD_mpio_write.  MPI_File_set_view is a collective
    call; letting independent data transfers use this route would result
    in hanging.

Solution:
    For now, the check is done in H5D_write and H5D_read before
    H5S_mpio_spaces_write/H5S_mpio_spaces_read is called, because the
    checking code in H5S_mpio_spaces_xfer, though it has the right idea,
    is not correct yet.

Platforms tested:
    IRIX64-64 parallel.
---
 src/H5D.c     | 21 +++++++++++++++++++--
 src/H5Smpio.c | 30 +++++++++++++++++++++++++++++-
 2 files changed, 48 insertions(+), 3 deletions(-)

diff --git a/src/H5D.c b/src/H5D.c
index 3c1d8f8..1762a62 100644
--- a/src/H5D.c
+++ b/src/H5D.c
@@ -1551,6 +1551,10 @@ H5D_close(H5D_t *dataset)
  * Added the code that when it detects it is not safe to process a
  * COLLECTIVE read request without hanging, it changes it to
  * INDEPENDENT calls.
+ *
+ * Albert Cheng, 2000-11-27
+ * Changed to use the optimized MPIO transfer for Collective calls only.
+ *
  *-------------------------------------------------------------------------
  */
 herr_t
@@ -1673,7 +1677,12 @@ H5D_read(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space,
      * turn out to be inappropriate for MPI-IO).
      */
     if (H5_mpi_opt_types_g && IS_H5FD_MPIO(dataset->ent.file)) {
-	sconv->read = H5S_mpio_spaces_read;
+	/* Only a collective read should call this, since it eventually
+	 * calls MPI_File_set_view, which is a collective call.
+	 * See H5S_mpio_spaces_xfer() for details.
+	 */
+	if (doing_mpio && xfer_mode==H5FD_MPIO_COLLECTIVE)
+	    sconv->read = H5S_mpio_spaces_read;
     }
 #endif /*H5_HAVE_PARALLEL*/
@@ -1978,6 +1987,9 @@ printf("%s: check 2.0, src_type_size=%d, dst_type_size=%d, target_size=%d, min_e
  * COLLECTIVE write request without hanging, it changes it to
  * INDEPENDENT calls.
+ *
+ * Albert Cheng, 2000-11-27
+ * Changed to use the optimized MPIO transfer for Collective calls only.
+ *
  *-------------------------------------------------------------------------
  */
 herr_t
@@ -2137,7 +2149,12 @@ H5D_write(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space,
      * turn out to be inappropriate for MPI-IO).
      */
     if (H5_mpi_opt_types_g && IS_H5FD_MPIO(dataset->ent.file)) {
-	sconv->write = H5S_mpio_spaces_write;
+	/* Only a collective write should call this, since it eventually
+	 * calls MPI_File_set_view, which is a collective call.
+	 * See H5S_mpio_spaces_xfer() for details.
+	 */
+	if (doing_mpio && xfer_mode==H5FD_MPIO_COLLECTIVE)
+	    sconv->write = H5S_mpio_spaces_write;
     }
 #endif /*H5_HAVE_PARALLEL*/
diff --git a/src/H5Smpio.c b/src/H5Smpio.c
index db6f762..fb92862 100644
--- a/src/H5Smpio.c
+++ b/src/H5Smpio.c
@@ -541,6 +541,7 @@ H5S_mpio_space_type( const H5S_t *space, const size_t elmt_size,
  * rky 980918
  * Added must_convert parameter to let caller know we can't optimize
  * the xfer.
+ *
  * Albert Cheng, 001123
  * Include the MPI_type freeing as part of cleanup code.
  *
@@ -587,7 +588,34 @@ H5S_mpio_spaces_xfer(H5F_t *f, const struct H5O_layout_t *layout,
 	*must_convert = 1;	/* can't do optimized xfer; do the old way */
 	HGOTO_DONE(SUCCEED);
     }
-    
+
+    /*
+     * For collective data transfer only, since this eventually calls
+     * H5FD_mpio_setup, which does the setup to eventually call
+     * MPI_File_set_view in H5FD_mpio_read or H5FD_mpio_write.
+     * MPI_File_set_view is a collective call.  Letting independent
+     * data transfer use this route would result in hanging.
+     */
+#if 0
+    /* For now, the checking is being done in
+     * H5D_write and H5D_read before this routine is called, because
+     * the following block of code, though it has the right idea, is not
+     * correct yet.
+     */
+    {	/* Get the transfer mode */
+	H5D_xfer_t *dxpl;
+	H5FD_mpio_dxpl_t *dx;
+
+	if (H5P_DEFAULT!=dxpl_id && (dxpl=H5I_object(dxpl_id)) &&
+	    H5FD_MPIO==dxpl->driver_id && (dx=dxpl->driver_info) &&
+	    H5FD_MPIO_COLLECTIVE==dx->xfer_mode) {
+	    /* let it fall through */
+	}else{
+	    *must_convert = 1;	/* can't do optimized xfer; do the old way */
+	    HGOTO_DONE(SUCCEED);
+	}
+    }
+#endif
 
     /* create the MPI buffer type */
     err = H5S_mpio_space_type( mem_space, elmt_size,
--
cgit v0.12
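
For context, the new doing_mpio && xfer_mode==H5FD_MPIO_COLLECTIVE guard keys on
the transfer mode an application sets on its dataset transfer property list.
Below is a minimal, hypothetical sketch (not part of the patch) of how a
parallel program opts into collective transfer, written against the public
HDF5 API of this era (five-argument H5Dcreate); the file name, dataset name,
and buffer contents are placeholders.  With H5FD_MPIO_COLLECTIVE the write is
routed through the optimized H5S_mpio_spaces_write path, which is safe because
every rank reaches the collective MPI_File_set_view that H5FD_mpio_setup
eventually triggers; with H5FD_MPIO_INDEPENDENT the write now stays on the
generic conversion path instead of hanging there.

    #include <mpi.h>
    #include <hdf5.h>

    int main(int argc, char *argv[])
    {
        hid_t   fapl, file, space, dset, dxpl;
        hsize_t dims[1] = {16};
        int     buf[16] = {0};

        MPI_Init(&argc, &argv);

        /* Access the file through the MPI-IO file driver. */
        fapl = H5Pcreate(H5P_FILE_ACCESS);
        H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
        file = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);

        space = H5Screate_simple(1, dims, NULL);
        dset  = H5Dcreate(file, "data", H5T_NATIVE_INT, space, H5P_DEFAULT);

        /* Select COLLECTIVE transfer on the dataset transfer property list.
         * This is the mode the new check in H5D_write looks for before it
         * installs H5S_mpio_spaces_write. */
        dxpl = H5Pcreate(H5P_DATASET_XFER);
        H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);

        /* Every rank participates in the write.  For brevity each rank
         * writes the whole (tiny) dataset; a real application would select
         * a disjoint hyperslab per rank. */
        H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, buf);

        H5Pclose(dxpl);
        H5Dclose(dset);
        H5Sclose(space);
        H5Fclose(file);
        H5Pclose(fapl);
        MPI_Finalize();
        return 0;
    }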