Diffstat (limited to 'src/H5Dmpio.c')
-rw-r--r--  src/H5Dmpio.c  432
1 file changed, 122 insertions(+), 310 deletions(-)
diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c
index 79572c0..669da68 100644
--- a/src/H5Dmpio.c
+++ b/src/H5Dmpio.c
@@ -32,6 +32,7 @@
/* Headers */
/***********/
#include "H5private.h" /* Generic Functions */
+#include "H5CXprivate.h" /* API Contexts */
#include "H5Dpkg.h" /* Datasets */
#include "H5Eprivate.h" /* Error handling */
#include "H5Fprivate.h" /* File access */
@@ -207,31 +208,23 @@ typedef struct H5D_filtered_collective_io_info_t {
static herr_t H5D__chunk_collective_io(H5D_io_info_t *io_info,
const H5D_type_info_t *type_info, H5D_chunk_map_t *fm);
static herr_t H5D__multi_chunk_collective_io(H5D_io_info_t *io_info,
- const H5D_type_info_t *type_info, H5D_chunk_map_t *fm,
- H5P_genplist_t *dx_plist);
+ const H5D_type_info_t *type_info, H5D_chunk_map_t *fm);
static herr_t H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info,
- const H5D_type_info_t *type_info, H5D_chunk_map_t *fm,
- H5P_genplist_t *dx_plist);
+ const H5D_type_info_t *type_info, H5D_chunk_map_t *fm);
static herr_t H5D__link_chunk_collective_io(H5D_io_info_t *io_info,
- const H5D_type_info_t *type_info, H5D_chunk_map_t *fm, int sum_chunk,
- H5P_genplist_t *dx_plist);
+ const H5D_type_info_t *type_info, H5D_chunk_map_t *fm, int sum_chunk);
static herr_t H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info,
- const H5D_type_info_t *type_info, H5D_chunk_map_t *fm,
- H5P_genplist_t *dx_plist);
+ const H5D_type_info_t *type_info, H5D_chunk_map_t *fm);
static herr_t H5D__inter_collective_io(H5D_io_info_t *io_info,
const H5D_type_info_t *type_info, const H5S_t *file_space,
const H5S_t *mem_space);
static herr_t H5D__final_collective_io(H5D_io_info_t *io_info,
- const H5D_type_info_t *type_info, hsize_t nelmts, MPI_Datatype *mpi_file_type,
- MPI_Datatype *mpi_buf_type);
+ const H5D_type_info_t *type_info, hsize_t nelmts, MPI_Datatype mpi_file_type,
+ MPI_Datatype mpi_buf_type);
static herr_t H5D__sort_chunk(H5D_io_info_t *io_info, const H5D_chunk_map_t *fm,
H5D_chunk_addr_info_t chunk_addr_info_array[], int many_chunk_opt);
static herr_t H5D__obtain_mpio_mode(H5D_io_info_t *io_info, H5D_chunk_map_t *fm,
- H5P_genplist_t *dx_plist, uint8_t assign_io_mode[], haddr_t chunk_addr[]);
-static herr_t H5D__ioinfo_xfer_mode(H5D_io_info_t *io_info, H5P_genplist_t *dx_plist,
- H5FD_mpio_xfer_t xfer_mode);
-static herr_t H5D__ioinfo_coll_opt_mode(H5D_io_info_t *io_info, H5P_genplist_t *dx_plist,
- H5FD_mpio_collective_opt_t coll_opt_mode);
+ uint8_t assign_io_mode[], haddr_t chunk_addr[]);
static herr_t H5D__mpio_get_min_chunk(const H5D_io_info_t *io_info,
const H5D_chunk_map_t *fm, int *min_chunkf);
static herr_t H5D__mpio_get_sum_chunk(const H5D_io_info_t *io_info,
@@ -284,11 +277,12 @@ static int H5D__cmp_filtered_collective_io_info_entry_owner(const void *filtered
*/
htri_t
H5D__mpio_opt_possible(const H5D_io_info_t *io_info, const H5S_t *file_space,
- const H5S_t *mem_space, const H5D_type_info_t *type_info, H5P_genplist_t *dx_plist)
+ const H5S_t *mem_space, const H5D_type_info_t *type_info)
{
- int local_cause = 0; /* Local reason(s) for breaking collective mode */
- int global_cause = 0; /* Global reason(s) for breaking collective mode */
- htri_t ret_value = SUCCEED; /* Return value */
+ H5FD_mpio_xfer_t io_xfer_mode; /* MPI I/O transfer mode */
+ int local_cause = 0; /* Local reason(s) for breaking collective mode */
+ int global_cause = 0; /* Global reason(s) for breaking collective mode */
+ htri_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -300,7 +294,9 @@ H5D__mpio_opt_possible(const H5D_io_info_t *io_info, const H5S_t *file_space,
/* For independent I/O, get out quickly and don't try to form consensus */
- if(io_info->dxpl_cache->xfer_mode == H5FD_MPIO_INDEPENDENT)
+ if(H5CX_get_io_xfer_mode(&io_xfer_mode) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get MPI-I/O transfer mode")
+ if(io_xfer_mode == H5FD_MPIO_INDEPENDENT)
local_cause |= H5D_MPIO_SET_INDEPENDENT;
/* Optimized MPI types flag must be set */
@@ -350,13 +346,9 @@ H5D__mpio_opt_possible(const H5D_io_info_t *io_info, const H5S_t *file_space,
HMPI_GOTO_ERROR(FAIL, "MPI_Allreduce failed", mpi_code)
} /* end else */
- /* Write the local value of no-collective-cause to the DXPL. */
- if(H5P_set(dx_plist, H5D_MPIO_LOCAL_NO_COLLECTIVE_CAUSE_NAME, &local_cause) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set local no collective cause property")
-
- /* Write the global value of no-collective-cause to the DXPL. */
- if(H5P_set(dx_plist, H5D_MPIO_GLOBAL_NO_COLLECTIVE_CAUSE_NAME, &global_cause) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set global no collective cause property")
+ /* Set the local & global values of no-collective-cause in the API context */
+ H5CX_set_mpio_local_no_coll_cause(local_cause);
+ H5CX_set_mpio_global_no_coll_cause(global_cause);
/* Set the return value, based on the global cause */
ret_value = global_cause > 0 ? FALSE : TRUE;
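
Each rank accumulates its reasons for breaking collective I/O into local_cause, and the MPI_Allreduce above (a bitwise OR across ranks) yields the global_cause every rank agrees on; collective I/O proceeds only when the global mask is zero. A minimal standalone sketch of that consensus step, with illustrative flag values rather than HDF5's actual cause bits:

#include <mpi.h>
#include <stdio.h>

#define CAUSE_SET_INDEPENDENT      0x01   /* illustrative flag values */
#define CAUSE_NOT_SIMPLE_DATASPACE 0x02

int main(int argc, char *argv[])
{
    int local_cause = 0, global_cause = 0, rank;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 1)  /* pretend rank 1 has a reason to break collective I/O */
        local_cause |= CAUSE_SET_INDEPENDENT;

    /* Bitwise-OR union of every rank's causes; all ranks get the same answer */
    MPI_Allreduce(&local_cause, &global_cause, 1, MPI_INT, MPI_BOR, MPI_COMM_WORLD);

    if (rank == 0)
        printf("collective I/O possible: %s\n", global_cause == 0 ? "yes" : "no");

    MPI_Finalize();
    return 0;
}

Both masks remain queryable by applications afterwards through the public H5Pget_mpio_no_collective_cause() routine.
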
@@ -387,7 +379,7 @@ H5D__mpio_select_read(const H5D_io_info_t *io_info, const H5D_type_info_t H5_ATT
FUNC_ENTER_PACKAGE
H5_CHECK_OVERFLOW(mpi_buf_count, hsize_t, size_t);
- if(H5F_block_read(io_info->dset->oloc.file, H5FD_MEM_DRAW, store_contig->dset_addr, (size_t)mpi_buf_count, io_info->raw_dxpl_id, io_info->u.rbuf) < 0)
+ if(H5F_block_read(io_info->dset->oloc.file, H5FD_MEM_DRAW, store_contig->dset_addr, (size_t)mpi_buf_count, io_info->u.rbuf) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "can't finish collective parallel read")
done:
@@ -417,7 +409,7 @@ H5D__mpio_select_write(const H5D_io_info_t *io_info, const H5D_type_info_t H5_AT
/*OKAY: CAST DISCARDS CONST QUALIFIER*/
H5_CHECK_OVERFLOW(mpi_buf_count, hsize_t, size_t);
- if(H5F_block_write(io_info->dset->oloc.file, H5FD_MEM_DRAW, store_contig->dset_addr, (size_t)mpi_buf_count, io_info->raw_dxpl_id, io_info->u.wbuf) < 0)
+ if(H5F_block_write(io_info->dset->oloc.file, H5FD_MEM_DRAW, store_contig->dset_addr, (size_t)mpi_buf_count, io_info->u.wbuf) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "can't finish collective parallel write")
done:
@@ -533,81 +525,6 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5D__ioinfo_xfer_mode
- *
- * Purpose: Switch between collective & independent MPI I/O
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * Friday, August 12, 2005
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5D__ioinfo_xfer_mode(H5D_io_info_t *io_info, H5P_genplist_t *dx_plist,
- H5FD_mpio_xfer_t xfer_mode)
-{
- herr_t ret_value = SUCCEED; /* return value */
-
- FUNC_ENTER_STATIC
-
- /* Change the xfer_mode */
- io_info->dxpl_cache->xfer_mode = xfer_mode;
- if(H5P_set(dx_plist, H5D_XFER_IO_XFER_MODE_NAME, &io_info->dxpl_cache->xfer_mode) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set transfer mode")
-
- /* Change the "single I/O" function pointers */
- if(xfer_mode == H5FD_MPIO_INDEPENDENT) {
- /* Set the pointers to the original, non-MPI-specific routines */
- io_info->io_ops.single_read = io_info->orig.io_ops.single_read;
- io_info->io_ops.single_write = io_info->orig.io_ops.single_write;
- } /* end if */
- else {
- HDassert(xfer_mode == H5FD_MPIO_COLLECTIVE);
-
- /* Set the pointers to the MPI-specific routines */
- io_info->io_ops.single_read = H5D__mpio_select_read;
- io_info->io_ops.single_write = H5D__mpio_select_write;
- } /* end else */
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5D__ioinfo_xfer_mode() */
-
-
-/*-------------------------------------------------------------------------
- * Function: H5D__ioinfo_coll_opt_mode
- *
- * Purpose: Switch between using collective & independent MPI I/O w/file
- * set view
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: MuQun Yang
- * Oct. 5th, 2006
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5D__ioinfo_coll_opt_mode(H5D_io_info_t *io_info, H5P_genplist_t *dx_plist,
- H5FD_mpio_collective_opt_t coll_opt_mode)
-{
- herr_t ret_value = SUCCEED; /* return value */
-
- FUNC_ENTER_STATIC
-
- /* Change the optimal xfer_mode */
- io_info->dxpl_cache->coll_opt_mode = coll_opt_mode;
- if(H5P_set(dx_plist, H5D_XFER_MPIO_COLLECTIVE_OPT_NAME, &io_info->dxpl_cache->coll_opt_mode) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set transfer mode")
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5D__ioinfo_coll_opt_mode() */
-
-
-/*-------------------------------------------------------------------------
* Function: H5D__mpio_get_min_chunk
*
* Purpose: Routine for obtaining minimum number of chunks to cover
@@ -699,28 +616,21 @@ H5D__contig_collective_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_
H5D_chunk_map_t H5_ATTR_UNUSED *fm)
{
H5D_mpio_actual_io_mode_t actual_io_mode = H5D_MPIO_CONTIGUOUS_COLLECTIVE;
- H5P_genplist_t *dx_plist; /* Pointer to DXPL */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
/* Sanity check */
HDassert(H5FD_MPIO == H5F_DRIVER_ID(io_info->dset->oloc.file));
- HDassert(TRUE == H5P_isa_class(io_info->raw_dxpl_id, H5P_DATASET_XFER));
/* Call generic internal collective I/O routine */
if(H5D__inter_collective_io(io_info, type_info, file_space, mem_space) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "couldn't finish shared collective MPI-IO")
- /* Obtain the data transfer properties */
- if(NULL == (dx_plist = (H5P_genplist_t *)H5I_object(io_info->raw_dxpl_id)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data transfer property list")
-
/* Set the actual I/O mode property. internal_collective_io will not break to
* independent I/O, so we set it here.
*/
- if(H5P_set(dx_plist, H5D_MPIO_ACTUAL_IO_MODE_NAME, &actual_io_mode) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set actual io mode property")
+ H5CX_set_mpio_actual_io_mode(actual_io_mode);
done:
FUNC_LEAVE_NOAPI(ret_value)
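
The actual-io-mode value recorded here is what applications later read back through the public H5Pget_mpio_actual_io_mode() query on the transfer property list they used. A minimal usage sketch (error checking omitted; assumes an open dataset in a file accessed through the MPI-IO driver):

#include <hdf5.h>
#include <stdio.h>

/* Assumes dset is an open dataset in a file opened with an MPI-IO fapl */
static void collective_write_and_report(hid_t dset, const int *buf)
{
    H5D_mpio_actual_io_mode_t mode;
    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
    H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, buf);

    /* Query what the library actually did for this transfer */
    H5Pget_mpio_actual_io_mode(dxpl, &mode);
    printf("actual io mode: %d\n", (int)mode);

    H5Pclose(dxpl);
}
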
@@ -746,28 +656,21 @@ H5D__contig_collective_write(H5D_io_info_t *io_info, const H5D_type_info_t *type
H5D_chunk_map_t H5_ATTR_UNUSED *fm)
{
H5D_mpio_actual_io_mode_t actual_io_mode = H5D_MPIO_CONTIGUOUS_COLLECTIVE;
- H5P_genplist_t *dx_plist; /* Pointer to DXPL */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
/* Sanity check */
HDassert(H5FD_MPIO == H5F_DRIVER_ID(io_info->dset->oloc.file));
- HDassert(TRUE == H5P_isa_class(io_info->raw_dxpl_id, H5P_DATASET_XFER));
/* Call generic internal collective I/O routine */
if(H5D__inter_collective_io(io_info, type_info, file_space, mem_space) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "couldn't finish shared collective MPI-IO")
- /* Obtain the data transfer properties */
- if(NULL == (dx_plist = (H5P_genplist_t *)H5I_object(io_info->raw_dxpl_id)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data transfer property list")
-
/* Set the actual I/O mode property. internal_collective_io will not break to
* independent I/O, so we set it here.
*/
- if(H5P_set(dx_plist, H5D_MPIO_ACTUAL_IO_MODE_NAME, &actual_io_mode) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set actual io mode property")
+ H5CX_set_mpio_actual_io_mode(actual_io_mode);
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -814,7 +717,6 @@ static herr_t
H5D__chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
H5D_chunk_map_t *fm)
{
- H5P_genplist_t *dx_plist; /* Pointer to DXPL */
H5FD_mpio_chunk_opt_t chunk_opt_mode;
int io_option = H5D_MULTI_CHUNK_IO_MORE_OPT;
int sum_chunk = -1;
@@ -831,13 +733,9 @@ H5D__chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
HDassert(type_info);
HDassert(fm);
- /* Obtain the data transfer properties */
- if(NULL == (dx_plist = (H5P_genplist_t *)H5I_object(io_info->raw_dxpl_id)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a file access property list")
-
/* Check the optional property list for the collective chunk IO optimization option */
- if(H5P_get(dx_plist, H5D_XFER_MPIO_CHUNK_OPT_HARD_NAME, &chunk_opt_mode) < 0)
- HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't get chunk optimization option")
+ if(H5CX_get_mpio_chunk_opt_mode(&chunk_opt_mode) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "couldn't get chunk optimization option")
if(H5FD_MPIO_CHUNK_ONE_IO == chunk_opt_mode)
io_option = H5D_ONE_LINK_CHUNK_IO; /*no opt*/
@@ -855,8 +753,8 @@ H5D__chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi size")
/* Get the chunk optimization option threshold */
- if(H5P_get(dx_plist, H5D_XFER_MPIO_CHUNK_OPT_NUM_NAME, &one_link_chunk_io_threshold) < 0)
- HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't get chunk optimization option threshold value")
+ if(H5CX_get_mpio_chunk_opt_num(&one_link_chunk_io_threshold) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "couldn't get chunk optimization option threshold value")
/* step 1: choose an IO option */
/* If the average number of chunks per process is greater than a threshold, we will do one link chunked IO. */
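
In code, that step-1 decision reduces to comparing sum_chunk / mpi_size against the one_link_chunk_io_threshold obtained above; a standalone sketch with illustrative numbers:

#include <stdio.h>

int main(void)
{
    /* Illustrative values only */
    int      sum_chunk = 64;   /* total chunks selected across all ranks */
    int      mpi_size  = 4;    /* number of MPI processes */
    unsigned one_link_chunk_io_threshold = 2; /* avg chunks/process trigger */

    if ((unsigned)(sum_chunk / mpi_size) >= one_link_chunk_io_threshold)
        printf("choose one linked-chunk collective IO\n");
    else
        printf("choose multi-chunk IO\n");
    return 0;
}
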
@@ -865,54 +763,30 @@ H5D__chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
else
temp_not_link_io = TRUE;
-#endif
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
} /* end else */
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
{
- H5P_genplist_t *plist; /* Property list pointer */
- htri_t check_prop;
- int new_value;
-
- /* Get the dataset transfer property list */
- if(NULL == (plist = (H5P_genplist_t *)H5I_object(io_info->raw_dxpl_id)))
- HGOTO_ERROR(H5E_IO, H5E_BADTYPE, FAIL, "not a dataset transfer property list")
-
- /*** Test collective chunk user-input optimization APIs. ***/
- check_prop = H5P_exist_plist(plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME);
- if(check_prop > 0) {
- if(H5D_ONE_LINK_CHUNK_IO == io_option) {
- new_value = 0;
- if(H5P_set(plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, &new_value) < 0)
- HGOTO_ERROR(H5E_IO, H5E_CANTSET, FAIL, "unable to set property value")
- } /* end if */
+ /*** Set collective chunk user-input optimization APIs. ***/
+ if(H5D_ONE_LINK_CHUNK_IO == io_option) {
+ if(H5CX_test_set_mpio_coll_chunk_link_hard(0) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set property value")
} /* end if */
- check_prop = H5P_exist_plist(plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME);
- if(check_prop > 0) {
- if(H5D_MULTI_CHUNK_IO == io_option) {
- new_value = 0;
- if(H5P_set(plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, &new_value) < 0)
- HGOTO_ERROR(H5E_IO, H5E_CANTSET, FAIL, "unable to set property value")
- } /* end if */
+ else if(H5D_MULTI_CHUNK_IO == io_option) {
+ if(H5CX_test_set_mpio_coll_chunk_multi_hard(0) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set property value")
+ } /* end else-if */
+ else if(H5D_ONE_LINK_CHUNK_IO_MORE_OPT == io_option) {
+ if(H5CX_test_set_mpio_coll_chunk_link_num_true(0) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set property value")
} /* end if */
- check_prop = H5P_exist_plist(plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME);
- if(check_prop > 0) {
- if(H5D_ONE_LINK_CHUNK_IO_MORE_OPT == io_option) {
- new_value = 0;
- if(H5P_set(plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, &new_value) < 0)
- HGOTO_ERROR(H5E_IO, H5E_CANTSET, FAIL, "unable to set property value")
- } /* end if */
- } /* end if */
- check_prop = H5P_exist_plist(plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME);
- if(check_prop > 0) {
- if(temp_not_link_io) {
- new_value = 0;
- if(H5P_set(plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, &new_value) < 0)
- HGOTO_ERROR(H5E_IO, H5E_CANTSET, FAIL, "unable to set property value")
- } /* end if */
+ else if(temp_not_link_io) {
+ if(H5CX_test_set_mpio_coll_chunk_link_num_false(0) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set property value")
} /* end if */
}
-#endif
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
/* step 2: Go ahead to do IO.*/
switch (io_option) {
@@ -925,34 +799,31 @@ H5D__chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
* complexity in unfiltering the data when it is read all at once into a
* single buffer.
*/
- if (io_info->op_type == H5D_IO_OP_READ) {
- if(H5D__multi_chunk_filtered_collective_io(io_info, type_info, fm, dx_plist) < 0)
+ if(io_info->op_type == H5D_IO_OP_READ) {
+ if(H5D__multi_chunk_filtered_collective_io(io_info, type_info, fm) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish optimized multiple filtered chunk MPI-IO")
} /* end if */
- else {
- if(H5D__link_chunk_filtered_collective_io(io_info, type_info, fm, dx_plist) < 0)
+ else
+ if(H5D__link_chunk_filtered_collective_io(io_info, type_info, fm) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish filtered linked chunk MPI-IO")
- } /* end else */
} /* end if */
- else {
+ else
/* Perform unfiltered link chunk collective IO */
- if(H5D__link_chunk_collective_io(io_info, type_info, fm, sum_chunk, dx_plist) < 0)
+ if(H5D__link_chunk_collective_io(io_info, type_info, fm, sum_chunk) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish linked chunk MPI-IO")
- } /* end else */
break;
case H5D_MULTI_CHUNK_IO: /* direct request to do multi-chunk IO */
default: /* multiple chunk IO via threshold */
/* Check if there are any filters in the pipeline */
if(io_info->dset->shared->dcpl_cache.pline.nused > 0) {
- if(H5D__multi_chunk_filtered_collective_io(io_info, type_info, fm, dx_plist) < 0)
+ if(H5D__multi_chunk_filtered_collective_io(io_info, type_info, fm) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish optimized multiple filtered chunk MPI-IO")
} /* end if */
- else {
+ else
/* Perform unfiltered multi chunk collective IO */
- if(H5D__multi_chunk_collective_io(io_info, type_info, fm, dx_plist) < 0)
+ if(H5D__multi_chunk_collective_io(io_info, type_info, fm) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish optimized multiple chunk MPI-IO")
- } /* end else */
break;
} /* end switch */
@@ -1038,16 +909,11 @@ done:
* Programmer: Muqun Yang
* Monday, Feb. 13th, 2006
*
- * Modification:
- * - Set H5D_MPIO_ACTUAL_CHUNK_OPT_MODE_NAME and H5D_MPIO_ACTUAL_IO_MODE_NAME
- * dxpl in this.
- * Programmer: Jonathan Kim
- * Date: 2012-10-10
*-------------------------------------------------------------------------
*/
static herr_t
H5D__link_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
- H5D_chunk_map_t *fm, int sum_chunk, H5P_genplist_t *dx_plist)
+ H5D_chunk_map_t *fm, int sum_chunk)
{
H5D_chunk_addr_info_t *chunk_addr_info_array = NULL;
MPI_Datatype chunk_final_mtype; /* Final memory MPI datatype for all chunks with selection */
@@ -1066,20 +932,16 @@ H5D__link_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *typ
int *chunk_mpi_file_counts = NULL; /* Count of MPI file datatype for each chunk */
int *chunk_mpi_mem_counts = NULL; /* Count of MPI memory datatype for each chunk */
int mpi_code; /* MPI return code */
- H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode = H5D_MPIO_LINK_CHUNK;
- H5D_mpio_actual_io_mode_t actual_io_mode = H5D_MPIO_CHUNK_COLLECTIVE;
herr_t ret_value = SUCCEED;
FUNC_ENTER_STATIC
/* Set the actual-chunk-opt-mode property. */
- if(H5P_set(dx_plist, H5D_MPIO_ACTUAL_CHUNK_OPT_MODE_NAME, &actual_chunk_opt_mode) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set actual chunk opt mode property")
+ H5CX_set_mpio_actual_chunk_opt(H5D_MPIO_LINK_CHUNK);
/* Set the actual-io-mode property.
* Link chunk I/O does not break to independent, so can set right away */
- if(H5P_set(dx_plist, H5D_MPIO_ACTUAL_IO_MODE_NAME, &actual_io_mode) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set actual io mode property")
+ H5CX_set_mpio_actual_io_mode(H5D_MPIO_CHUNK_COLLECTIVE);
/* Get the sum # of chunks, if not already available */
if(sum_chunk < 0) {
@@ -1122,7 +984,7 @@ H5D__link_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *typ
mspace = chunk_info->mspace;
/* Look up address of chunk */
- if(H5D__chunk_lookup(io_info->dset, io_info->md_dxpl_id, chunk_info->scaled, &udata) < 0)
+ if(H5D__chunk_lookup(io_info->dset, chunk_info->scaled, &udata) < 0)
HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL, "couldn't get chunk address")
ctg_store.contig.dset_addr = udata.chunk_block.offset;
} /* end else */
@@ -1312,7 +1174,7 @@ if(H5DEBUG(D))
io_info->store = &ctg_store;
/* Perform final collective I/O operation */
- if(H5D__final_collective_io(io_info, type_info, mpi_buf_count, &chunk_final_ftype, &chunk_final_mtype) < 0)
+ if(H5D__final_collective_io(io_info, type_info, mpi_buf_count, chunk_final_ftype, chunk_final_mtype) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish MPI-IO")
} /* end else */
@@ -1402,12 +1264,10 @@ if(H5DEBUG(D))
*/
static herr_t
H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
- H5D_chunk_map_t *fm, H5P_genplist_t *dx_plist)
+ H5D_chunk_map_t *fm)
{
H5D_filtered_collective_io_info_t *chunk_list = NULL; /* The list of chunks being read/written */
H5D_filtered_collective_io_info_t *collective_chunk_list = NULL; /* The list of chunks used during collective operations */
- H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode = H5D_MPIO_LINK_CHUNK; /* The actual chunk IO optimization mode */
- H5D_mpio_actual_io_mode_t actual_io_mode = H5D_MPIO_CHUNK_COLLECTIVE; /* The chunk IO mode used (Independent vs Collective) */
H5D_storage_t ctg_store; /* Chunk storage information as contiguous dataset */
MPI_Datatype mem_type = MPI_BYTE;
MPI_Datatype file_type = MPI_BYTE;
@@ -1425,7 +1285,6 @@ H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, const H5D_type_in
HDassert(io_info);
HDassert(type_info);
HDassert(fm);
- HDassert(dx_plist);
/* Obtain the current rank of the process and the number of processes */
if ((mpi_rank = H5F_mpi_get_rank(io_info->dset->oloc.file)) < 0)
@@ -1434,14 +1293,12 @@ H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, const H5D_type_in
HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi size")
/* Set the actual-chunk-opt-mode property. */
- if (H5P_set(dx_plist, H5D_MPIO_ACTUAL_CHUNK_OPT_MODE_NAME, &actual_chunk_opt_mode) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set actual chunk opt mode property")
+ H5CX_set_mpio_actual_chunk_opt(H5D_MPIO_LINK_CHUNK);
/* Set the actual-io-mode property.
* Link chunk filtered I/O does not break to independent, so can set right away
*/
- if (H5P_set(dx_plist, H5D_MPIO_ACTUAL_IO_MODE_NAME, &actual_io_mode) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set actual io mode property")
+ H5CX_set_mpio_actual_io_mode(H5D_MPIO_CHUNK_COLLECTIVE);
/* Build a list of selected chunks in the collective io operation */
if (H5D__construct_filtered_io_info_list(io_info, type_info, fm, &chunk_list, &chunk_list_num_entries) < 0)
@@ -1454,7 +1311,6 @@ H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, const H5D_type_in
/* Construct chunked index info */
index_info.f = io_info->dset->oloc.file;
- index_info.dxpl_id = io_info->md_dxpl_id;
index_info.pline = &(io_info->dset->shared->dcpl_cache.pline);
index_info.layout = &(io_info->dset->shared->layout.u.chunk);
index_info.storage = &(io_info->dset->shared->layout.storage.u.chunk);
@@ -1538,7 +1394,7 @@ H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, const H5D_type_in
io_info->store = &ctg_store;
/* Perform I/O */
- if (H5D__final_collective_io(io_info, type_info, mpi_buf_count, &file_type, &mem_type) < 0)
+ if (H5D__final_collective_io(io_info, type_info, mpi_buf_count, file_type, mem_type) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish MPI-IO")
/* Participate in the collective re-insertion of all chunks modified
@@ -1594,17 +1450,11 @@ done:
* Programmer: Muqun Yang
* Monday, Feb. 13th, 2006
*
- * Modification:
- * - Set H5D_MPIO_ACTUAL_CHUNK_OPT_MODE_NAME dxpl in this to go along with
- * setting H5D_MPIO_ACTUAL_IO_MODE_NAME dxpl at the bottom.
- * Programmer: Jonathan Kim
- * Date: 2012-10-10
- *
*-------------------------------------------------------------------------
*/
static herr_t
H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
- H5D_chunk_map_t *fm, H5P_genplist_t *dx_plist)
+ H5D_chunk_map_t *fm)
{
H5D_io_info_t ctg_io_info; /* Contiguous I/O info object */
H5D_storage_t ctg_store; /* Chunk storage information as contiguous dataset */
@@ -1614,22 +1464,19 @@ H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *ty
uint8_t *chunk_io_option = NULL;
haddr_t *chunk_addr = NULL;
H5D_storage_t store; /* union of EFL and chunk pointer in file space */
- H5FD_mpio_xfer_t last_xfer_mode = H5FD_MPIO_COLLECTIVE; /* Last parallel transfer for this request (H5D_XFER_IO_XFER_MODE_NAME) */
H5FD_mpio_collective_opt_t last_coll_opt_mode = H5FD_MPIO_COLLECTIVE_IO; /* Last parallel transfer with independent IO or collective IO with this mode */
size_t total_chunk; /* Total # of chunks in dataset */
#ifdef H5Dmpio_DEBUG
int mpi_rank;
#endif
size_t u; /* Local index variable */
- H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode = H5D_MPIO_MULTI_CHUNK; /* actual chunk optimization mode */
H5D_mpio_actual_io_mode_t actual_io_mode = H5D_MPIO_NO_COLLECTIVE; /* Local variable for tracking the I/O mode used. */
herr_t ret_value = SUCCEED;
FUNC_ENTER_STATIC
/* Set the actual chunk opt mode property */
- if(H5P_set(dx_plist, H5D_MPIO_ACTUAL_CHUNK_OPT_MODE_NAME, &actual_chunk_opt_mode) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set actual chunk opt mode property")
+ H5CX_set_mpio_actual_chunk_opt(H5D_MPIO_MULTI_CHUNK);
#ifdef H5Dmpio_DEBUG
mpi_rank = H5F_mpi_get_rank(io_info->dset->oloc.file);
@@ -1648,7 +1495,7 @@ if(H5DEBUG(D))
#endif
/* Obtain IO option for each chunk */
- if(H5D__obtain_mpio_mode(io_info, fm, dx_plist, chunk_io_option, chunk_addr) < 0)
+ if(H5D__obtain_mpio_mode(io_info, fm, chunk_io_option, chunk_addr) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTRECV, FAIL, "unable to obtain MPIO mode")
/* Set up contiguous I/O info object */
@@ -1706,7 +1553,7 @@ if(H5DEBUG(D))
fspace = chunk_info->fspace;
mspace = chunk_info->mspace;
- /* Update the local variable tracking the dxpl's actual io mode property.
+ /* Update the local variable tracking the actual io mode property.
*
* Note: H5D_MPIO_COLLECTIVE_MULTI | H5D_MPIO_INDEPENDENT = H5D_MPIO_MIXED
* to ease switching to mixed I/O without checking the current
@@ -1720,14 +1567,9 @@ if(H5DEBUG(D))
} /* end else */
/* Switch back to collective I/O */
- if(last_xfer_mode != H5FD_MPIO_COLLECTIVE) {
- if(H5D__ioinfo_xfer_mode(io_info, dx_plist, H5FD_MPIO_COLLECTIVE) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't switch to collective I/O")
- last_xfer_mode = H5FD_MPIO_COLLECTIVE;
- } /* end if */
if(last_coll_opt_mode != H5FD_MPIO_COLLECTIVE_IO) {
- if(H5D__ioinfo_coll_opt_mode(io_info, dx_plist, H5FD_MPIO_COLLECTIVE_IO) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't switch to collective I/O")
+ if(H5CX_set_mpio_coll_opt(H5FD_MPIO_COLLECTIVE_IO) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't switch to collective I/O")
last_coll_opt_mode = H5FD_MPIO_COLLECTIVE_IO;
} /* end if */
@@ -1751,7 +1593,7 @@ if(H5DEBUG(D))
fspace = chunk_info->fspace;
mspace = chunk_info->mspace;
- /* Update the local variable tracking the dxpl's actual io mode. */
+ /* Update the local variable tracking the actual io mode. */
actual_io_mode = (H5D_mpio_actual_io_mode_t) (actual_io_mode | H5D_MPIO_CHUNK_INDEPENDENT);
} /* end if */
else {
@@ -1760,8 +1602,8 @@ if(H5DEBUG(D))
/* Using independent I/O with file setview.*/
if(last_coll_opt_mode != H5FD_MPIO_INDIVIDUAL_IO) {
- if(H5D__ioinfo_coll_opt_mode(io_info, dx_plist, H5FD_MPIO_INDIVIDUAL_IO) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't switch to individual I/O")
+ if(H5CX_set_mpio_coll_opt(H5FD_MPIO_INDIVIDUAL_IO) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't switch to individual I/O")
last_coll_opt_mode = H5FD_MPIO_INDIVIDUAL_IO;
} /* end if */
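
This collective-opt switching is the internal counterpart of what applications can request with the public H5Pset_dxpl_mpio_collective_opt() routine: keep the collective file set view, but perform the actual transfer independently per process. A short usage sketch (error checking omitted; assumes an open dataset in a parallel file):

#include <hdf5.h>

static herr_t write_with_individual_io(hid_t dset, const double *buf)
{
    herr_t ret;
    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
    H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);

    ret = H5Dwrite(dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, dxpl, buf);
    H5Pclose(dxpl);
    return ret;
}
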
@@ -1778,9 +1620,8 @@ if(H5DEBUG(D))
} /* end else */
} /* end for */
- /* Write the local value of actual io mode to the DXPL. */
- if(H5P_set(dx_plist, H5D_MPIO_ACTUAL_IO_MODE_NAME, &actual_io_mode) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set actual io mode property")
+ /* Write the local value of actual io mode to the API context. */
+ H5CX_set_mpio_actual_io_mode(actual_io_mode);
done:
if(chunk_io_option)
@@ -1850,12 +1691,10 @@ done:
*/
static herr_t
H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
- H5D_chunk_map_t *fm, H5P_genplist_t *dx_plist)
+ H5D_chunk_map_t *fm)
{
H5D_filtered_collective_io_info_t *chunk_list = NULL; /* The list of chunks being read/written */
H5D_filtered_collective_io_info_t *collective_chunk_list = NULL; /* The list of chunks used during collective operations */
- H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode = H5D_MPIO_MULTI_CHUNK; /* The actual chunk IO optimization mode */
- H5D_mpio_actual_io_mode_t actual_io_mode = H5D_MPIO_CHUNK_COLLECTIVE; /* The chunk IO mode used (Independent vs Collective) */
H5D_storage_t store; /* union of EFL and chunk pointer in file space */
H5D_io_info_t ctg_io_info; /* Contiguous I/O info object */
H5D_storage_t ctg_store; /* Chunk storage information as contiguous dataset */
@@ -1875,7 +1714,6 @@ H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, const H5D_type_i
HDassert(io_info);
HDassert(type_info);
HDassert(fm);
- HDassert(dx_plist);
/* Obtain the current rank of the process and the number of processes */
if ((mpi_rank = H5F_mpi_get_rank(io_info->dset->oloc.file)) < 0)
@@ -1884,14 +1722,12 @@ H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, const H5D_type_i
HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi size")
/* Set the actual chunk opt mode property */
- if (H5P_set(dx_plist, H5D_MPIO_ACTUAL_CHUNK_OPT_MODE_NAME, &actual_chunk_opt_mode) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set actual chunk opt mode property")
+ H5CX_set_mpio_actual_chunk_opt(H5D_MPIO_MULTI_CHUNK);
/* Set the actual_io_mode property.
* Multi chunk I/O does not break to independent, so can set right away
*/
- if (H5P_set(dx_plist, H5D_MPIO_ACTUAL_IO_MODE_NAME, &actual_io_mode) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set actual chunk io mode property")
+ H5CX_set_mpio_actual_io_mode(H5D_MPIO_CHUNK_COLLECTIVE);
/* Build a list of selected chunks in the collective IO operation */
if (H5D__construct_filtered_io_info_list(io_info, type_info, fm, &chunk_list, &chunk_list_num_entries) < 0)
@@ -1922,7 +1758,6 @@ H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, const H5D_type_i
/* Construct chunked index info */
index_info.f = io_info->dset->oloc.file;
- index_info.dxpl_id = io_info->md_dxpl_id;
index_info.pline = &(io_info->dset->shared->dcpl_cache.pline);
index_info.layout = &(io_info->dset->shared->layout.u.chunk);
index_info.storage = &(io_info->dset->shared->layout.storage.u.chunk);
@@ -2039,7 +1874,7 @@ H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, const H5D_type_i
} /* end else */
/* Perform the I/O */
- if (H5D__final_collective_io(&ctg_io_info, type_info, mpi_buf_count, &file_type_array[i], &mem_type_array[i]) < 0)
+ if (H5D__final_collective_io(&ctg_io_info, type_info, mpi_buf_count, file_type_array[i], mem_type_array[i]) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish MPI-IO")
/* Participate in the collective re-insertion of all chunks modified
@@ -2189,7 +2024,7 @@ if(H5DEBUG(D))
#endif
/* Perform final collective I/O operation */
- if(H5D__final_collective_io(io_info, type_info, (hsize_t)mpi_buf_count, &mpi_file_type, &mpi_buf_type) < 0)
+ if(H5D__final_collective_io(io_info, type_info, (hsize_t)mpi_buf_count, mpi_file_type, mpi_buf_type) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish collective MPI-IO")
done:
@@ -2222,15 +2057,15 @@ if(H5DEBUG(D))
*/
static herr_t
H5D__final_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
- hsize_t mpi_buf_count, MPI_Datatype *mpi_file_type, MPI_Datatype *mpi_buf_type)
+ hsize_t mpi_buf_count, MPI_Datatype mpi_file_type, MPI_Datatype mpi_buf_type)
{
herr_t ret_value = SUCCEED;
FUNC_ENTER_STATIC
/* Pass buf type, file type to the file driver. */
- if(H5FD_mpi_setup_collective(io_info->raw_dxpl_id, mpi_buf_type, mpi_file_type) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set MPI-I/O properties")
+ if(H5CX_set_mpi_coll_datatypes(mpi_buf_type, mpi_file_type) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set MPI-I/O collective I/O datatypes")
if(io_info->op_type == H5D_IO_OP_WRITE) {
if((io_info->io_ops.single_write)(io_info, type_info, mpi_buf_count, NULL, NULL) < 0)
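
The signature change here (MPI_Datatype * to MPI_Datatype) leans on the fact that MPI datatypes are opaque handles that are cheap to pass by value; a pointer is only needed when the callee must reset or free the caller's handle. A standalone illustration:

#include <mpi.h>
#include <stdio.h>

/* Handles are passed by value; MPI_Type_size only reads them */
static void describe(MPI_Datatype ftype, MPI_Datatype mtype)
{
    int fsize, msize;
    MPI_Type_size(ftype, &fsize);
    MPI_Type_size(mtype, &msize);
    printf("file type: %d bytes, mem type: %d bytes\n", fsize, msize);
}

int main(int argc, char *argv[])
{
    MPI_Datatype vec;

    MPI_Init(&argc, &argv);

    MPI_Type_vector(4, 2, 8, MPI_DOUBLE, &vec);  /* 4 blocks of 2 doubles */
    MPI_Type_commit(&vec);

    describe(vec, MPI_BYTE);

    MPI_Type_free(&vec);    /* freeing does take a pointer, to reset the handle */
    MPI_Finalize();
    return 0;
}
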
@@ -2444,7 +2279,7 @@ if(H5DEBUG(D))
H5D_chunk_ud_t udata; /* User data for querying chunk info */
/* Get address of chunk */
- if(H5D__chunk_lookup(io_info->dset, io_info->md_dxpl_id, chunk_info->scaled, &udata) < 0)
+ if(H5D__chunk_lookup(io_info->dset, chunk_info->scaled, &udata) < 0)
HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL, "couldn't get chunk info from skipped list")
chunk_addr = udata.chunk_block.offset;
} /* end if */
@@ -2520,7 +2355,7 @@ done:
*/
static herr_t
H5D__obtain_mpio_mode(H5D_io_info_t* io_info, H5D_chunk_map_t *fm,
- H5P_genplist_t *dx_plist, uint8_t assign_io_mode[], haddr_t chunk_addr[])
+ uint8_t assign_io_mode[], haddr_t chunk_addr[])
{
size_t total_chunks;
unsigned percent_nproc_per_chunk, threshold_nproc_per_chunk;
@@ -2535,10 +2370,6 @@ H5D__obtain_mpio_mode(H5D_io_info_t* io_info, H5D_chunk_map_t *fm,
int root;
size_t ic;
int mpi_code;
-#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- int new_value;
- htri_t check_prop;
-#endif
herr_t ret_value = SUCCEED;
FUNC_ENTER_STATIC
@@ -2555,8 +2386,8 @@ H5D__obtain_mpio_mode(H5D_io_info_t* io_info, H5D_chunk_map_t *fm,
/* Setup parameters */
H5_CHECKED_ASSIGN(total_chunks, size_t, fm->layout->u.chunk.nchunks, hsize_t);
- if(H5P_get(dx_plist, H5D_XFER_MPIO_CHUNK_OPT_RATIO_NAME, &percent_nproc_per_chunk) < 0)
- HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't get percent nproc per chunk")
+ if(H5CX_get_mpio_chunk_opt_ratio(&percent_nproc_per_chunk) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "couldn't get percent nproc per chunk")
/* if ratio is 0, perform collective io */
if(0 == percent_nproc_per_chunk) {
if(H5D__chunk_addrmap(io_info, chunk_addr) < 0)
@@ -2646,40 +2477,19 @@ H5D__obtain_mpio_mode(H5D_io_info_t* io_info, H5D_chunk_map_t *fm,
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
{
- H5P_genplist_t *plist; /* Property list pointer */
-
- /* Get the dataset transfer property list */
- if(NULL == (plist = (H5P_genplist_t *)H5I_object(io_info->raw_dxpl_id)))
- HGOTO_ERROR(H5E_IO, H5E_BADTYPE, FAIL, "not a dataset transfer property list")
-
- check_prop = H5P_exist_plist(plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME);
- if(check_prop > 0) {
- for(ic = 0; ic < total_chunks; ic++) {
- if(assign_io_mode[ic] == H5D_CHUNK_IO_MODE_COL) {
- new_value = 0;
- if(H5P_set(plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, &new_value) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_UNSUPPORTED, FAIL, "unable to set property value")
- break;
- } /* end if */
- } /* end for */
- } /* end if */
-
- check_prop = H5P_exist_plist(plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME);
- if(check_prop > 0) {
- int temp_count = 0;
+ hbool_t coll_op = FALSE;
- for(ic = 0; ic < total_chunks; ic++) {
- if(assign_io_mode[ic] == H5D_CHUNK_IO_MODE_COL) {
- temp_count++;
- break;
- } /* end if */
- } /* end for */
- if(temp_count == 0) {
- new_value = 0;
- if(H5P_set(plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, &new_value) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_UNSUPPORTED, FAIL, "unable to set property value")
+ for(ic = 0; ic < total_chunks; ic++)
+ if(assign_io_mode[ic] == H5D_CHUNK_IO_MODE_COL) {
+ if(H5CX_test_set_mpio_coll_chunk_multi_ratio_coll(0) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set property value")
+ coll_op = TRUE;
+ break;
} /* end if */
- } /* end if */
+
+ if(!coll_op)
+ if(H5CX_test_set_mpio_coll_chunk_multi_ratio_ind(0) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set property value")
}
#endif
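
The mode assignment earlier in this routine compares, for each chunk, how many processes' selections touch the chunk against a threshold derived from the percent_nproc_per_chunk ratio (now fetched via H5CX_get_mpio_chunk_opt_ratio). A standalone sketch of that comparison, with illustrative counts:

#include <stdio.h>

int main(void)
{
    /* Illustrative inputs */
    int      mpi_size = 8;                      /* number of MPI processes */
    unsigned percent_nproc_per_chunk = 50;      /* ratio property, in percent */
    int      nproc_per_chunk[4] = {8, 5, 3, 1}; /* processes selecting each chunk */
    unsigned threshold = (unsigned)mpi_size * percent_nproc_per_chunk / 100;

    for (int ic = 0; ic < 4; ic++)
        printf("chunk %d: %s IO\n", ic,
               (unsigned)nproc_per_chunk[ic] >= threshold ? "collective"
                                                          : "independent");
    return 0;
}
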
@@ -2732,7 +2542,6 @@ H5D__construct_filtered_io_info_list(const H5D_io_info_t *io_info, const H5D_typ
HDassert(fm);
HDassert(chunk_list);
HDassert(num_entries);
- HDassert(TRUE == H5P_isa_class(io_info->raw_dxpl_id, H5P_DATASET_XFER));
if ((mpi_rank = H5F_mpi_get_rank(io_info->dset->oloc.file)) < 0)
HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi rank")
@@ -2745,15 +2554,15 @@ H5D__construct_filtered_io_info_list(const H5D_io_info_t *io_info, const H5D_typ
hssize_t select_npoints;
hssize_t chunk_npoints;
- if (NULL == (local_info_array = (H5D_filtered_collective_io_info_t *) H5MM_malloc(num_chunks_selected * sizeof(H5D_filtered_collective_io_info_t))))
+ if(NULL == (local_info_array = (H5D_filtered_collective_io_info_t *) H5MM_malloc(num_chunks_selected * sizeof(H5D_filtered_collective_io_info_t))))
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate local io info array buffer")
chunk_node = H5SL_first(fm->sel_chunks);
- for (i = 0; chunk_node; i++) {
+ for(i = 0; chunk_node; i++) {
chunk_info = (H5D_chunk_info_t *) H5SL_item(chunk_node);
/* Obtain this chunk's address */
- if (H5D__chunk_lookup(io_info->dset, io_info->md_dxpl_id, chunk_info->scaled, &udata) < 0)
+ if(H5D__chunk_lookup(io_info->dset, chunk_info->scaled, &udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
local_info_array[i].index = chunk_info->index;
@@ -3000,8 +2809,7 @@ H5D__chunk_redistribute_shared_chunks(const H5D_io_info_t *io_info, const H5D_ty
mem_iter_init = TRUE;
/* Collect the modification data into the buffer */
- if (!H5D__gather_mem(io_info->u.wbuf, chunk_info->mspace, mem_iter,
- (size_t) iter_nelmts, io_info->dxpl_cache, mod_data_p))
+ if(!H5D__gather_mem(io_info->u.wbuf, chunk_info->mspace, mem_iter, (size_t)iter_nelmts, mod_data_p))
HGOTO_ERROR(H5E_IO, H5E_CANTGATHER, FAIL, "couldn't gather from write buffer")
/* Send modification data to new owner */
@@ -3212,6 +3020,8 @@ H5D__filtered_collective_chunk_entry_io(H5D_filtered_collective_io_info_t *chunk
H5D_chunk_info_t *chunk_info = NULL;
H5S_sel_iter_t *mem_iter = NULL; /* Memory iterator for H5D__scatter_mem/H5D__gather_mem */
unsigned char *mod_data = NULL; /* Chunk modification data sent by a process to a chunk's owner */
+ H5Z_EDC_t err_detect; /* Error detection info */
+ H5Z_cb_t filter_cb; /* I/O filter callback function */
unsigned filter_mask = 0;
hssize_t iter_nelmts; /* Number of points to iterate over for the chunk IO operation */
hssize_t extent_npoints;
@@ -3232,6 +3042,12 @@ H5D__filtered_collective_chunk_entry_io(H5D_filtered_collective_io_info_t *chunk
HDassert(type_info);
HDassert(fm);
+ /* Retrieve filter settings from API context */
+ if(H5CX_get_err_detect(&err_detect) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get error detection info")
+ if(H5CX_get_filter_cb(&filter_cb) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get I/O filter callback function")
+
/* Look up the chunk and get its file and memory dataspaces */
if (NULL == (chunk_info = (H5D_chunk_info_t *) H5SL_search(fm->sel_chunks, &chunk_entry->index)))
HGOTO_ERROR(H5E_DATASPACE, H5E_NOTFOUND, FAIL, "can't locate chunk in skip list")
@@ -3253,19 +3069,19 @@ H5D__filtered_collective_chunk_entry_io(H5D_filtered_collective_io_info_t *chunk
/* If this is not a full chunk overwrite or this is a read operation, the chunk must be
* read from the file and unfiltered.
*/
- if (!chunk_entry->full_overwrite || io_info->op_type == H5D_IO_OP_READ) {
+ if(!chunk_entry->full_overwrite || io_info->op_type == H5D_IO_OP_READ) {
chunk_entry->chunk_states.new_chunk.length = chunk_entry->chunk_states.chunk_current.length;
/* Currently, these chunk reads are done independently and will likely
* cause issues with collective metadata reads enabled. In the future,
* this should be refactored to use collective chunk reads - JTH */
- if (H5F_block_read(io_info->dset->oloc.file, H5FD_MEM_DRAW, chunk_entry->chunk_states.chunk_current.offset,
- chunk_entry->chunk_states.new_chunk.length, H5AC_rawdata_dxpl_id, chunk_entry->buf) < 0)
+ if(H5F_block_read(io_info->dset->oloc.file, H5FD_MEM_DRAW, chunk_entry->chunk_states.chunk_current.offset,
+ chunk_entry->chunk_states.new_chunk.length, chunk_entry->buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
- if (H5Z_pipeline(&io_info->dset->shared->dcpl_cache.pline, H5Z_FLAG_REVERSE, &filter_mask,
- io_info->dxpl_cache->err_detect, io_info->dxpl_cache->filter_cb,
- (size_t *) &chunk_entry->chunk_states.new_chunk.length, &buf_size, &chunk_entry->buf) < 0)
+ if(H5Z_pipeline(&io_info->dset->shared->dcpl_cache.pline, H5Z_FLAG_REVERSE,
+ &filter_mask, err_detect, filter_cb, (size_t *)&chunk_entry->chunk_states.new_chunk.length,
+ &buf_size, &chunk_entry->buf) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, FAIL, "couldn't unfilter chunk for modifying")
} /* end if */
else {
@@ -3291,8 +3107,7 @@ H5D__filtered_collective_chunk_entry_io(H5D_filtered_collective_io_info_t *chunk
*/
switch (io_info->op_type) {
case H5D_IO_OP_READ:
- if (H5D__scatter_mem(chunk_entry->buf, chunk_info->mspace, mem_iter,
- (size_t) iter_nelmts, io_info->dxpl_cache, io_info->u.rbuf) < 0)
+ if (H5D__scatter_mem(chunk_entry->buf, chunk_info->mspace, mem_iter, (size_t)iter_nelmts, io_info->u.rbuf) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't scatter to read buffer")
break;
@@ -3301,8 +3116,7 @@ H5D__filtered_collective_chunk_entry_io(H5D_filtered_collective_io_info_t *chunk
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate temporary gather buffer")
/* Gather modification data from the application write buffer into a temporary buffer */
- if (!H5D__gather_mem(io_info->u.wbuf, chunk_info->mspace, mem_iter,
- (size_t) iter_nelmts, io_info->dxpl_cache, tmp_gath_buf))
+ if(!H5D__gather_mem(io_info->u.wbuf, chunk_info->mspace, mem_iter, (size_t)iter_nelmts, tmp_gath_buf))
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "couldn't gather from write buffer")
if (H5S_SELECT_ITER_RELEASE(mem_iter) < 0)
@@ -3320,8 +3134,7 @@ H5D__filtered_collective_chunk_entry_io(H5D_filtered_collective_io_info_t *chunk
/* Scatter the owner's modification data into the chunk data buffer according to
* the file space.
*/
- if (H5D__scatter_mem(tmp_gath_buf, chunk_info->fspace, mem_iter,
- (size_t) iter_nelmts, io_info->dxpl_cache, chunk_entry->buf) < 0)
+ if(H5D__scatter_mem(tmp_gath_buf, chunk_info->fspace, mem_iter, (size_t)iter_nelmts, chunk_entry->buf) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't scatter to chunk data buffer")
if (H5S_SELECT_ITER_RELEASE(mem_iter) < 0)
@@ -3351,8 +3164,7 @@ H5D__filtered_collective_chunk_entry_io(H5D_filtered_collective_io_info_t *chunk
HGOTO_ERROR(H5E_DATASET, H5E_CANTCOUNT, FAIL, "dataspace is invalid")
/* Update the chunk data with the received modification data */
- if (H5D__scatter_mem(mod_data_p, dataspace, mem_iter, (size_t) iter_nelmts,
- io_info->dxpl_cache, chunk_entry->buf) < 0)
+ if(H5D__scatter_mem(mod_data_p, dataspace, mem_iter, (size_t) iter_nelmts, chunk_entry->buf) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "couldn't scatter to write buffer")
if (H5S_SELECT_ITER_RELEASE(mem_iter) < 0)
@@ -3367,18 +3179,18 @@ H5D__filtered_collective_chunk_entry_io(H5D_filtered_collective_io_info_t *chunk
} /* end for */
/* Filter the chunk */
- if (H5Z_pipeline(&io_info->dset->shared->dcpl_cache.pline, 0, &filter_mask,
- io_info->dxpl_cache->err_detect, io_info->dxpl_cache->filter_cb,
- (size_t *) &chunk_entry->chunk_states.new_chunk.length, &buf_size, &chunk_entry->buf) < 0)
- HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, H5_ITER_ERROR, "output pipeline failed")
+ if(H5Z_pipeline(&io_info->dset->shared->dcpl_cache.pline, 0, &filter_mask,
+ err_detect, filter_cb, (size_t *)&chunk_entry->chunk_states.new_chunk.length,
+ &buf_size, &chunk_entry->buf) < 0)
+ HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, FAIL, "output pipeline failed")
#if H5_SIZEOF_SIZE_T > 4
/* Check for the chunk expanding too much to encode in a 32-bit value */
if (chunk_entry->chunk_states.new_chunk.length > ((size_t) 0xffffffff))
HGOTO_ERROR(H5E_DATASET, H5E_BADRANGE, FAIL, "chunk too large for 32-bit length")
#endif
-
break;
+
default:
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "invalid I/O operation")
} /* end switch */
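
The entry-IO routine above is a read-modify-write cycle on a filtered chunk: read the raw bytes, run the pipeline in reverse to unfilter, scatter the modification data into the buffer, then refilter before the chunk is written back. A self-contained toy illustration of that cycle, using a trivial XOR "filter" in place of the real H5Z pipeline:

#include <stdio.h>
#include <string.h>

/* Toy reversible "filter": XOR each byte (stands in for the filter pipeline) */
static void toy_filter(unsigned char *buf, size_t len)
{
    for (size_t i = 0; i < len; i++)
        buf[i] ^= 0x5A;
}

int main(void)
{
    unsigned char stored[8] = {0};
    unsigned char chunk[8];
    unsigned char mod = 42;          /* modification data from another rank */

    toy_filter(stored, sizeof(stored));   /* chunk as it sits on disk */

    /* Read-modify-write cycle for a partial update of a filtered chunk */
    memcpy(chunk, stored, sizeof(chunk)); /* "read raw chunk" */
    toy_filter(chunk, sizeof(chunk));     /* unfilter (reverse pass) */
    chunk[3] = mod;                       /* scatter in the modification */
    toy_filter(chunk, sizeof(chunk));     /* refilter (forward pass) */
    memcpy(stored, chunk, sizeof(stored));/* "write raw chunk" */

    printf("updated byte now stored as 0x%02X\n", stored[3]);
    return 0;
}
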