-rw-r--r--   src/H5Dio.c            27
-rw-r--r--   src/H5Dmpio.c          77
-rw-r--r--   src/H5Dprivate.h        2
-rw-r--r--   src/H5Fsuper_cache.c    2
-rw-r--r--   src/H5Pdxpl.c          99
-rw-r--r--   src/H5Ppublic.h        36
-rw-r--r--   testpar/t_dset.c      549
-rw-r--r--   testpar/testphdf5.c     3
-rw-r--r--   testpar/testphdf5.h    16
9 files changed, 795 insertions, 16 deletions
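In short, this patch adds two query-only dataset transfer properties, exposed through the new H5Pget_mpio_actual_chunk_opt_mode() and H5Pget_mpio_actual_io_mode() calls, that report which chunk optimization scheme and which kind of I/O (independent, collective per chunk, mixed, or contiguous collective) the library actually performed during the most recent parallel read or write. The sketch below is not part of the patch: it only illustrates how an application built against a parallel HDF5 that includes this change might query the new properties after a collective write. The file name, one-dimensional contiguous layout, and the omission of error checking are illustrative assumptions.

/* Sketch: request collective I/O, then ask what the library actually did.
 * Requires a parallel (MPI) build of HDF5 that includes this patch. */
#include <stdio.h>
#include <mpi.h>
#include "hdf5.h"

int main(int argc, char **argv)
{
    int      rank, nprocs, buf[8];
    hsize_t  dims[1], start[1], count[1] = {8};
    hid_t    fapl, fid, sid, dset, file_space, mem_space, dxpl;
    H5D_mpio_actual_io_mode_t        io_mode;
    H5D_mpio_actual_chunk_opt_mode_t opt_mode;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    for(int i = 0; i < 8; i++)
        buf[i] = rank;

    /* One contiguous 1-D dataset, 8 elements per rank */
    dims[0] = (hsize_t)nprocs * 8;
    fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
    fid  = H5Fcreate("actual_io.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    sid  = H5Screate_simple(1, dims, NULL);
    dset = H5Dcreate2(fid, "d", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

    /* Each rank selects its own 8-element block in the file */
    start[0] = (hsize_t)rank * 8;
    file_space = H5Dget_space(dset);
    mem_space  = H5Screate_simple(1, count, NULL);
    H5Sselect_hyperslab(file_space, H5S_SELECT_SET, start, NULL, count, NULL);

    /* Request collective I/O and write */
    dxpl = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
    H5Dwrite(dset, H5T_NATIVE_INT, mem_space, file_space, dxpl, buf);

    /* New in this patch: for this contiguous dataset the write should report
     * H5D_MPIO_CONTIGUOUS_COLLECTIVE and H5D_MPIO_NO_CHUNK_OPTIMIZATION */
    H5Pget_mpio_actual_io_mode(dxpl, &io_mode);
    H5Pget_mpio_actual_chunk_opt_mode(dxpl, &opt_mode);
    printf("rank %d: actual_io_mode = %d, actual_chunk_opt_mode = %d\n",
           rank, (int)io_mode, (int)opt_mode);

    H5Pclose(dxpl);
    H5Sclose(mem_space);
    H5Sclose(file_space);
    H5Dclose(dset);
    H5Sclose(sid);
    H5Fclose(fid);
    H5Pclose(fapl);
    MPI_Finalize();
    return 0;
}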
diff --git a/src/H5Dio.c b/src/H5Dio.c index 7ee304c..7268b25 100644 --- a/src/H5Dio.c +++ b/src/H5Dio.c @@ -340,7 +340,7 @@ H5D_read(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space, #ifdef H5_HAVE_PARALLEL /* Collective access is not permissible without a MPI based VFD */ if(dxpl_cache->xfer_mode == H5FD_MPIO_COLLECTIVE && - !(H5F_HAS_FEATURE(dataset->oloc.file, H5FD_FEAT_HAS_MPI))) + !(H5F_HAS_FEATURE(dataset->oloc.file, H5FD_FEAT_HAS_MPI))) HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "collective access for MPI-based drivers only") #endif /*H5_HAVE_PARALLEL*/ @@ -603,7 +603,7 @@ H5D_write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space, HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to construct projected memory dataspace") HDassert(projected_mem_space); HDassert(adj_buf); - + /* Switch to using projected memory dataspace & adjusted buffer */ mem_space = projected_mem_space; buf = adj_buf; @@ -950,6 +950,9 @@ H5D_ioinfo_adjust(H5D_io_info_t *io_info, const H5D_t *dset, hid_t dxpl_id, const H5S_t *file_space, const H5S_t *mem_space, const H5D_type_info_t *type_info, const H5D_chunk_map_t *fm) { + H5P_genplist_t *dx_plist; /* Data transer property list */ + H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode; /* performed chunk optimization */ + H5D_mpio_actual_io_mode_t actual_io_mode; /* performed io mode */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5D_ioinfo_adjust) @@ -963,6 +966,20 @@ H5D_ioinfo_adjust(H5D_io_info_t *io_info, const H5D_t *dset, hid_t dxpl_id, HDassert(type_info->tpath); HDassert(io_info); + /* Get the dataset transfer property list */ + if(NULL == (dx_plist = (H5P_genplist_t *)H5I_object(dxpl_id))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset transfer property list") + + /* Reset the actual io mode properties to the default values in case + * the dxpl was previously used in a collective I/O operation. + */ + actual_chunk_opt_mode = H5D_MPIO_NO_CHUNK_OPTIMIZATION; + actual_io_mode = H5D_MPIO_NO_COLLECTIVE; + if(H5P_set(dx_plist, H5D_MPIO_ACTUAL_CHUNK_OPT_MODE_NAME, &actual_chunk_opt_mode) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set actual chunk opt mode property") + if(H5P_set(dx_plist, H5D_MPIO_ACTUAL_IO_MODE_NAME, &actual_io_mode) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set actual io mode property") + /* Make any parallel I/O adjustments */ if(io_info->using_mpi_vfd) { htri_t opt; /* Flag whether a selection is optimizable */ @@ -995,12 +1012,6 @@ H5D_ioinfo_adjust(H5D_io_info_t *io_info, const H5D_t *dset, hid_t dxpl_id, * mark it so that we remember to revert the change. 
*/ if(io_info->dxpl_cache->xfer_mode == H5FD_MPIO_COLLECTIVE) { - H5P_genplist_t *dx_plist; /* Data transer property list */ - - /* Get the dataset transfer property list */ - if(NULL == (dx_plist = (H5P_genplist_t *)H5I_object(dxpl_id))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset creation property list") - /* Change the xfer_mode to independent for handling the I/O */ io_info->dxpl_cache->xfer_mode = H5FD_MPIO_INDEPENDENT; if(H5P_set(dx_plist, H5D_XFER_IO_XFER_MODE_NAME, &io_info->dxpl_cache->xfer_mode) < 0) diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c index 1c69305..8c78af2 100644 --- a/src/H5Dmpio.c +++ b/src/H5Dmpio.c @@ -471,6 +471,8 @@ H5D_contig_collective_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_i hsize_t UNUSED nelmts, const H5S_t *file_space, const H5S_t *mem_space, H5D_chunk_map_t UNUSED *fm) { + H5D_mpio_actual_io_mode_t actual_io_mode = H5D_MPIO_CONTIGUOUS_COLLECTIVE; + H5P_genplist_t *dx_plist; /* Pointer to DXPL */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(H5D_contig_collective_read, FAIL) @@ -483,6 +485,16 @@ H5D_contig_collective_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_i if(H5D_inter_collective_io(io_info, type_info, file_space, mem_space) < 0) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "couldn't finish shared collective MPI-IO") + /* Obtain the data transfer properties */ + if(NULL == (dx_plist = H5I_object(io_info->dxpl_id))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data transfer property list") + + /* Set the actual I/O mode property. internal_collective_io will not break to + * independent I/O, so we set it here. + */ + if(H5P_set(dx_plist, H5D_MPIO_ACTUAL_IO_MODE_NAME, &actual_io_mode) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set actual io mode property") + done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D_contig_collective_read() */ @@ -506,6 +518,8 @@ H5D_contig_collective_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_ hsize_t UNUSED nelmts, const H5S_t *file_space, const H5S_t *mem_space, H5D_chunk_map_t UNUSED *fm) { + H5D_mpio_actual_io_mode_t actual_io_mode = H5D_MPIO_CONTIGUOUS_COLLECTIVE; + H5P_genplist_t *dx_plist; /* Pointer to DXPL */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(H5D_contig_collective_write, FAIL) @@ -518,6 +532,16 @@ H5D_contig_collective_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_ if(H5D_inter_collective_io(io_info, type_info, file_space, mem_space) < 0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "couldn't finish shared collective MPI-IO") + /* Obtain the data transfer properties */ + if(NULL == (dx_plist = H5I_object(io_info->dxpl_id))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data transfer property list") + + /* Set the actual I/O mode property. internal_collective_io will not break to + * independent I/O, so we set it here. 
+ */ + if(H5P_set(dx_plist, H5D_MPIO_ACTUAL_IO_MODE_NAME, &actual_io_mode) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set actual io mode property") + done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D_contig_collective_write() */ @@ -559,6 +583,8 @@ H5D_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info { H5P_genplist_t *dx_plist; /* Pointer to DXPL */ H5FD_mpio_chunk_opt_t chunk_opt_mode; + H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode; + H5D_mpio_actual_io_mode_t actual_io_mode; int io_option = H5D_MULTI_CHUNK_IO_MORE_OPT; int sum_chunk = -1; #ifdef H5_HAVE_INSTRUMENTED_LIBRARY @@ -646,15 +672,38 @@ H5D_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info /* step 2: Go ahead to do IO.*/ if(io_option == H5D_ONE_LINK_CHUNK_IO || io_option == H5D_ONE_LINK_CHUNK_IO_MORE_OPT) { + /* set the actual io mode properties to the correct values for link chunk io. + * Link chunk I/O does not break to independent, so we can set the actual_io mode + * as well as the optimisation mode. */ + actual_chunk_opt_mode = H5D_MPIO_LINK_CHUNK; + actual_io_mode = H5D_MPIO_CHUNK_COLLECTIVE; + + /* Set the actual chunk opt mode property. */ + if(H5P_set(dx_plist, H5D_MPIO_ACTUAL_CHUNK_OPT_MODE_NAME, &actual_chunk_opt_mode) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set actual chunk opt mode property") + if(H5D_link_chunk_collective_io(io_info, type_info, fm, sum_chunk) < 0) HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish linked chunk MPI-IO") + + /* Set the actual io mode property. */ + if(H5P_set(dx_plist, H5D_MPIO_ACTUAL_IO_MODE_NAME, &actual_io_mode) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set actual io mode property") } /* end if */ else if(io_option == H5D_MULTI_CHUNK_IO) { + /* Set the actual chunk opt mode property */ + actual_chunk_opt_mode = H5D_MPIO_MULTI_CHUNK_NO_OPT; + if(H5P_set(dx_plist, H5D_MPIO_ACTUAL_CHUNK_OPT_MODE_NAME, &actual_chunk_opt_mode) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set actual chunk opt mode property") + if(H5D_multi_chunk_collective_io_no_opt(io_info, type_info, fm, dx_plist) < 0) HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish multiple chunk MPI-IO") } /* end if */ else { /*multiple chunk IOs with opt */ + actual_chunk_opt_mode = H5D_MPIO_MULTI_CHUNK; + if(H5P_set(dx_plist, H5D_MPIO_ACTUAL_CHUNK_OPT_MODE_NAME, &actual_chunk_opt_mode) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set actual chunk opt mode property") + if(H5D_multi_chunk_collective_io(io_info, type_info, fm, dx_plist) < 0) HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish optimized multiple chunk MPI-IO") } /* end else */ @@ -1036,6 +1085,7 @@ H5D_multi_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *typ int mpi_rank; #endif size_t u; /* Local index variable */ + H5D_mpio_actual_io_mode_t actual_io_mode = H5D_MPIO_NO_COLLECTIVE; /* Local variable for tracking the I/O mode used. */ herr_t ret_value = SUCCEED; FUNC_ENTER_NOAPI_NOINIT(H5D_multi_chunk_collective_io) @@ -1115,6 +1165,15 @@ if(H5DEBUG(D)) if(chunk_info) { fspace = chunk_info->fspace; mspace = chunk_info->mspace; + + /* Update the local variable tracking the dxpl's actual io mode property. + * + * Note: H5D_MPIO_COLLECTIVE_MULTI | H5D_MPIO_INDEPENDENT = H5D_MPIO_MIXED + * to ease switching between to mixed I/O without checking the current + * value of the property. 
You can see the definition in H5Ppublic.h + */ + actual_io_mode = actual_io_mode | H5D_MPIO_CHUNK_COLLECTIVE; + } /* end if */ else { fspace = mspace = NULL; @@ -1151,6 +1210,9 @@ if(H5DEBUG(D)) if(chunk_info) { fspace = chunk_info->fspace; mspace = chunk_info->mspace; + + /* Update the local variable tracking the dxpl's actual io mode. */ + actual_io_mode = actual_io_mode | H5D_MPIO_CHUNK_INDEPENDENT; } /* end if */ else { fspace = mspace = NULL; @@ -1176,6 +1238,10 @@ if(H5DEBUG(D)) } /* end else */ } /* end for */ + /* Write the local value of actual io mode to the DXPL. */ + if(H5P_set(dx_plist, H5D_MPIO_ACTUAL_IO_MODE_NAME, &actual_io_mode) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set actual io mode property") + done: if(chunk_io_option) H5MM_xfree(chunk_io_option); @@ -1226,6 +1292,7 @@ H5D_multi_chunk_collective_io_no_opt(H5D_io_info_t *io_info, int min_chunk = -1; /* Minimum # of chunks all processes will operate on */ int count_chunk; /* How many chunks have we operated on? */ H5D_storage_t store; /* union of EFL and chunk pointer in file space */ + H5D_mpio_actual_io_mode_t actual_io_mode = H5D_MPIO_NO_COLLECTIVE; /*Local variable for tracking the I/O modes used. */ herr_t ret_value = SUCCEED; FUNC_ENTER_NOAPI_NOINIT(H5D_multi_chunk_collective_io_no_opt) @@ -1308,6 +1375,9 @@ if(H5DEBUG(D)) { if(H5D_ioinfo_xfer_mode(io_info, dx_plist, H5FD_MPIO_INDEPENDENT) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't switch to independent I/O") + /* Update the local variable tracking the dxpl's actual io mode */ + actual_io_mode = actual_io_mode | H5D_MPIO_CHUNK_INDEPENDENT; + /* Load the chunk into cache and lock it. */ if((cacheable = H5D_chunk_cacheable(io_info, udata.addr, io_info->op_type == H5D_IO_OP_WRITE)) < 0) @@ -1364,6 +1434,9 @@ if(H5DEBUG(D)) { /* Set up the storage address information for this chunk */ ctg_store.contig.dset_addr = udata.addr; + /* Update the local variable tracking the dxpl's actual io Mode. */ + actual_io_mode = actual_io_mode | H5D_MPIO_CHUNK_COLLECTIVE; + if(H5D_inter_collective_io(&ctg_io_info, type_info, chunk_info->fspace, chunk_info->mspace) < 0) HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL,"couldn't finish shared collective MPI-IO") } /* end else */ @@ -1376,6 +1449,10 @@ if(H5DEBUG(D)) { chunk_node = H5SL_next(chunk_node); } /* end while */ + /* Write the local value of actual io mode to the DXPL. 
*/ + if(H5P_set(dx_plist, H5D_MPIO_ACTUAL_IO_MODE_NAME, &actual_io_mode) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set actual io mode property") + done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D_multi_chunk_collective_io_no_opt */ diff --git a/src/H5Dprivate.h b/src/H5Dprivate.h index cc770df..a4dddf5 100644 --- a/src/H5Dprivate.h +++ b/src/H5Dprivate.h @@ -72,6 +72,8 @@ #define H5D_XFER_MPIO_CHUNK_OPT_HARD_NAME "mpio_chunk_opt_hard" #define H5D_XFER_MPIO_CHUNK_OPT_NUM_NAME "mpio_chunk_opt_num" #define H5D_XFER_MPIO_CHUNK_OPT_RATIO_NAME "mpio_chunk_opt_ratio" +#define H5D_MPIO_ACTUAL_CHUNK_OPT_MODE_NAME "actual_chunk_opt_mode" +#define H5D_MPIO_ACTUAL_IO_MODE_NAME "actual_io_mode" #endif /* H5_HAVE_PARALLEL */ #define H5D_XFER_EDC_NAME "err_detect" /* EDC */ #define H5D_XFER_FILTER_CB_NAME "filter_cb" /* Filter callback function */ diff --git a/src/H5Fsuper_cache.c b/src/H5Fsuper_cache.c index 56f9219..cb4ee0e 100644 --- a/src/H5Fsuper_cache.c +++ b/src/H5Fsuper_cache.c @@ -469,7 +469,7 @@ H5F_sblock_load(H5F_t *f, hid_t dxpl_id, haddr_t UNUSED addr, void *_udata) /* (Account for the stored EOA being absolute offset -QAK) */ if((eof + sblock->base_addr) < stored_eoa) - HGOTO_ERROR(H5E_FILE, H5E_TRUNCATED, NULL, "truncated file") + HGOTO_ERROR(H5E_FILE, H5E_TRUNCATED, NULL, "truncated file: eof = %llu, sblock->base_addr = %llu, stored_eoa = %llu", (unsigned long long)eof, (unsigned long long)sblock->base_addr, (unsigned long long)stored_eoa) /* * Tell the file driver how much address space has already been diff --git a/src/H5Pdxpl.c b/src/H5Pdxpl.c index 8530b8b..3408ced 100644 --- a/src/H5Pdxpl.c +++ b/src/H5Pdxpl.c @@ -98,14 +98,20 @@ #define H5D_XFER_MPIO_CHUNK_OPT_HARD_DEF H5FD_MPIO_CHUNK_DEFAULT #define H5D_XFER_MPIO_CHUNK_OPT_NUM_SIZE sizeof(unsigned) #define H5D_XFER_MPIO_CHUNK_OPT_NUM_DEF H5D_ONE_LINK_CHUNK_IO_THRESHOLD -#define H5D_XFER_MPIO_CHUNK_OPT_RATIO_SIZE sizeof(unsigned) +#define H5D_XFER_MPIO_CHUNK_OPT_RATIO_SIZE sizeof(unsigned) #define H5D_XFER_MPIO_CHUNK_OPT_RATIO_DEF H5D_MULTI_CHUNK_IO_COL_THRESHOLD +/* Definitions for chunk opt mode property. */ +#define H5D_MPIO_ACTUAL_CHUNK_OPT_MODE_SIZE sizeof(H5D_mpio_actual_chunk_opt_mode_t) +#define H5D_MPIO_ACTUAL_CHUNK_OPT_MODE_DEF H5D_MPIO_NO_CHUNK_OPTIMIZATION +/* Definitions for chunk io mode property. 
*/ +#define H5D_MPIO_ACTUAL_IO_MODE_SIZE sizeof(H5D_mpio_actual_io_mode_t) +#define H5D_MPIO_ACTUAL_IO_MODE_DEF H5D_MPIO_NO_COLLECTIVE /* Definitions for EDC property */ -#define H5D_XFER_EDC_SIZE sizeof(H5Z_EDC_t) -#define H5D_XFER_EDC_DEF H5Z_ENABLE_EDC +#define H5D_XFER_EDC_SIZE sizeof(H5Z_EDC_t) +#define H5D_XFER_EDC_DEF H5Z_ENABLE_EDC /* Definitions for filter callback function property */ -#define H5D_XFER_FILTER_CB_SIZE sizeof(H5Z_cb_t) -#define H5D_XFER_FILTER_CB_DEF {NULL,NULL} +#define H5D_XFER_FILTER_CB_SIZE sizeof(H5Z_cb_t) +#define H5D_XFER_FILTER_CB_DEF {NULL,NULL} /* Definitions for type conversion callback function property */ #define H5D_XFER_CONV_CB_SIZE sizeof(H5T_conv_cb_t) #define H5D_XFER_CONV_CB_DEF {NULL,NULL} @@ -205,6 +211,8 @@ H5P_dxfr_reg_prop(H5P_genclass_t *pclass) H5FD_mpio_collective_opt_t def_mpio_collective_opt_mode = H5D_XFER_MPIO_COLLECTIVE_OPT_DEF; unsigned def_mpio_chunk_opt_num = H5D_XFER_MPIO_CHUNK_OPT_NUM_DEF; unsigned def_mpio_chunk_opt_ratio = H5D_XFER_MPIO_CHUNK_OPT_RATIO_DEF; + H5D_mpio_actual_chunk_opt_mode_t def_mpio_actual_chunk_opt_mode = H5D_MPIO_ACTUAL_CHUNK_OPT_MODE_DEF; + H5D_mpio_actual_io_mode_t def_mpio_actual_io_mode = H5D_MPIO_ACTUAL_IO_MODE_DEF; #endif /* H5_HAVE_PARALLEL */ H5Z_EDC_t enable_edc = H5D_XFER_EDC_DEF; /* Default value for EDC property */ H5Z_cb_t filter_cb = H5D_XFER_FILTER_CB_DEF; /* Default value for filter callback */ @@ -268,7 +276,7 @@ H5P_dxfr_reg_prop(H5P_genclass_t *pclass) HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class") #ifdef H5_HAVE_PARALLEL - /* Register the I/O transfer mode property */ + /* Register the I/O transfer mode properties */ if(H5P_register_real(pclass, H5D_XFER_IO_XFER_MODE_NAME, H5D_XFER_IO_XFER_MODE_SIZE, &def_io_xfer_mode, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class") if(H5P_register_real(pclass, H5D_XFER_MPIO_COLLECTIVE_OPT_NAME, H5D_XFER_MPIO_COLLECTIVE_OPT_SIZE, &def_mpio_collective_opt_mode, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0) @@ -279,6 +287,14 @@ H5P_dxfr_reg_prop(H5P_genclass_t *pclass) HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class") if(H5P_register_real(pclass, H5D_XFER_MPIO_CHUNK_OPT_RATIO_NAME, H5D_XFER_MPIO_CHUNK_OPT_RATIO_SIZE, &def_mpio_chunk_opt_ratio, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class") + + /* Register the chunk optimization mode property. */ + if(H5P_register_real(pclass, H5D_MPIO_ACTUAL_CHUNK_OPT_MODE_NAME, H5D_MPIO_ACTUAL_CHUNK_OPT_MODE_SIZE, &def_mpio_actual_chunk_opt_mode, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class") + + /* Register the actual io mode property. 
*/ + if(H5P_register_real(pclass, H5D_MPIO_ACTUAL_IO_MODE_NAME, H5D_MPIO_ACTUAL_IO_MODE_SIZE, &def_mpio_actual_io_mode, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class") #endif /* H5_HAVE_PARALLEL */ /* Register the EDC property */ @@ -1483,3 +1499,74 @@ done: FUNC_LEAVE_API(ret_value) } /* end H5Pget_hyper_vector_size() */ + +#ifdef H5_HAVE_PARALLEL +/*------------------------------------------------------------------------- + * Function: H5Pget_mpio_actual_chunk_opt_mode + * + * Purpose: Retrieves the chunked io optimization scheme that library chose + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Jacob Gruber + * Wednesday, May 4, 2011 + *------------------------------------------------------------------------- + */ +herr_t +H5Pget_mpio_actual_chunk_opt_mode(hid_t plist_id, H5D_mpio_actual_chunk_opt_mode_t *actual_chunk_opt_mode) +{ + H5P_genplist_t *plist; + herr_t ret_value = SUCCEED; /* return value */ + + FUNC_ENTER_API(H5Pget_mpio_actual_chunk_opt_mode, FAIL) + H5TRACE2("e","ix", plist_id, actual_chunk_opt_mode); + + /* Get the plist structure */ + if(NULL == (plist = H5P_object_verify(plist_id, H5P_DATASET_XFER))) + HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID") + + /* Return values */ + if(actual_chunk_opt_mode) + if(H5P_get(plist, H5D_MPIO_ACTUAL_CHUNK_OPT_MODE_NAME, actual_chunk_opt_mode) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to get value") + +done: + FUNC_LEAVE_API(ret_value) +} /* end H5Pget_mpio_actual_chunk_opt_mode() */ + + +/*------------------------------------------------------------------------- + * Function: H5Pget_mpio_actual_io_mode + * + * Purpose: Retrieves the type of I/O actually preformed when collective I/O + * is requested. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Jacob Gruber + * Wednesday, May 4, 2011 + *------------------------------------------------------------------------- + */ +herr_t +H5Pget_mpio_actual_io_mode(hid_t plist_id, H5D_mpio_actual_io_mode_t * actual_io_mode) +{ + H5P_genplist_t *plist; + herr_t ret_value = SUCCEED; /* return value */ + + FUNC_ENTER_API(H5Pget_mpio_actual_io_mode, FAIL) + H5TRACE2("e","ix", plist_id, actual_io_mode); + + /* Get the plist structure */ + if(NULL == (plist = H5P_object_verify(plist_id, H5P_DATASET_XFER))) + HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID") + + /* Return values */ + if(actual_io_mode) + if(H5P_get(plist, H5D_MPIO_ACTUAL_IO_MODE_NAME, actual_io_mode) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to get value") + +done: + FUNC_LEAVE_API(ret_value) +} /* end H5Pget_mpio_actual_io_mode() */ +#endif /* H5_HAVE_PARALLEL */ + diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h index 80c028a..492a468 100644 --- a/src/H5Ppublic.h +++ b/src/H5Ppublic.h @@ -120,6 +120,38 @@ typedef H5P_prp_cb1_t H5P_prp_close_func_t; /* Define property list iteration function type */ typedef herr_t (*H5P_iterate_t)(hid_t id, const char *name, void *iter_data); +/* Actual IO mode property */ +typedef enum H5D_mpio_actual_chunk_opt_mode_t { + /* The default value, H5D_MPIO_NO_CHUNK_OPTIMIZATION, is used for all I/O + * operations that do not use chunk optimizations, including non-collective + * I/O and contiguous collective I/O. 
+ */ + H5D_MPIO_NO_CHUNK_OPTIMIZATION = 0, + H5D_MPIO_LINK_CHUNK, + H5D_MPIO_MULTI_CHUNK, + H5D_MPIO_MULTI_CHUNK_NO_OPT +} H5D_mpio_actual_chunk_opt_mode_t; + +typedef enum H5D_mpio_actual_io_mode_t { + /* The following four values are conveniently defined as a bit field so that + * we can switch from the default to indpendent or collective and then to + * mixed without having to check the original value. + * + * NO_COLLECTIVE means that either collective I/O wasn't requested or that + * no I/O took place. + * + * CHUNK_INDEPENDENT means that collective I/O was requested, but the + * chunk optimization scheme chose independent I/O for each chunk. + */ + H5D_MPIO_NO_COLLECTIVE = 0x0, + H5D_MPIO_CHUNK_INDEPENDENT = 0x1, + H5D_MPIO_CHUNK_COLLECTIVE = 0x2, + H5D_MPIO_CHUNK_MIXED = 0x1 | 0x2, + + /* The contiguous case is separate from the bit field. */ + H5D_MPIO_CONTIGUOUS_COLLECTIVE = 0x4 +} H5D_mpio_actual_io_mode_t; + /********************/ /* Public Variables */ /********************/ @@ -359,6 +391,10 @@ H5_DLL herr_t H5Pset_type_conv_cb(hid_t dxpl_id, H5T_conv_except_func_t op, void H5_DLL herr_t H5Pget_type_conv_cb(hid_t dxpl_id, H5T_conv_except_func_t *op, void** operate_data); H5_DLL herr_t H5Pset_enum_conv_overflow(hid_t plist_id, hbool_t conv_overflow); H5_DLL herr_t H5Pget_enum_conv_overflow(hid_t plist_id, hbool_t *conv_overflow/*out*/); +#ifdef H5_HAVE_PARALLEL +H5_DLL herr_t H5Pget_mpio_actual_chunk_opt_mode(hid_t plist_id, H5D_mpio_actual_chunk_opt_mode_t *actual_chunk_opt_mode); +H5_DLL herr_t H5Pget_mpio_actual_io_mode(hid_t plist_id, H5D_mpio_actual_io_mode_t *actual_io_mode); +#endif /* H5_HAVE_PARALLEL */ /* Link creation property list (LCPL) routines */ H5_DLL herr_t H5Pset_create_intermediate_group(hid_t plist_id, unsigned crt_intmd); diff --git a/testpar/t_dset.c b/testpar/t_dset.c index 21e25b6..d2f061d 100644 --- a/testpar/t_dset.c +++ b/testpar/t_dset.c @@ -2498,3 +2498,552 @@ none_selection_chunk(void) if(data_origin) free(data_origin); if(data_array) free(data_array); } + + +/* Function: test_actual_io_mode + * + * Purpose: tests one specific case of collective I/O and checks that the + * actual_chunk_opt_mode property and the actual_io_mode + * properties in the DXPL have the correct values. + * + * Input: selection_mode: changes the way processes select data from the space, as well + * as some dxpl flags to get collective I/O to break in different ways. + * + * The relevant I/O function and expected response for each mode: + * TEST_ACTUAL_IO_MULTI_CHUNK_IND: + * H5D_mpi_chunk_collective_io, each process reports independent I/O + * + * TEST_ACTUAL_IO_MULTI_CHUNK_COL: + * H5D_mpi_chunk_collective_io, each process reports collective I/O + * + * TEST_ACTUAL_IO_MULTI_CHUNK_MIX: + * H5D_mpi_chunk_collective_io, each process reports mixed I/O + * + * TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE: + * H5D_mpi_chunk_collective_io, processes disagree. 
The root reports + * collective, the rest report independent I/O + * + * TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_COL: + * H5D_mpi_chunk_collective_io_no_opt, each process reports collective I/O + * + * TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_MIX_DISAGREE: + * H5D_mpi_chunk_collective_io_no_opt, processes disagree + * (collective and mixed I/O) + * + * TEST_ACTUAL_IO_LINK_CHUNK: + * H5D_link_chunk_collective_io, processes report linked chunk I/O + * + * TEST_ACTUAL_IO_CONTIGUOUS: + * H5D_contig_collective_write or H5D_contig_collective_read + * each process reports contiguous collective I/O + * + * TEST_ACTUAL_IO_NO_COLLECTIVE: + * Simple independent I/O. This tests that the defaults are properly set. + * + * TEST_ACTUAL_IO_RESET: + * Performs collective and then independent I/O with the same dxpl to + * make sure the property is correctly reset to the default on each use. + * Specifically, this test runs TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_MIX_DISAGREE + * (the most complex case that works on all builds) and then performs + * an independent read and write with the same dxpls. + * + * It may seem like TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_IND and + * TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_MIX have been accidentally + * left out. This is intentional; the other test cases sufficiently + * cover all cases for Multi Chunk No Opt I/O. + * + * Programmer: Jacob Gruber + * Date: 2011-04-06 + */ +static void +test_actual_io_mode(int selection_mode) { + H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_write = -1; + H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_read = -1; + H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_expected = -1; + H5D_mpio_actual_io_mode_t actual_io_mode_write = -1; + H5D_mpio_actual_io_mode_t actual_io_mode_read = -1; + H5D_mpio_actual_io_mode_t actual_io_mode_expected = -1; + const char * filename; + const char * test_name; + hbool_t multi_chunk_no_opt; + hbool_t multi_chunk_with_opt; + hbool_t is_chunked; + hbool_t is_collective; + int mpi_size = -1; + int mpi_rank = -1; + int length; + int * buffer; + int i; + MPI_Comm mpi_comm = MPI_COMM_NULL; + MPI_Info mpi_info = MPI_INFO_NULL; + hid_t fid = -1; + hid_t sid = -1; + hid_t dataset = -1; + hid_t data_type = H5T_NATIVE_INT; + hid_t fapl = -1; + hid_t mem_space = -1; + hid_t file_space = -1; + hid_t dcpl = -1; + hid_t dxpl_write = -1; + hid_t dxpl_read = -1; + hsize_t dims[RANK]; + hsize_t chunk_dims[RANK]; + hsize_t start[RANK]; + hsize_t stride[RANK]; + hsize_t count[RANK]; + hsize_t block[RANK]; + hbool_t use_gpfs = FALSE; + herr_t ret; + + /* Set up some flags to make some future if statements slightly more readable */ + multi_chunk_no_opt = ( + selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_IND || + selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_COL || + selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_MIX_DISAGREE ); + + /* Note: RESET performs the same tests as MULTI_CHUNK_MIX_DISAGREE and then + * tests independent I/O + */ + multi_chunk_with_opt = ( + selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_IND || + selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_COL || + selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX || + selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE || + selection_mode == TEST_ACTUAL_IO_RESET ); + + is_chunked = ( + selection_mode != TEST_ACTUAL_IO_CONTIGUOUS && + selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE); + + is_collective = selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE; + + /* Set up MPI parameters */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD,
&mpi_rank); + + MPI_Barrier(MPI_COMM_WORLD); + + HDassert(mpi_size >= 1); + + mpi_comm = MPI_COMM_WORLD; + mpi_info = MPI_INFO_NULL; + + filename = (const char *)GetTestParameters(); + HDassert(filename != NULL); + + /* Setup the file access template */ + fapl = create_faccess_plist(mpi_comm, mpi_info, facc_type, use_gpfs); + VRFY((fapl >= 0), "create_faccess_plist() succeeded"); + + /* Create the file */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + VRFY((fid >= 0), "H5Fcreate succeeded"); + + /* Create the basic Space */ + dims[0] = dim0; + dims[1] = dim1; + sid = H5Screate_simple (RANK, dims, NULL); + VRFY((sid >= 0), "H5Screate_simple succeeded"); + + /* Create the dataset creation plist */ + dcpl = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dcpl >= 0), "dataset creation plist created successfully"); + + /* If we are not testing contiguous datasets */ + if(is_chunked) { + /* Set up chunk information. */ + chunk_dims[0] = dims[0]/mpi_size; + chunk_dims[1] = dims[1]; + ret = H5Pset_chunk(dcpl, 2, chunk_dims); + VRFY((ret >= 0),"chunk creation property list succeeded"); + } + + /* Create the dataset */ + dataset = H5Dcreate2(fid, "actual_io", data_type, sid, H5P_DEFAULT, + dcpl, H5P_DEFAULT); + VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded"); + + /* Create the file dataspace */ + file_space = H5Dget_space(dataset); + VRFY((file_space >= 0), "H5Dget_space succeeded"); + + /* Choose a selection method based on the type of I/O we want to occur, + * and also set up some selection-dependeent test info. */ + switch(selection_mode) { + + /* Independent I/O with optimization */ + case TEST_ACTUAL_IO_MULTI_CHUNK_IND: + /* Since the dataset is chunked by row and each process selects a row, + * each process writes to a different chunk. This forces all I/O to be + * independent. + */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + test_name = "Multi Chunk - Independent"; + actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK; + actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT; + break; + + /* Collective I/O with optimization */ + case TEST_ACTUAL_IO_MULTI_CHUNK_COL: + /* The dataset is chunked by rows, so each process takes a column which + * spans all chunks. Since the processes write non-overlapping regular + * selections to each chunk, the operation is purely collective. + */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); + + test_name = "Multi Chunk - Collective"; + actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK; + actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE; + break; + + /* Mixed I/O with optimization */ + case TEST_ACTUAL_IO_MULTI_CHUNK_MIX: + /* A chunk will be assigned collective I/O only if it is selected by each + * process. To get mixed I/O, have the root select all chunks and each + * subsequent process select the first and nth chunk. The first chunk, + * accessed by all, will be assigned collective I/O while each other chunk + * will be accessed only by the root and the nth procecess and will be + * assigned independent I/O. Each process will access one chunk collectively + * and at least one chunk independently, reporting mixed I/O. 
+ */ + + if(mpi_rank == 0) { + /* Select the first column */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); + } else { + /* Select the first and the nth chunk in the nth column */ + block[0] = dim0 / mpi_size; + block[1] = dim1 / mpi_size; + count[0] = 2; + count[1] = 1; + stride[0] = mpi_rank * block[0]; + stride[1] = 1; + start[0] = 0; + start[1] = mpi_rank*block[1]; + } + + test_name = "Multi Chunk - Mixed"; + actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK; + actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED; + break; + + /* RESET tests that the properties are properly reset to defaults each time I/O is + * performed. To achieve this, we have RESET perform collective I/O (which would change + * the values from the defaults) followed by independent I/O (which should report the + * default values). RESET doesn't need to have a unique selection, so we reuse + * MULTI_CHUNK_MIX_DISAGREE, which was chosen because it is a complex case that works + * on all builds. The independent section of RESET can be found at the end of this function. + */ + case TEST_ACTUAL_IO_RESET: + + /* Mixed I/O with optimization and internal disagreement */ + case TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE: + /* A chunk will be assigned collective I/O only if it is selected by each + * process. To get mixed I/O with disagreement, assign process n to the + * first chunk and the nth chunk. The first chunk, selected by all, is + * assigned collective I/O, while each other process gets independent I/O. + * Since the root process will only access the first chunk, it will report + * collective I/O. The subsequent processes will access the first chunk + * collectively, and their other chunk independently, reporting mixed I/O. + */ + + if(mpi_rank == 0) { + /* Select the first chunk in the first column */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); + block[0] = block[0] / mpi_size; + } else { + /* Select the first and the nth chunk in the nth column */ + block[0] = dim0 / mpi_size; + block[1] = dim1 / mpi_size; + count[0] = 2; + count[1] = 1; + stride[0] = mpi_rank * block[0]; + stride[1] = 1; + start[0] = 0; + start[1] = mpi_rank*block[1]; + } + + /* If the testname was not already set by the RESET case */ + if (selection_mode == TEST_ACTUAL_IO_RESET) + test_name = "RESET"; + else + test_name = "Multi Chunk - Mixed (Disagreement)"; + + actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK; + + if(mpi_rank == 0) + actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE; + else + actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED; + + break; + + /* Collective I/O without optimization */ + case TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_COL: + /* The dataset is chunked by rows, so when each process takes a column, its + * selection spans all chunks. Since no process writes more chunks than any + * other, collective I/O is never broken. */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); + + test_name = "Multi Chunk No Opt - Collective"; + actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK_NO_OPT; + actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE; + break; + + + /* Mixed I/O without optimization with disagreement */ + case TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_MIX_DISAGREE: + /* Each process takes a column, but the root's column is shortened so that + * it only reads the first chunk. Since all the other processes are writing + * to more chunks, they will break collective after the first chunk.
+ */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); + if(mpi_rank == 0) + block[0] = block[0] / mpi_size; + + test_name = "Multi Chunk No Opt - Mixed (Disagreement)"; + actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK_NO_OPT; + + if(mpi_rank == 0) + actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE; + else + actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED; + + break; + + /* Linked Chunk I/O */ + case TEST_ACTUAL_IO_LINK_CHUNK: + /* Nothing special; link chunk I/O is forced in the dxpl settings. */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + test_name = "Link Chunk"; + actual_chunk_opt_mode_expected = H5D_MPIO_LINK_CHUNK; + actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE; + break; + + /* Contiguous Dataset */ + case TEST_ACTUAL_IO_CONTIGUOUS: + /* A non overlapping, regular selection in a contiguous dataset leads to + * collective I/O */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + test_name = "Contiguous"; + actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION; + actual_io_mode_expected = H5D_MPIO_CONTIGUOUS_COLLECTIVE; + break; + + case TEST_ACTUAL_IO_NO_COLLECTIVE: + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + test_name = "Independent"; + actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION; + actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE; + break; + + default: + test_name = "Undefined Selection Mode"; + actual_chunk_opt_mode_expected = -1; + actual_io_mode_expected = -1; + break; + } + + /* Reset the expected values to defulats if the MPI_POSIX driver is in use. + * This property is defined only for mpio, not MPI POSIX. */ + if (facc_type == FACC_MPIPOSIX) { + actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION; + actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE; + } + + ret = H5Sselect_hyperslab(file_space, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* Create a memory dataspace mirroring the dataset and select the same hyperslab + * as in the file space. + */ + mem_space = H5Screate_simple (RANK, dims, NULL); + VRFY((mem_space >= 0), "mem_space created"); + + ret = H5Sselect_hyperslab(mem_space, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + + /* Get the number of elements in the selection */ + length = dim0 * dim1; + + /* Allocate and initialize the buffer */ + buffer = (int *)HDmalloc(sizeof(int) * length); + VRFY((buffer != NULL), "malloc of buffer succeeded"); + for(i = 0; i < length; i++) + buffer[i] = i; + + /* Set up the dxpl for the write */ + dxpl_write = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded"); + + /* Set collective I/O properties in the dxpl. */ + if(is_collective) { + /* Request collective I/O */ + ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + + /* Set the threshold number of processes per chunk for link chunk I/O + * to twice mpi_size. This will prevent the threshold from ever being + * met, thus forcing multi chunk io instead of link chunk io. 
+ */ + if(multi_chunk_with_opt) { + ret = H5Pset_dxpl_mpio_chunk_opt_num(dxpl_write, (unsigned) mpi_size*2); + VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_num succeeded"); + + ret = H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl_write, (unsigned) 99); + VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_ratio succeeded"); + } + + /* Request multi chunk I/O without optimization */ + if(multi_chunk_no_opt) { + ret = H5Pset_dxpl_mpio_chunk_opt(dxpl_write, H5FD_MPIO_CHUNK_MULTI_IO); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + } + } + + /* Make a copy of the dxpl to test the read operation */ + dxpl_read = H5Pcopy(dxpl_write); + VRFY((dxpl_read >= 0), "H5Pcopy succeeded"); + + /* Write */ + ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer); + if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout); + VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded"); + + /* Retrieve actual io values */ + ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write); + VRFY((ret >= 0), "retrieving actual io mode succeeded" ); + + ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write); + VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded" ); + + /* Read */ + ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer); + if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout); + VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded"); + + /* Retrieve actual io values */ + ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read); + VRFY((ret >= 0), "retrieving actual io mode succeeded" ); + + ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read); + VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded" ); + + /* Check write vs read */ + VRFY((actual_io_mode_read == actual_io_mode_write), + "reading and writing are the same for actual_io_mode"); + VRFY((actual_chunk_opt_mode_read == actual_chunk_opt_mode_write), + "reading and writing are the same for actual_chunk_opt_mode"); + + + /* Test values */ + if(actual_chunk_opt_mode_expected != (unsigned) -1 && actual_io_mode_expected != (unsigned) -1) { + char message[100]; + sprintf(message, "Actual Chunk Opt Mode has the correct value for %s.\n",test_name); + VRFY((actual_chunk_opt_mode_write == actual_chunk_opt_mode_expected), message); + sprintf(message, "Actual IO Mode has the correct value for %s.\n",test_name); + VRFY((actual_io_mode_write == actual_io_mode_expected), message); + } else { + HDfprintf(stderr, "%s %d -> (%d,%d)\n", test_name, mpi_rank, + actual_chunk_opt_mode_write, actual_io_mode_write); + } + + /* To test that the property is successfully reset to the default, we perform some + * independent I/O after the collective I/O + */ + if (selection_mode == TEST_ACTUAL_IO_RESET) { + if (mpi_rank == 0) { + /* Switch to independent io */ + ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_INDEPENDENT); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + ret = H5Pset_dxpl_mpio(dxpl_read, H5FD_MPIO_INDEPENDENT); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + + /* Write */ + ret = H5Dwrite(dataset, data_type, H5S_ALL, H5S_ALL, dxpl_write, buffer); + VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded"); + + /* Check Properties */ + ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write); + VRFY( (ret >= 0), "retrieving actual io mode succeeded" ); + ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write); + VRFY( (ret >= 0), "retrieving actual chunk opt mode succeeded" ); + +
VRFY(actual_chunk_opt_mode_write == H5D_MPIO_NO_CHUNK_OPTIMIZATION, + "actual_chunk_opt_mode has correct value for reset write (independent)"); + VRFY(actual_io_mode_write == H5D_MPIO_NO_COLLECTIVE, + "actual_io_mode has correct value for reset write (independent)"); + + /* Read */ + ret = H5Dread(dataset, data_type, H5S_ALL, H5S_ALL, dxpl_read, buffer); + VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded"); + + /* Check Properties */ + ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read); + VRFY( (ret >= 0), "retrieving actual io mode succeeded" ); + ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read); + VRFY( (ret >= 0), "retrieving actual chunk opt mode succeeded" ); + + VRFY(actual_chunk_opt_mode_read == H5D_MPIO_NO_CHUNK_OPTIMIZATION, + "actual_chunk_opt_mode has correct value for reset read (independent)"); + VRFY(actual_io_mode_read == H5D_MPIO_NO_COLLECTIVE, + "actual_io_mode has correct value for reset read (independent)"); + } + } + + /* Release some resources */ + ret = H5Sclose(sid); + ret = H5Pclose(fapl); + ret = H5Pclose(dcpl); + ret = H5Pclose(dxpl_write); + ret = H5Pclose(dxpl_read); + ret = H5Dclose(dataset); + ret = H5Sclose(mem_space); + ret = H5Sclose(file_space); + ret = H5Fclose(fid); + HDfree(buffer); + return; +} + + +/* Function: actual_io_mode_tests + * + * Purpose: Tests all possible cases of the actual_io_mode property. + * + * Programmer: Jacob Gruber + * Date: 2011-04-06 + */ +void +actual_io_mode_tests(void) { + int mpi_size = -1; + int mpi_rank = -1; + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + test_actual_io_mode(TEST_ACTUAL_IO_NO_COLLECTIVE); + + test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_IND); + test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_COL); + + /* The Multi Chunk Mixed test requires at least three processes. */ + if (mpi_size > 2) + test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX); + else + HDfprintf(stdout, "Multi Chunk Mixed test requires 3 processes minimum\n"); + + test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE); + + test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_COL); + test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_MIX_DISAGREE); + + test_actual_io_mode(TEST_ACTUAL_IO_LINK_CHUNK); + test_actual_io_mode(TEST_ACTUAL_IO_CONTIGUOUS); + + test_actual_io_mode(TEST_ACTUAL_IO_RESET); + return; +} diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c index 07568d4..e3c72ef 100644 --- a/testpar/testphdf5.c +++ b/testpar/testphdf5.c @@ -498,6 +498,9 @@ int main(int argc, char **argv) "test mpi derived type management", PARATESTFILE); + AddTest("actualio", actual_io_mode_tests, NULL, + "test actual io mode property", + PARATESTFILE); /* Display testing information */ TestInfo(argv[0]); diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h index 2b83047..11656f9 100644 --- a/testpar/testphdf5.h +++ b/testpar/testphdf5.h @@ -160,6 +160,19 @@ enum H5TEST_COLL_CHUNK_API {API_NONE=0,API_LINK_HARD, #define NPOINTS 4 /* Number of points that will be selected and overwritten */ +/* Definitions of the selection mode for the test_actual_io_mode function.
*/ +#define TEST_ACTUAL_IO_NO_COLLECTIVE 0 +#define TEST_ACTUAL_IO_RESET 1 +#define TEST_ACTUAL_IO_MULTI_CHUNK_IND 2 +#define TEST_ACTUAL_IO_MULTI_CHUNK_COL 3 +#define TEST_ACTUAL_IO_MULTI_CHUNK_MIX 4 +#define TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE 5 +#define TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_IND 6 +#define TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_COL 7 +#define TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_MIX_DISAGREE 8 +#define TEST_ACTUAL_IO_LINK_CHUNK 9 +#define TEST_ACTUAL_IO_CONTIGUOUS 10 + /* Don't erase these lines, they are put here for debugging purposes */ /* #define MSPACE1_RANK 1 @@ -173,9 +186,9 @@ enum H5TEST_COLL_CHUNK_API {API_NONE=0,API_LINK_HARD, #define MSPACE_DIM1 8 #define MSPACE_DIM2 9 #define NPOINTS 4 +*/ /* end of debugging macro */ -*/ /* end of debugging macro */ /* type definitions */ typedef struct H5Ptest_param_t /* holds extra test parameters */ { @@ -222,6 +235,7 @@ void dataset_readAll(void); void extend_readInd(void); void extend_readAll(void); void none_selection_chunk(void); +void actual_io_mode_tests(void); void test_chunk_alloc(void); void test_filter_read(void); void compact_dataset(void); |
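One closing note on the H5D_mpio_actual_io_mode_t layout added to H5Ppublic.h above: the chunked values form a bit field, so the per-chunk loops in H5D_multi_chunk_collective_io() and H5D_multi_chunk_collective_io_no_opt() can simply OR in the mode used for each chunk, and a rank that performs both independent and collective chunk I/O ends up at H5D_MPIO_CHUNK_MIXED without ever inspecting the running value. The tiny sketch below is not part of the patch and only assumes the enum values defined above:

/* Illustration of the bit-field accumulation used by the multi-chunk paths */
#include <assert.h>
#include "hdf5.h"

int main(void)
{
    /* Start from the default, then OR in the mode used for each chunk */
    H5D_mpio_actual_io_mode_t mode = H5D_MPIO_NO_COLLECTIVE;   /* 0x0 */

    mode = mode | H5D_MPIO_CHUNK_INDEPENDENT;   /* one chunk done independently (0x1) */
    mode = mode | H5D_MPIO_CHUNK_COLLECTIVE;    /* another chunk done collectively (0x2) */

    assert(mode == H5D_MPIO_CHUNK_MIXED);       /* 0x1 | 0x2 == 0x3 */
    return 0;
}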