diff options
Diffstat (limited to 'src')
-rw-r--r-- | src/H5Dchunk.c | 999 | ||||
-rw-r--r-- | src/H5Dcompact.c | 25 | ||||
-rw-r--r-- | src/H5Dcontig.c | 279 | ||||
-rw-r--r-- | src/H5Defl.c | 28 | ||||
-rw-r--r-- | src/H5Dint.c | 54 | ||||
-rw-r--r-- | src/H5Dio.c | 1528 | ||||
-rw-r--r-- | src/H5Dmpio.c | 1632 | ||||
-rw-r--r-- | src/H5Dpkg.h | 218 | ||||
-rw-r--r-- | src/H5Dprivate.h | 5 | ||||
-rw-r--r-- | src/H5Dpublic.h | 26 | ||||
-rw-r--r-- | src/H5Dscatgath.c | 26 | ||||
-rw-r--r-- | src/H5Dselect.c | 14 | ||||
-rw-r--r-- | src/H5Dvirtual.c | 143 | ||||
-rw-r--r-- | src/H5FDmpi.h | 1 | ||||
-rw-r--r-- | src/H5FDmpio.c | 8 | ||||
-rw-r--r-- | src/H5Fmpi.c | 8 | ||||
-rw-r--r-- | src/H5Ppublic.h | 3 | ||||
-rw-r--r-- | src/H5trace.c | 8 |
18 files changed, 2483 insertions, 2522 deletions
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index c3a3d9d..7c4eb15 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -69,8 +69,8 @@ /****************/ /* Macros for iterating over chunks to operate on */ -#define H5D_CHUNK_GET_FIRST_NODE(map) (map->use_single ? (H5SL_node_t *)(1) : H5SL_first(map->sel_chunks)) -#define H5D_CHUNK_GET_NODE_INFO(map, node) (map->use_single ? map->single_chunk_info : (H5D_chunk_info_t *)H5SL_item(node)) +#define H5D_CHUNK_GET_FIRST_NODE(map) (map->use_single ? (H5SL_node_t *)(1) : H5SL_first(map->dset_sel_pieces)) +#define H5D_CHUNK_GET_NODE_INFO(map, node) (map->use_single ? map->single_piece_info : (H5D_piece_info_t *)H5SL_item(node)) #define H5D_CHUNK_GET_NEXT_NODE(map, node) (map->use_single ? (H5SL_node_t *)NULL : H5SL_next(node)) /* Sanity check on chunk index types: commonly used by a lot of routines in this file */ @@ -205,14 +205,6 @@ typedef struct H5D_chunk_readvv_ud_t { hid_t dxpl_id; /* DXPL for operation */ } H5D_chunk_readvv_ud_t; -/* Callback info for file selection iteration */ -typedef struct H5D_chunk_file_iter_ud_t { - H5D_chunk_map_t *fm; /* File->memory chunk mapping info */ -#ifdef H5_HAVE_PARALLEL - const H5D_io_info_t *io_info; /* I/O info for operation */ -#endif /* H5_HAVE_PARALLEL */ -} H5D_chunk_file_iter_ud_t; - #ifdef H5_HAVE_PARALLEL /* information to construct a collective I/O operation for filling chunks */ typedef struct H5D_chunk_coll_info_t { @@ -227,19 +219,17 @@ typedef struct H5D_chunk_coll_info_t { /* Chunked layout operation callbacks */ static herr_t H5D__chunk_construct(H5F_t *f, H5D_t *dset); -static herr_t H5D__chunk_init(H5F_t *f, hid_t dxpl_id, const H5D_t *dset, - hid_t dapl_id); -static herr_t H5D__chunk_io_init(const H5D_io_info_t *io_info, +static herr_t H5D__chunk_init(H5F_t *f, hid_t dxpl_id, const H5D_t *dset, hid_t dapl_id); +static herr_t H5D__chunk_io_init(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, const H5S_t *file_space, - const H5S_t 
*mem_space, H5D_chunk_map_t *fm); + const H5S_t *mem_space, H5D_dset_info_t *dinfo); static herr_t H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space, - H5D_chunk_map_t *fm); + H5D_dset_info_t *dinfo); static herr_t H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space, - H5D_chunk_map_t *fm); + H5D_dset_info_t *dinfo); static herr_t H5D__chunk_flush(H5D_t *dset, hid_t dxpl_id); -static herr_t H5D__chunk_io_term(const H5D_chunk_map_t *fm); static herr_t H5D__chunk_dest(H5D_t *dset, hid_t dxpl_id); /* "Nonexistent" layout operation callback */ @@ -260,16 +250,17 @@ static herr_t H5D__chunk_cinfo_cache_update(H5D_chunk_cached_t *last, const H5D_chunk_ud_t *udata); static hbool_t H5D__chunk_cinfo_cache_found(const H5D_chunk_cached_t *last, H5D_chunk_ud_t *udata); -static herr_t H5D__free_chunk_info(void *item, void *key, void *opdata); -static herr_t H5D__create_chunk_map_single(H5D_chunk_map_t *fm, - const H5D_io_info_t *io_info); -static herr_t H5D__create_chunk_file_map_hyper(H5D_chunk_map_t *fm, +static herr_t H5D__free_piece_info(void *item, void *key, void *opdata); +static herr_t H5D__create_piece_map_single(H5D_dset_info_t *di, + const H5D_io_info_t* io_info); +static herr_t H5D__create_piece_file_map_hyper(H5D_dset_info_t *di, const H5D_io_info_t *io_info); -static herr_t H5D__create_chunk_mem_map_hyper(const H5D_chunk_map_t *fm); -static herr_t H5D__chunk_file_cb(void *elem, const H5T_t *type, unsigned ndims, - const hsize_t *coords, void *fm); -static herr_t H5D__chunk_mem_cb(void *elem, const H5T_t *type, unsigned ndims, - const hsize_t *coords, void *fm); +static herr_t H5D__create_piece_mem_map_hyper(const H5D_io_info_t *io_info, + const H5D_dset_info_t *dinfo); +static herr_t H5D__piece_file_cb(void H5_ATTR_UNUSED *elem, const H5T_t *type, unsigned ndims, + const hsize_t *coords, void 
*_opdata); +static herr_t H5D__piece_mem_cb(void *elem, const H5T_t *type, unsigned ndims, + const hsize_t *coords, void *_opdata); static unsigned H5D__chunk_hash_val(const H5D_shared_t *shared, const hsize_t *scaled); static herr_t H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t *dxpl_cache, H5D_rdcc_ent_t *ent, hbool_t reset); @@ -305,13 +296,13 @@ const H5D_layout_ops_t H5D_LOPS_CHUNK[1] = {{ H5D__chunk_read, H5D__chunk_write, #ifdef H5_HAVE_PARALLEL - H5D__chunk_collective_read, - H5D__chunk_collective_write, + H5D__collective_read, + H5D__collective_write, #endif /* H5_HAVE_PARALLEL */ NULL, NULL, H5D__chunk_flush, - H5D__chunk_io_term, + H5D__piece_io_term, H5D__chunk_dest }}; @@ -345,8 +336,8 @@ H5FL_SEQ_DEFINE_STATIC(H5D_rdcc_ent_ptr_t); /* Declare a free list to manage H5D_rdcc_ent_t objects */ H5FL_DEFINE_STATIC(H5D_rdcc_ent_t); -/* Declare a free list to manage the H5D_chunk_info_t struct */ -H5FL_DEFINE(H5D_chunk_info_t); +/* Declare a free list to manage the H5D_piece_info_t struct */ +H5FL_DEFINE(H5D_piece_info_t); /* Declare a free list to manage the chunk sequence information */ H5FL_BLK_DEFINE_STATIC(chunk); @@ -811,25 +802,23 @@ H5D__chunk_is_space_alloc(const H5O_storage_t *storage) FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__chunk_is_space_alloc() */ - /*------------------------------------------------------------------------- * Function: H5D__chunk_io_init * * Purpose: Performs initialization before any sort of I/O on the raw data + * This was derived from H5D__chunk_io_init for multi-dset work. 
* * Return: Non-negative on success/Negative on failure * - * Programmer: Quincey Koziol - * Thursday, March 20, 2008 - * + * Programmer: Jonathan Kim Nov, 2013 *------------------------------------------------------------------------- */ static herr_t -H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info, +H5D__chunk_io_init(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space, - H5D_chunk_map_t *fm) + H5D_dset_info_t *dinfo) { - const H5D_t *dataset = io_info->dset; /* Local pointer to dataset info */ + const H5D_t *dataset = dinfo->dset; /* Local pointer to dataset info */ const H5T_t *mem_type = type_info->mem_type; /* Local pointer to memory datatype */ H5S_t *tmp_mspace = NULL; /* Temporary memory dataspace */ hssize_t old_offset[H5O_LAYOUT_NDIMS]; /* Old selection offset */ @@ -841,22 +830,24 @@ H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_inf H5SL_node_t *curr_node; /* Current node in skip list */ char bogus; /* "bogus" buffer to pass to selection iterator */ unsigned u; /* Local index variable */ + H5D_io_info_wrap_t io_info_wrap; herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_STATIC /* Get layout for dataset */ - fm->layout = &(dataset->shared->layout); - fm->nelmts = nelmts; + dinfo->layout = &(dataset->shared->layout); + /* num of element selected */ + dinfo->nelmts = nelmts; /* Check if the memory space is scalar & make equivalent memory space */ if((sm_ndims = H5S_GET_EXTENT_NDIMS(mem_space)) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimension number") /* Set the number of dimensions for the memory dataspace */ - H5_CHECKED_ASSIGN(fm->m_ndims, unsigned, sm_ndims, int); + H5_CHECKED_ASSIGN(dinfo->m_ndims, unsigned, sm_ndims, int); /* Get rank for file dataspace */ - fm->f_ndims = f_ndims = dataset->shared->layout.u.chunk.ndims - 1; + dinfo->f_ndims = f_ndims = 
dataset->shared->layout.u.chunk.ndims - 1; /* Normalize hyperslab selections by adjusting them by the offset */ /* (It might be worthwhile to normalize both the file and memory dataspaces @@ -868,31 +859,17 @@ H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_inf HGOTO_ERROR(H5E_DATASET, H5E_BADSELECT, FAIL, "unable to normalize dataspace by offset") /* Decide the number of chunks in each dimension*/ - for(u = 0; u < f_ndims; u++) { + for(u = 0; u < f_ndims; u++) /* Keep the size of the chunk dimensions as hsize_t for various routines */ - fm->chunk_dim[u] = fm->layout->u.chunk.dim[u]; - } /* end for */ - -#ifdef H5_HAVE_PARALLEL - /* Calculate total chunk in file map*/ - fm->select_chunk = NULL; - if(io_info->using_mpi_vfd) { - H5_CHECK_OVERFLOW(fm->layout->u.chunk.nchunks, hsize_t, size_t); - if(fm->layout->u.chunk.nchunks) { - if(NULL == (fm->select_chunk = (H5D_chunk_info_t **)H5MM_calloc((size_t)fm->layout->u.chunk.nchunks * sizeof(H5D_chunk_info_t *)))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate chunk info") - } - } /* end if */ -#endif /* H5_HAVE_PARALLEL */ - + dinfo->chunk_dim[u] = dinfo->layout->u.chunk.dim[u]; /* Initialize "last chunk" information */ - fm->last_index = (hsize_t)-1; - fm->last_chunk_info = NULL; + dinfo->last_index = (hsize_t)-1; + dinfo->last_piece_info = NULL; /* Point at the dataspaces */ - fm->file_space = file_space; - fm->mem_space = mem_space; + dinfo->file_space = file_space; + dinfo->mem_space = mem_space; /* Special case for only one element in selection */ /* (usually appending a record) */ @@ -902,8 +879,8 @@ H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_inf #endif /* H5_HAVE_PARALLEL */ && H5S_SEL_ALL != H5S_GET_SELECT_TYPE(file_space)) { /* Initialize skip list for chunk selections */ - fm->sel_chunks = NULL; - fm->use_single = TRUE; + //io_info->sel_pieces = NULL; + dinfo->use_single = TRUE; /* Initialize single chunk dataspace */ if(NULL == 
dataset->shared->cache.chunk.single_space) { @@ -912,29 +889,29 @@ H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_inf HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCOPY, FAIL, "unable to copy file space") /* Resize chunk's dataspace dimensions to size of chunk */ - if(H5S_set_extent_real(dataset->shared->cache.chunk.single_space, fm->chunk_dim) < 0) + if(H5S_set_extent_real(dataset->shared->cache.chunk.single_space, dinfo->chunk_dim) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSET, FAIL, "can't adjust chunk dimensions") /* Set the single chunk dataspace to 'all' selection */ if(H5S_select_all(dataset->shared->cache.chunk.single_space, TRUE) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTSELECT, FAIL, "unable to set all selection") } /* end if */ - fm->single_space = dataset->shared->cache.chunk.single_space; - HDassert(fm->single_space); + dinfo->single_space = dataset->shared->cache.chunk.single_space; + HDassert(dinfo->single_space); - /* Allocate the single chunk information */ - if(NULL == dataset->shared->cache.chunk.single_chunk_info) { - if(NULL == (dataset->shared->cache.chunk.single_chunk_info = H5FL_MALLOC(H5D_chunk_info_t))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate chunk info") + /* Allocate the single piece information */ + if(NULL == dataset->shared->cache.chunk.single_piece_info) { + if(NULL == (dataset->shared->cache.chunk.single_piece_info = H5FL_MALLOC(H5D_piece_info_t))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate piece info") } /* end if */ - fm->single_chunk_info = dataset->shared->cache.chunk.single_chunk_info; - HDassert(fm->single_chunk_info); + dinfo->single_piece_info = dataset->shared->cache.chunk.single_piece_info; + HDassert(dinfo->single_piece_info); /* Reset chunk template information */ - fm->mchunk_tmpl = NULL; + dinfo->mchunk_tmpl = NULL; /* Set up chunk mapping for single element */ - if(H5D__create_chunk_map_single(fm, io_info) < 0) + if(H5D__create_piece_map_single(dinfo, io_info) < 
0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create chunk selections for single element") } /* end if */ else { @@ -945,20 +922,21 @@ H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_inf if(NULL == (dataset->shared->cache.chunk.sel_chunks = H5SL_create(H5SL_TYPE_HSIZE, NULL))) HGOTO_ERROR(H5E_DATASET, H5E_CANTCREATE, FAIL, "can't create skip list for chunk selections") } /* end if */ - fm->sel_chunks = dataset->shared->cache.chunk.sel_chunks; - HDassert(fm->sel_chunks); + dinfo->dset_sel_pieces = dataset->shared->cache.chunk.sel_chunks; + HDassert(dinfo->dset_sel_pieces); + HDassert(io_info->sel_pieces); /* We are not using single element mode */ - fm->use_single = FALSE; + dinfo->use_single = FALSE; /* Get type of selection on disk & in memory */ - if((fm->fsel_type = H5S_GET_SELECT_TYPE(file_space)) < H5S_SEL_NONE) + if((dinfo->fsel_type = H5S_GET_SELECT_TYPE(file_space)) < H5S_SEL_NONE) HGOTO_ERROR(H5E_DATASET, H5E_BADSELECT, FAIL, "unable to get type of selection") - if((fm->msel_type = H5S_GET_SELECT_TYPE(mem_space)) < H5S_SEL_NONE) + if((dinfo->msel_type = H5S_GET_SELECT_TYPE(mem_space)) < H5S_SEL_NONE) HGOTO_ERROR(H5E_DATASET, H5E_BADSELECT, FAIL, "unable to get type of selection") /* If the selection is NONE or POINTS, set the flag to FALSE */ - if(fm->fsel_type == H5S_SEL_POINTS || fm->fsel_type == H5S_SEL_NONE) + if(dinfo->fsel_type == H5S_SEL_POINTS || dinfo->fsel_type == H5S_SEL_NONE) sel_hyper_flag = FALSE; else sel_hyper_flag = TRUE; @@ -966,61 +944,60 @@ H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_inf /* Check if file selection is a not a hyperslab selection */ if(sel_hyper_flag) { /* Build the file selection for each chunk */ - if(H5D__create_chunk_file_map_hyper(fm, io_info) < 0) + if(H5D__create_piece_file_map_hyper(dinfo, io_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create file chunk selections") /* Clean file chunks' hyperslab span "scratch" 
information */ - curr_node = H5SL_first(fm->sel_chunks); + curr_node = H5SL_first(dinfo->dset_sel_pieces); while(curr_node) { - H5D_chunk_info_t *chunk_info; /* Pointer chunk information */ + H5D_piece_info_t *piece_info; /* Pointer piece information */ - /* Get pointer to chunk's information */ - chunk_info = (H5D_chunk_info_t *)H5SL_item(curr_node); - HDassert(chunk_info); + /* Get pointer to piece's information */ + piece_info = (H5D_piece_info_t *)H5SL_item(curr_node); + HDassert(piece_info); - /* Clean hyperslab span's "scratch" information */ - if(H5S_hyper_reset_scratch(chunk_info->fspace) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to reset span scratch info") + /* only for current dset */ + if (piece_info->dset_info == dinfo) { + /* Clean hyperslab span's "scratch" information */ + if(H5S_hyper_reset_scratch(piece_info->fspace) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to reset span scratch info") + } /* end if */ - /* Get the next chunk node in the skip list */ + /* Get the next piece node in the skip list */ curr_node = H5SL_next(curr_node); } /* end while */ } /* end if */ else { H5S_sel_iter_op_t iter_op; /* Operator for iteration */ - H5D_chunk_file_iter_ud_t udata; /* User data for iteration */ /* Create temporary datatypes for selection iteration */ if(NULL == (file_type = H5T_copy(dataset->shared->type, H5T_COPY_ALL))) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTCOPY, FAIL, "unable to copy file datatype") - /* Initialize the user data */ - udata.fm = fm; -#ifdef H5_HAVE_PARALLEL - udata.io_info = io_info; -#endif /* H5_HAVE_PARALLEL */ - + /* set opdata for H5D__piece_mem_cb */ + io_info_wrap.io_info = io_info; + io_info_wrap.dinfo = dinfo; iter_op.op_type = H5S_SEL_ITER_OP_LIB; - iter_op.u.lib_op = H5D__chunk_file_cb; + iter_op.u.lib_op = H5D__piece_file_cb; /* Spaces might not be the same shape, iterate over the file selection directly */ - if(H5S_select_iterate(&bogus, file_type, file_space, &iter_op, &udata) < 0) + 
if(H5S_select_iterate(&bogus, file_type, file_space, &iter_op, &io_info_wrap) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create file chunk selections") /* Reset "last chunk" info */ - fm->last_index = (hsize_t)-1; - fm->last_chunk_info = NULL; + dinfo->last_index = (hsize_t)-1; + dinfo->last_piece_info = NULL; } /* end else */ /* Build the memory selection for each chunk */ if(sel_hyper_flag && H5S_select_shape_same(file_space, mem_space) == TRUE) { /* Reset chunk template information */ - fm->mchunk_tmpl = NULL; + dinfo->mchunk_tmpl = NULL; - /* If the selections are the same shape, use the file chunk information - * to generate the memory chunk information quickly. + /* If the selections are the same shape, use the file chunk + * information to generate the memory chunk information quickly. */ - if(H5D__create_chunk_mem_map_hyper(fm) < 0) + if(H5D__create_piece_mem_map_hyper(io_info, dinfo) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create memory chunk selections") } /* end if */ else { @@ -1036,7 +1013,7 @@ H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_inf HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to de-select memory space") /* Save chunk template information */ - fm->mchunk_tmpl = tmp_mspace; + dinfo->mchunk_tmpl = tmp_mspace; /* Create temporary datatypes for selection iteration */ if(!file_type) { @@ -1047,33 +1024,37 @@ H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_inf /* Create selection iterator for memory selection */ if(0 == (elmt_size = H5T_get_size(mem_type))) HGOTO_ERROR(H5E_DATATYPE, H5E_BADSIZE, FAIL, "datatype size invalid") - if(H5S_select_iter_init(&(fm->mem_iter), mem_space, elmt_size) < 0) + if(H5S_select_iter_init(&(dinfo->mem_iter), mem_space, elmt_size) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to initialize selection iterator") iter_init = TRUE; /* Selection iteration info has been initialized */ + /* set opdata 
for H5D__piece_mem_cb */ + io_info_wrap.io_info = io_info; + io_info_wrap.dinfo = dinfo; iter_op.op_type = H5S_SEL_ITER_OP_LIB; - iter_op.u.lib_op = H5D__chunk_mem_cb; + iter_op.u.lib_op = H5D__piece_mem_cb; /* Spaces aren't the same shape, iterate over the memory selection directly */ - if(H5S_select_iterate(&bogus, file_type, file_space, &iter_op, fm) < 0) + if(H5S_select_iterate(&bogus, file_type, file_space, &iter_op, &io_info_wrap) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create memory chunk selections") /* Clean up hyperslab stuff, if necessary */ - if(fm->msel_type != H5S_SEL_POINTS) { - /* Clean memory chunks' hyperslab span "scratch" information */ - curr_node = H5SL_first(fm->sel_chunks); + if(dinfo->msel_type != H5S_SEL_POINTS) { + /* Clean memory pieces' hyperslab span "scratch" information */ + curr_node = H5SL_first(dinfo->dset_sel_pieces); + while(curr_node) { - H5D_chunk_info_t *chunk_info; /* Pointer chunk information */ + H5D_piece_info_t *piece_info; /* Pointer piece information */ - /* Get pointer to chunk's information */ - chunk_info = (H5D_chunk_info_t *)H5SL_item(curr_node); - HDassert(chunk_info); + /* Get pointer to piece's information */ + piece_info = (H5D_piece_info_t *)H5SL_item(curr_node); + HDassert(piece_info); /* Clean hyperslab span's "scratch" information */ - if(H5S_hyper_reset_scratch(chunk_info->mspace) < 0) + if(H5S_hyper_reset_scratch(piece_info->mspace) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to reset span scratch info") - /* Get the next chunk node in the skip list */ + /* Get the next piece node in the skip list */ curr_node = H5SL_next(curr_node); } /* end while */ } /* end if */ @@ -1083,28 +1064,23 @@ H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_inf done: /* Release the [potentially partially built] chunk mapping information if an error occurs */ if(ret_value < 0) { - if(tmp_mspace && !fm->mchunk_tmpl) { + if(tmp_mspace && !dinfo->mchunk_tmpl) 
if(H5S_close(tmp_mspace) < 0) HDONE_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "can't release memory chunk dataspace template") - } /* end if */ - if(H5D__chunk_io_term(fm) < 0) + if(H5D__piece_io_term(io_info, dinfo) < 0) HDONE_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release chunk mapping") } /* end if */ - /* Reset the global dataspace info */ - fm->file_space = NULL; - fm->mem_space = NULL; - - if(iter_init && H5S_SELECT_ITER_RELEASE(&(fm->mem_iter)) < 0) + if(iter_init && H5S_SELECT_ITER_RELEASE(&(dinfo->mem_iter)) < 0) HDONE_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release selection iterator") if(file_type && (H5T_close(file_type) < 0)) HDONE_ERROR(H5E_DATATYPE, H5E_CANTFREE, FAIL, "Can't free temporary datatype") - if(file_space_normalized) { + + if(file_space_normalized) /* (Casting away const OK -QAK) */ if(H5S_hyper_denormalize_offset((H5S_t *)file_space, old_offset) < 0) HDONE_ERROR(H5E_DATASET, H5E_BADSELECT, FAIL, "unable to normalize dataspace by offset") - } /* end if */ FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__chunk_io_init() */ @@ -1205,149 +1181,148 @@ H5D__chunk_mem_realloc(void *chk, size_t size, const H5O_pline_t *pline) } /* H5D__chunk_mem_realloc() */ -/*-------------------------------------------------------------------------- - NAME - H5D__free_chunk_info - PURPOSE - Internal routine to destroy a chunk info node - USAGE - void H5D__free_chunk_info(chunk_info) - void *chunk_info; IN: Pointer to chunk info to destroy - RETURNS - No return value - DESCRIPTION - Releases all the memory for a chunk info node. 
Called by H5SL_free - GLOBAL VARIABLES - COMMENTS, BUGS, ASSUMPTIONS - EXAMPLES - REVISION LOG ---------------------------------------------------------------------------*/ +/*------------------------------------------------------------------------- + * Function: H5D__free_piece_info + * + * Purpose: Performs initialization before any sort of I/O on the raw data + * This was derived from H5D__free_chunk_info for multi-dset work. + * + * PURPOSE + * Releases all the memory for a piece info node. + * + * Parameter + * H5D_piece_info_t *item; IN: Pointer to piece info to destroy + * + * RETURNS + * No return value + * + * Programmer: Jonathan Kim Nov, 2013 + *------------------------------------------------------------------------- + */ static herr_t -H5D__free_chunk_info(void *item, void H5_ATTR_UNUSED *key, void H5_ATTR_UNUSED *opdata) +H5D__free_piece_info(void *item, void H5_ATTR_UNUSED *key, void H5_ATTR_UNUSED *opdata) { - H5D_chunk_info_t *chunk_info = (H5D_chunk_info_t *)item; + H5D_piece_info_t *piece_info = (H5D_piece_info_t *)item; FUNC_ENTER_STATIC_NOERR - HDassert(chunk_info); + HDassert(piece_info); - /* Close the chunk's file dataspace, if it's not shared */ - if(!chunk_info->fspace_shared) - (void)H5S_close(chunk_info->fspace); + /* Close the piece's file dataspace, if it's not shared */ + if(!piece_info->fspace_shared) + (void)H5S_close(piece_info->fspace); else - H5S_select_all(chunk_info->fspace, TRUE); + H5S_select_all(piece_info->fspace, TRUE); - /* Close the chunk's memory dataspace, if it's not shared */ - if(!chunk_info->mspace_shared && chunk_info->mspace) - (void)H5S_close(chunk_info->mspace); + /* Close the piece's memory dataspace, if it's not shared */ + if(!piece_info->mspace_shared && piece_info->mspace) + (void)H5S_close(piece_info->mspace); - /* Free the actual chunk info */ - chunk_info = H5FL_FREE(H5D_chunk_info_t, chunk_info); + /* Free the actual piece info */ + piece_info = H5FL_FREE(H5D_piece_info_t, piece_info); 
FUNC_LEAVE_NOAPI(0) -} /* H5D__free_chunk_info() */ +} /* H5D__free_piece_info() */ - /*------------------------------------------------------------------------- - * Function: H5D__create_chunk_map_single + * Function: H5D__create_piece_map_single * - * Purpose: Create chunk selections when appending a single record + * Purpose: Create piece selections when appending a single record + * This was derived from H5D__create_chunk_map_single for + * multi-dset work. * * Return: Non-negative on success/Negative on failure * - * Programmer: Quincey Koziol - * Tuesday, November 20, 2007 - * + * Programmer: Jonathan Kim Nov, 2013 *------------------------------------------------------------------------- */ static herr_t -H5D__create_chunk_map_single(H5D_chunk_map_t *fm, const H5D_io_info_t -#ifndef H5_HAVE_PARALLEL - H5_ATTR_UNUSED -#endif /* H5_HAVE_PARALLEL */ - *io_info) +H5D__create_piece_map_single(H5D_dset_info_t *di, + const H5D_io_info_t *io_info) { - H5D_chunk_info_t *chunk_info; /* Chunk information to insert into skip list */ + H5D_piece_info_t *piece_info; /* Piece information to insert into skip list */ hsize_t coords[H5O_LAYOUT_NDIMS]; /* Coordinates of chunk */ hsize_t sel_start[H5O_LAYOUT_NDIMS]; /* Offset of low bound of file selection */ hsize_t sel_end[H5O_LAYOUT_NDIMS]; /* Offset of high bound of file selection */ unsigned u; /* Local index variable */ + H5D_chunk_ud_t udata; /* User data for querying piece info */ herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_STATIC + FUNC_ENTER_STATIC_TAG(io_info->md_dxpl_id, di->dset->oloc.addr, FAIL) /* Sanity check */ - HDassert(fm->f_ndims > 0); + HDassert(di->f_ndims > 0); /* Get coordinate for selection */ - if(H5S_SELECT_BOUNDS(fm->file_space, sel_start, sel_end) < 0) + if(H5S_SELECT_BOUNDS(di->file_space, sel_start, sel_end) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't get file selection bound info") - /* Initialize the 'single chunk' file & memory chunk information */ - chunk_info = 
fm->single_chunk_info; - chunk_info->chunk_points = 1; + /* Initialize the 'single piece' file & memory piece information */ + piece_info = di->single_piece_info; + piece_info->piece_points = 1; /* Set chunk location & hyperslab size */ - for(u = 0; u < fm->f_ndims; u++) { + for(u = 0; u < di->f_ndims; u++) { HDassert(sel_start[u] == sel_end[u]); - chunk_info->scaled[u] = sel_start[u] / fm->layout->u.chunk.dim[u]; - coords[u] = chunk_info->scaled[u] * fm->layout->u.chunk.dim[u]; + piece_info->scaled[u] = sel_start[u] / di->layout->u.chunk.dim[u]; + coords[u] = piece_info->scaled[u] * di->layout->u.chunk.dim[u]; } /* end for */ - chunk_info->scaled[fm->f_ndims] = 0; + piece_info->scaled[di->f_ndims] = 0; /* Calculate the index of this chunk */ - chunk_info->index = H5VM_array_offset_pre(fm->f_ndims, fm->layout->u.chunk.down_chunks, chunk_info->scaled); + piece_info->index = H5VM_array_offset_pre(di->f_ndims, di->layout->u.chunk.down_chunks, piece_info->scaled); /* Copy selection for file's dataspace into chunk dataspace */ - if(H5S_select_copy(fm->single_space, fm->file_space, FALSE) < 0) + if(H5S_select_copy(di->single_space, di->file_space, FALSE) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCOPY, FAIL, "unable to copy file selection") /* Move selection back to have correct offset in chunk */ - if(H5S_SELECT_ADJUST_U(fm->single_space, coords) < 0) + if(H5S_SELECT_ADJUST_U(di->single_space, coords) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSELECT, FAIL, "can't adjust chunk selection") -#ifdef H5_HAVE_PARALLEL - /* store chunk selection information */ - if(io_info->using_mpi_vfd) - fm->select_chunk[chunk_info->index] = chunk_info; -#endif /* H5_HAVE_PARALLEL */ - /* Set the file dataspace for the chunk to the shared 'single' dataspace */ - chunk_info->fspace = fm->single_space; + piece_info->fspace = di->single_space; /* Indicate that the chunk's file dataspace is shared */ - chunk_info->fspace_shared = TRUE; + piece_info->fspace_shared = TRUE; /* Just point at the memory 
dataspace & selection */ /* (Casting away const OK -QAK) */ - chunk_info->mspace = (H5S_t *)fm->mem_space; + piece_info->mspace = (H5S_t *)di->mem_space; /* Indicate that the chunk's memory dataspace is shared */ - chunk_info->mspace_shared = TRUE; + piece_info->mspace_shared = TRUE; + + /* make connection to related dset info from this piece_info */ + piece_info->dset_info = di; + + /* get piece file address */ + if(H5D__chunk_lookup(piece_info->dset_info->dset, io_info->md_dxpl_id, piece_info->scaled, &udata) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address") + piece_info->faddr = udata.chunk_block.offset; + + /* Insert piece into global piece skiplist */ + if(H5SL_insert(io_info->sel_pieces, piece_info, &piece_info->faddr) < 0) + HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINSERT, FAIL, "can't insert chunk into skip list") done: - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D__create_chunk_map_single() */ + FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL) +} /* end H5D__create_piece_map_single() */ - /*------------------------------------------------------------------------- - * Function: H5D__create_chunk_file_map_hyper + * Function: H5D__create_piece_file_map_hyper * * Purpose: Create all chunk selections in file. + * This was derived from H5D__create_chunk_file_map_hyper for + * multi-dset work. 
* * Return: Non-negative on success/Negative on failure * - * Programmer: Quincey Koziol - * Thursday, May 29, 2003 - * + * Programmer: Jonathan Kim Nov, 2013 *------------------------------------------------------------------------- */ static herr_t -H5D__create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t -#ifndef H5_HAVE_PARALLEL - H5_ATTR_UNUSED -#endif /* H5_HAVE_PARALLEL */ - *io_info) +H5D__create_piece_file_map_hyper(H5D_dset_info_t *dinfo, const H5D_io_info_t *io_info) { hsize_t sel_start[H5O_LAYOUT_NDIMS]; /* Offset of low bound of file selection */ hsize_t sel_end[H5O_LAYOUT_NDIMS]; /* Offset of high bound of file selection */ @@ -1362,39 +1337,41 @@ H5D__create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t unsigned u; /* Local index variable */ herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_STATIC + FUNC_ENTER_STATIC_TAG(io_info->md_dxpl_id, dinfo->dset->oloc.addr, FAIL) /* Sanity check */ - HDassert(fm->f_ndims>0); + HDassert(dinfo->f_ndims > 0); /* Get number of elements selected in file */ - sel_points = fm->nelmts; + sel_points = dinfo->nelmts; /* Get bounding box for selection (to reduce the number of chunks to iterate over) */ - if(H5S_SELECT_BOUNDS(fm->file_space, sel_start, sel_end) < 0) + if(H5S_SELECT_BOUNDS(dinfo->file_space, sel_start, sel_end) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't get file selection bound info") /* Set initial chunk location & hyperslab size */ - for(u = 0; u < fm->f_ndims; u++) { - scaled[u] = start_scaled[u] = sel_start[u] / fm->layout->u.chunk.dim[u]; - coords[u] = start_coords[u] = scaled[u] * fm->layout->u.chunk.dim[u]; - end[u] = (coords[u] + fm->chunk_dim[u]) - 1; + for(u = 0; u < dinfo->f_ndims; u++) { + scaled[u] = start_scaled[u] = sel_start[u] / dinfo->layout->u.chunk.dim[u]; + coords[u] = start_coords[u] = scaled[u] * dinfo->layout->u.chunk.dim[u]; + end[u] = (coords[u] + dinfo->chunk_dim[u]) - 1; } /* end for */ /* Calculate the index of this 
chunk */ - chunk_index = H5VM_array_offset_pre(fm->f_ndims, fm->layout->u.chunk.down_chunks, scaled); + chunk_index = H5VM_array_offset_pre(dinfo->f_ndims, dinfo->layout->u.chunk.down_chunks, scaled); /* Iterate through each chunk in the dataset */ while(sel_points) { + H5D_chunk_ud_t udata; /* User data for querying chunk info */ + /* Check for intersection of temporary chunk and file selection */ /* (Casting away const OK - QAK) */ - if(TRUE == H5S_hyper_intersect_block((H5S_t *)fm->file_space, coords, end)) { + if(TRUE == H5S_hyper_intersect_block((H5S_t *)dinfo->file_space, coords, end)) { H5S_t *tmp_fchunk; /* Temporary file dataspace */ - H5D_chunk_info_t *new_chunk_info; /* chunk information to insert into skip list */ + H5D_piece_info_t *new_piece_info; /* chunk information to insert into skip list */ hssize_t schunk_points; /* Number of elements in chunk selection */ /* Create "temporary" chunk for selection operations (copy file space) */ - if(NULL == (tmp_fchunk = H5S_copy(fm->file_space, TRUE, FALSE))) + if(NULL == (tmp_fchunk = H5S_copy(dinfo->file_space, TRUE, FALSE))) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCOPY, FAIL, "unable to copy memory space") /* Make certain selections are stored in span tree form (not "optimized hyperslab" or "all") */ @@ -1404,13 +1381,13 @@ H5D__create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t } /* end if */ /* "AND" temporary chunk and current chunk */ - if(H5S_select_hyperslab(tmp_fchunk,H5S_SELECT_AND,coords,NULL,fm->chunk_dim,NULL) < 0) { + if(H5S_select_hyperslab(tmp_fchunk,H5S_SELECT_AND,coords,NULL,dinfo->chunk_dim,NULL) < 0) { (void)H5S_close(tmp_fchunk); HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSELECT, FAIL, "can't create chunk selection") } /* end if */ /* Resize chunk's dataspace dimensions to size of chunk */ - if(H5S_set_extent_real(tmp_fchunk,fm->chunk_dim) < 0) { + if(H5S_set_extent_real(tmp_fchunk,dinfo->chunk_dim) < 0) { (void)H5S_close(tmp_fchunk); HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSELECT, FAIL, 
"can't adjust chunk dimensions") } /* end if */ @@ -1424,7 +1401,7 @@ H5D__create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t /* Add temporary chunk to the list of chunks */ /* Allocate the file & memory chunk information */ - if (NULL==(new_chunk_info = H5FL_MALLOC(H5D_chunk_info_t))) { + if (NULL == (new_piece_info = H5FL_MALLOC(H5D_piece_info_t))) { (void)H5S_close(tmp_fchunk); HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate chunk info") } /* end if */ @@ -1432,39 +1409,46 @@ H5D__create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t /* Initialize the chunk information */ /* Set the chunk index */ - new_chunk_info->index=chunk_index; - -#ifdef H5_HAVE_PARALLEL - /* Store chunk selection information, for multi-chunk I/O */ - if(io_info->using_mpi_vfd) - fm->select_chunk[chunk_index] = new_chunk_info; -#endif /* H5_HAVE_PARALLEL */ + new_piece_info->index=chunk_index; /* Set the file chunk dataspace */ - new_chunk_info->fspace = tmp_fchunk; - new_chunk_info->fspace_shared = FALSE; + new_piece_info->fspace = tmp_fchunk; + new_piece_info->fspace_shared = FALSE; /* Set the memory chunk dataspace */ - new_chunk_info->mspace=NULL; - new_chunk_info->mspace_shared = FALSE; + new_piece_info->mspace=NULL; + new_piece_info->mspace_shared = FALSE; /* Copy the chunk's scaled coordinates */ - HDmemcpy(new_chunk_info->scaled, scaled, sizeof(hsize_t) * fm->f_ndims); - new_chunk_info->scaled[fm->f_ndims] = 0; + HDmemcpy(new_piece_info->scaled, scaled, sizeof(hsize_t) * dinfo->f_ndims); + new_piece_info->scaled[dinfo->f_ndims] = 0; - /* Copy the chunk's scaled coordinates */ - HDmemcpy(new_chunk_info->scaled, scaled, sizeof(hsize_t) * fm->f_ndims); + /* make connection to related dset info from this piece_info */ + new_piece_info->dset_info = dinfo; + + /* get chunk file address */ + if(H5D__chunk_lookup(new_piece_info->dset_info->dset, io_info->md_dxpl_id, new_piece_info->scaled, &udata) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, 
FAIL, "error looking up chunk address") + + new_piece_info->faddr = udata.chunk_block.offset; + if(HADDR_UNDEF != udata.chunk_block.offset) { + /* Insert the new piece into the global skip list */ + if(H5SL_insert(io_info->sel_pieces, new_piece_info, &new_piece_info->faddr) < 0) { + H5D__free_piece_info(new_piece_info, NULL, NULL); + HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINSERT, FAIL, "can't insert chunk into skip list") + } /* end if */ + } - /* Insert the new chunk into the skip list */ - if(H5SL_insert(fm->sel_chunks, new_chunk_info, &new_chunk_info->index) < 0) { - H5D__free_chunk_info(new_chunk_info, NULL, NULL); - HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINSERT, FAIL, "can't insert chunk into skip list") + /* Insert the new piece into the dataset skip list */ + if(H5SL_insert(dinfo->dset_sel_pieces, new_piece_info, &new_piece_info->index) < 0) { + H5D__free_piece_info(new_piece_info, NULL, NULL); + HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINSERT, FAIL, "can't insert chunk into dataset skip list") } /* end if */ /* Get number of elements selected in chunk */ if((schunk_points = H5S_GET_SELECT_NPOINTS(tmp_fchunk)) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't get file selection # of elements") - H5_CHECKED_ASSIGN(new_chunk_info->chunk_points, uint32_t, schunk_points, hssize_t); + H5_CHECKED_ASSIGN(new_piece_info->piece_points, uint32_t, schunk_points, hssize_t); /* Decrement # of points left in file selection */ sel_points -= (hsize_t)schunk_points; @@ -1478,12 +1462,12 @@ H5D__create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t chunk_index++; /* Set current increment dimension */ - curr_dim=(int)fm->f_ndims-1; + curr_dim=(int)dinfo->f_ndims-1; /* Increment chunk location in fastest changing dimension */ - H5_CHECK_OVERFLOW(fm->chunk_dim[curr_dim],hsize_t,hssize_t); - coords[curr_dim]+=fm->chunk_dim[curr_dim]; - end[curr_dim]+=fm->chunk_dim[curr_dim]; + H5_CHECK_OVERFLOW(dinfo->chunk_dim[curr_dim],hsize_t,hssize_t); + 
coords[curr_dim]+=dinfo->chunk_dim[curr_dim]; + end[curr_dim]+=dinfo->chunk_dim[curr_dim]; scaled[curr_dim]++; /* Bring chunk location back into bounds, if necessary */ @@ -1492,164 +1476,164 @@ H5D__create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t /* Reset current dimension's location to 0 */ scaled[curr_dim] = start_scaled[curr_dim]; coords[curr_dim] = start_coords[curr_dim]; /*lint !e771 The start_coords will always be initialized */ - end[curr_dim] = (coords[curr_dim] + fm->chunk_dim[curr_dim]) - 1; + end[curr_dim] = (coords[curr_dim] + dinfo->chunk_dim[curr_dim]) - 1; /* Decrement current dimension */ curr_dim--; /* Increment chunk location in current dimension */ scaled[curr_dim]++; - coords[curr_dim] += fm->chunk_dim[curr_dim]; - end[curr_dim] = (coords[curr_dim] + fm->chunk_dim[curr_dim]) - 1; + coords[curr_dim] += dinfo->chunk_dim[curr_dim]; + end[curr_dim] = (coords[curr_dim] + dinfo->chunk_dim[curr_dim]) - 1; } while(coords[curr_dim] > sel_end[curr_dim]); /* Re-calculate the index of this chunk */ - chunk_index = H5VM_array_offset_pre(fm->f_ndims, fm->layout->u.chunk.down_chunks, scaled); + chunk_index = H5VM_array_offset_pre(dinfo->f_ndims, dinfo->layout->u.chunk.down_chunks, scaled); } /* end if */ } /* end while */ done: - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D__create_chunk_file_map_hyper() */ + FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL) +} /* end H5D__create_piece_file_map_hyper() */ - /*------------------------------------------------------------------------- - * Function: H5D__create_chunk_mem_map_hyper + * Function: H5D__create_piece_mem_map_hyper * * Purpose: Create all chunk selections in memory by copying the file - * chunk selections and adjusting their offsets to be correct - * for the memory. + * chunk selections and adjusting their offsets to be correct + * for the memory. + * This was derived from H5D__create_chunk_mem_map_hyper for + * multi-dset work. 
* * Return: Non-negative on success/Negative on failure * - * Programmer: Quincey Koziol - * Thursday, May 29, 2003 + * Programmer: Jonathan Kim Nov, 2013 * * Assumptions: That the file and memory selections are the same shape. - * *------------------------------------------------------------------------- */ static herr_t -H5D__create_chunk_mem_map_hyper(const H5D_chunk_map_t *fm) +H5D__create_piece_mem_map_hyper(const H5D_io_info_t H5_ATTR_UNUSED *io_info, const H5D_dset_info_t *dinfo) { H5SL_node_t *curr_node; /* Current node in skip list */ hsize_t file_sel_start[H5O_LAYOUT_NDIMS]; /* Offset of low bound of file selection */ hsize_t file_sel_end[H5O_LAYOUT_NDIMS]; /* Offset of high bound of file selection */ hsize_t mem_sel_start[H5O_LAYOUT_NDIMS]; /* Offset of low bound of file selection */ hsize_t mem_sel_end[H5O_LAYOUT_NDIMS]; /* Offset of high bound of file selection */ - hssize_t adjust[H5O_LAYOUT_NDIMS]; /* Adjustment to make to all file chunks */ - hssize_t chunk_adjust[H5O_LAYOUT_NDIMS]; /* Adjustment to make to a particular chunk */ - unsigned u; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ + hssize_t adjust[H5O_LAYOUT_NDIMS]; /* Adjustment to make to all file chunks */ + hssize_t piece_adjust[H5O_LAYOUT_NDIMS]; /* Adjustment to make to a particular chunk */ + unsigned u; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_STATIC /* Sanity check */ - HDassert(fm->f_ndims>0); + HDassert(dinfo->f_ndims>0); /* Check for all I/O going to a single chunk */ - if(H5SL_count(fm->sel_chunks)==1) { - H5D_chunk_info_t *chunk_info; /* Pointer to chunk information */ + //if(H5SL_count(io_info->sel_pieces) == 1) { + if(H5SL_count(dinfo->dset_sel_pieces) == 1) { + H5D_piece_info_t *piece_info; /* Pointer to piece information */ /* Get the node */ - curr_node=H5SL_first(fm->sel_chunks); + //curr_node=H5SL_first(io_info->sel_pieces); + curr_node=H5SL_first(dinfo->dset_sel_pieces); - /* Get pointer to 
chunk's information */ - chunk_info = (H5D_chunk_info_t *)H5SL_item(curr_node); - HDassert(chunk_info); + /* Get pointer to piece's information */ + piece_info = (H5D_piece_info_t *)H5SL_item(curr_node); + HDassert(piece_info); /* Just point at the memory dataspace & selection */ /* (Casting away const OK -QAK) */ - chunk_info->mspace = (H5S_t *)fm->mem_space; + piece_info->mspace = (H5S_t *)dinfo->mem_space; - /* Indicate that the chunk's memory space is shared */ - chunk_info->mspace_shared = TRUE; + /* Indicate that the piece's memory space is shared */ + piece_info->mspace_shared = TRUE; } /* end if */ else { /* Get bounding box for file selection */ - if(H5S_SELECT_BOUNDS(fm->file_space, file_sel_start, file_sel_end) < 0) + if(H5S_SELECT_BOUNDS(dinfo->file_space, file_sel_start, file_sel_end) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't get file selection bound info") /* Get bounding box for memory selection */ - if(H5S_SELECT_BOUNDS(fm->mem_space, mem_sel_start, mem_sel_end) < 0) + if(H5S_SELECT_BOUNDS(dinfo->mem_space, mem_sel_start, mem_sel_end) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't get file selection bound info") /* Calculate the adjustment for memory selection from file selection */ - HDassert(fm->m_ndims==fm->f_ndims); - for(u=0; u<fm->f_ndims; u++) { + HDassert(dinfo->m_ndims==dinfo->f_ndims); + for(u=0; u<dinfo->f_ndims; u++) { H5_CHECK_OVERFLOW(file_sel_start[u],hsize_t,hssize_t); H5_CHECK_OVERFLOW(mem_sel_start[u],hsize_t,hssize_t); adjust[u]=(hssize_t)file_sel_start[u]-(hssize_t)mem_sel_start[u]; } /* end for */ - /* Iterate over each chunk in the chunk list */ - curr_node=H5SL_first(fm->sel_chunks); - while(curr_node) { - H5D_chunk_info_t *chunk_info; /* Pointer to chunk information */ + /* Iterate over each piece in the dataset's piece skiplist */ + HDassert(dinfo->dset_sel_pieces); + curr_node=H5SL_first(dinfo->dset_sel_pieces); - /* Get pointer to chunk's information */ - chunk_info = (H5D_chunk_info_t 
*)H5SL_item(curr_node); - HDassert(chunk_info); + while(curr_node) { + H5D_piece_info_t *piece_info; /* Pointer to piece information */ - /* Copy the information */ + /* Get pointer to piece's information */ + piece_info = (H5D_piece_info_t *)H5SL_item(curr_node); + HDassert(piece_info); /* Copy the memory dataspace */ - if((chunk_info->mspace = H5S_copy(fm->mem_space, TRUE, FALSE)) == NULL) + if((piece_info->mspace = H5S_copy(dinfo->mem_space, TRUE, FALSE)) == NULL) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCOPY, FAIL, "unable to copy memory space") /* Release the current selection */ - if(H5S_SELECT_RELEASE(chunk_info->mspace) < 0) + if(H5S_SELECT_RELEASE(piece_info->mspace) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release selection") - /* Copy the file chunk's selection */ - if(H5S_select_copy(chunk_info->mspace,chunk_info->fspace,FALSE) < 0) + /* Copy the file piece's selection */ + if(H5S_select_copy(piece_info->mspace, piece_info->fspace, FALSE) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCOPY, FAIL, "unable to copy selection") /* Compute the adjustment for this chunk */ - for(u = 0; u < fm->f_ndims; u++) { + for(u = 0; u < dinfo->f_ndims; u++) { hsize_t coords[H5O_LAYOUT_NDIMS]; /* Current coordinates of chunk */ /* Compute the chunk coordinates from the scaled coordinates */ - coords[u] = chunk_info->scaled[u] * fm->layout->u.chunk.dim[u]; + coords[u] = piece_info->scaled[u] * dinfo->layout->u.chunk.dim[u]; /* Compensate for the chunk offset */ H5_CHECK_OVERFLOW(coords[u], hsize_t, hssize_t); - chunk_adjust[u] = adjust[u] - (hssize_t)coords[u]; /*lint !e771 The adjust array will always be initialized */ + piece_adjust[u] = adjust[u] - (hssize_t)coords[u]; /*lint !e771 The adjust array will always be initialized */ } /* end for */ /* Adjust the selection */ - if(H5S_hyper_adjust_s(chunk_info->mspace,chunk_adjust) < 0) /*lint !e772 The chunk_adjust array will always be initialized */ + if(H5S_hyper_adjust_s(piece_info->mspace,piece_adjust) < 0) 
 /*lint !e772 The piece_adjust array will always be initialized */ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSELECT, FAIL, "can't adjust chunk selection") - /* Get the next chunk node in the skip list */ + /* Get the next piece node in the skip list */ curr_node=H5SL_next(curr_node); } /* end while */ } /* end else */ done: FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D__create_chunk_mem_map_hyper() */ +} /* end H5D__create_piece_mem_map_hyper() */ - /*------------------------------------------------------------------------- - * Function: H5D__chunk_file_cb + * Function: H5D__piece_file_cb * * Purpose: Callback routine for file selection iterator. Used when * creating selections in file for each point selected. * * Return: Non-negative on success/Negative on failure * - * Programmer: Quincey Koziol - * Wednesday, July 23, 2003 - * + * Programmer: Jonathan Kim Nov, 2013 *------------------------------------------------------------------------- */ static herr_t -H5D__chunk_file_cb(void H5_ATTR_UNUSED *elem, const H5T_t H5_ATTR_UNUSED *type, unsigned ndims, const hsize_t *coords, void *_udata) +H5D__piece_file_cb(void H5_ATTR_UNUSED *elem, const H5T_t H5_ATTR_UNUSED *type, unsigned ndims, const hsize_t *coords, void *_opdata) { - H5D_chunk_file_iter_ud_t *udata = (H5D_chunk_file_iter_ud_t *)_udata; /* User data for operation */ - H5D_chunk_map_t *fm = udata->fm; /* File<->memory chunk mapping info */ - H5D_chunk_info_t *chunk_info; /* Chunk information for current chunk */ + H5D_io_info_wrap_t *opdata = (H5D_io_info_wrap_t *)_opdata; + H5D_io_info_t *io_info = (H5D_io_info_t *) opdata->io_info; /* io info for multi dset */ + H5D_dset_info_t *dinfo = (H5D_dset_info_t *) opdata->dinfo; /* File<->memory piece mapping info */ + H5D_piece_info_t *piece_info; /* Chunk information for current piece */ + hsize_t coords_in_chunk[H5O_LAYOUT_NDIMS]; /* Coordinates of element in chunk */ hsize_t chunk_index; /* Chunk index */ hsize_t scaled[H5S_MAX_RANK]; /* Scaled coordinates for this chunk 
*/ @@ -1659,113 +1643,130 @@ H5D__chunk_file_cb(void H5_ATTR_UNUSED *elem, const H5T_t H5_ATTR_UNUSED *type, FUNC_ENTER_STATIC /* Calculate the index of this chunk */ - chunk_index = H5VM_chunk_index_scaled(ndims, coords, fm->layout->u.chunk.dim, fm->layout->u.chunk.down_chunks, scaled); + chunk_index = H5VM_chunk_index_scaled(ndims, coords, dinfo->layout->u.chunk.dim, + dinfo->layout->u.chunk.down_chunks, scaled); /* Find correct chunk in file & memory skip list */ - if(chunk_index==fm->last_index) { + if(chunk_index == dinfo->last_index) { /* If the chunk index is the same as the last chunk index we used, * get the cached info to operate on. */ - chunk_info = fm->last_chunk_info; + piece_info = dinfo->last_piece_info; } /* end if */ else { + haddr_t prev_tag = HADDR_UNDEF; + H5D_chunk_ud_t udata; /* User data for querying piece info */ + /* If the chunk index is not the same as the last chunk index we used, - * find the chunk in the skip list. - */ - /* Get the chunk node from the skip list */ - if(NULL == (chunk_info = (H5D_chunk_info_t *)H5SL_search(fm->sel_chunks, &chunk_index))) { + * search for the chunk in the skip list. If we do not find it, create + * a new node. 
*/ + if(NULL == (piece_info = (H5D_piece_info_t *)H5SL_search(dinfo->dset_sel_pieces, &chunk_index))) { H5S_t *fspace; /* Memory chunk's dataspace */ /* Allocate the file & memory chunk information */ - if (NULL==(chunk_info = H5FL_MALLOC (H5D_chunk_info_t))) + if (NULL==(piece_info = H5FL_MALLOC (H5D_piece_info_t))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate chunk info") /* Initialize the chunk information */ /* Set the chunk index */ - chunk_info->index=chunk_index; + piece_info->index = chunk_index; /* Create a dataspace for the chunk */ - if((fspace = H5S_create_simple(fm->f_ndims,fm->chunk_dim,NULL))==NULL) { - chunk_info = H5FL_FREE(H5D_chunk_info_t, chunk_info); + if((fspace = H5S_create_simple(dinfo->f_ndims, dinfo->chunk_dim, NULL)) == NULL) { + piece_info = H5FL_FREE(H5D_piece_info_t, piece_info); HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCREATE, FAIL, "unable to create dataspace for chunk") } /* end if */ /* De-select the chunk space */ if(H5S_select_none(fspace) < 0) { (void)H5S_close(fspace); - chunk_info = H5FL_FREE(H5D_chunk_info_t, chunk_info); + piece_info = H5FL_FREE(H5D_piece_info_t, piece_info); HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to de-select dataspace") } /* end if */ /* Set the file chunk dataspace */ - chunk_info->fspace = fspace; - chunk_info->fspace_shared = FALSE; + piece_info->fspace = fspace; + piece_info->fspace_shared = FALSE; /* Set the memory chunk dataspace */ - chunk_info->mspace = NULL; - chunk_info->mspace_shared = FALSE; + piece_info->mspace = NULL; + piece_info->mspace_shared = FALSE; /* Set the number of selected elements in chunk to zero */ - chunk_info->chunk_points = 0; + piece_info->piece_points = 0; /* Set the chunk's scaled coordinates */ - HDmemcpy(chunk_info->scaled, scaled, sizeof(hsize_t) * fm->f_ndims); - chunk_info->scaled[fm->f_ndims] = 0; - HDmemcpy(chunk_info->scaled, scaled, sizeof(hsize_t) * fm->f_ndims); - - /* Insert the new chunk into the skip list */ - 
if(H5SL_insert(fm->sel_chunks,chunk_info,&chunk_info->index) < 0) { - H5D__free_chunk_info(chunk_info,NULL,NULL); - HGOTO_ERROR(H5E_DATASPACE,H5E_CANTINSERT,FAIL,"can't insert chunk into skip list") + HDmemcpy(piece_info->scaled, scaled, sizeof(hsize_t) * dinfo->f_ndims); + piece_info->scaled[dinfo->f_ndims] = 0; + + /* make connection to related dset info from this piece_info */ + piece_info->dset_info = dinfo; + + /* Insert the new piece into the dataset skip list */ + if(H5SL_insert(dinfo->dset_sel_pieces, piece_info, &piece_info->index) < 0) { + H5D__free_piece_info(piece_info, NULL, NULL); + HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINSERT, FAIL, "can't insert chunk into dataset skip list") } /* end if */ - } /* end if */ -#ifdef H5_HAVE_PARALLEL - /* Store chunk selection information, for collective multi-chunk I/O */ - if(udata->io_info->using_mpi_vfd) - fm->select_chunk[chunk_index] = chunk_info; -#endif /* H5_HAVE_PARALLEL */ + /* set metadata tagging with dset oheader addr for H5D__chunk_lookup */ + if(H5AC_tag(io_info->md_dxpl_id, piece_info->dset_info->dset->oloc.addr, &prev_tag) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "unable to apply metadata tag") + /* get chunk file address */ + if(H5D__chunk_lookup(piece_info->dset_info->dset, io_info->md_dxpl_id, piece_info->scaled, &udata) < 0) + HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL, "couldn't get chunk info from skip list") + piece_info->faddr = udata.chunk_block.offset; + /* Reset metadata tagging */ + if(H5AC_tag(io_info->md_dxpl_id, prev_tag, NULL) < 0) + HDONE_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "unable to apply metadata tag") + + if(HADDR_UNDEF != udata.chunk_block.offset) { + /* Insert the new piece into the global skip list */ + if(H5SL_insert(io_info->sel_pieces, piece_info, &piece_info->faddr) < 0) { + H5D__free_piece_info(piece_info,NULL,NULL); + HGOTO_ERROR(H5E_DATASPACE,H5E_CANTINSERT,FAIL,"can't insert chunk into skip list") + } /* end if */ + } + } /* end if */ /* Update the "last chunk 
seen" information */ - fm->last_index = chunk_index; - fm->last_chunk_info = chunk_info; + dinfo->last_index = chunk_index; + dinfo->last_piece_info = piece_info; } /* end else */ /* Get the offset of the element within the chunk */ - for(u = 0; u < fm->f_ndims; u++) - coords_in_chunk[u] = coords[u] - (scaled[u] * fm->layout->u.chunk.dim[u]); + for(u = 0; u < dinfo->f_ndims; u++) + coords_in_chunk[u] = coords[u] - (scaled[u] * dinfo->layout->u.chunk.dim[u]); /* Add point to file selection for chunk */ - if(H5S_select_elements(chunk_info->fspace, H5S_SELECT_APPEND, (size_t)1, coords_in_chunk) < 0) + if(H5S_select_elements(piece_info->fspace, H5S_SELECT_APPEND, (size_t)1, coords_in_chunk) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSELECT, FAIL, "unable to select element") /* Increment the number of elemented selected in chunk */ - chunk_info->chunk_points++; + piece_info->piece_points++; done: FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D__chunk_file_cb() */ +} /* end H5D__piece_file_cb */ - /*------------------------------------------------------------------------- - * Function: H5D__chunk_mem_cb + * Function: H5D__piece_mem_cb * * Purpose: Callback routine for file selection iterator. Used when - * creating selections in memory for each chunk. + * creating selections in memory for each piece. + * This was derived from H5D__chunk_mem_cb for multi-dset work. 
* * Return: Non-negative on success/Negative on failure * - * Programmer: Raymond Lu - * Thursday, April 10, 2003 - * + * Programmer: Jonathan Kim Nov, 2013 *------------------------------------------------------------------------- */ static herr_t -H5D__chunk_mem_cb(void H5_ATTR_UNUSED *elem, const H5T_t H5_ATTR_UNUSED *type, unsigned ndims, const hsize_t *coords, void *_fm) +H5D__piece_mem_cb(void H5_ATTR_UNUSED *elem, const H5T_t H5_ATTR_UNUSED *type, unsigned ndims, const hsize_t *coords, void *_opdata) { - H5D_chunk_map_t *fm = (H5D_chunk_map_t *)_fm; /* File<->memory chunk mapping info */ - H5D_chunk_info_t *chunk_info; /* Chunk information for current chunk */ + H5D_io_info_wrap_t *opdata = (H5D_io_info_wrap_t *)_opdata; + H5D_dset_info_t *dinfo = (H5D_dset_info_t *) opdata->dinfo; /* File<->memory chunk mapping info */ + H5D_piece_info_t *piece_info; /* Chunk information for current piece */ hsize_t coords_in_mem[H5O_LAYOUT_NDIMS]; /* Coordinates of element in memory */ hsize_t chunk_index; /* Chunk index */ herr_t ret_value = SUCCEED; /* Return value */ @@ -1773,56 +1774,56 @@ H5D__chunk_mem_cb(void H5_ATTR_UNUSED *elem, const H5T_t H5_ATTR_UNUSED *type, u FUNC_ENTER_STATIC /* Calculate the index of this chunk */ - chunk_index = H5VM_chunk_index(ndims, coords, fm->layout->u.chunk.dim, fm->layout->u.chunk.down_chunks); + chunk_index = H5VM_chunk_index(ndims, coords, dinfo->layout->u.chunk.dim, + dinfo->layout->u.chunk.down_chunks); /* Find correct chunk in file & memory skip list */ - if(chunk_index == fm->last_index) { + if(chunk_index == dinfo->last_index) { /* If the chunk index is the same as the last chunk index we used, * get the cached spaces to operate on. */ - chunk_info = fm->last_chunk_info; + piece_info = dinfo->last_piece_info; } /* end if */ else { /* If the chunk index is not the same as the last chunk index we used, - * find the chunk in the skip list. + * find the chunk in the dataset skip list. 
*/ - /* Get the chunk node from the skip list */ - if(NULL == (chunk_info = (H5D_chunk_info_t *)H5SL_search(fm->sel_chunks, &chunk_index))) - HGOTO_ERROR(H5E_DATASPACE, H5E_NOTFOUND, FAIL, "can't locate chunk in skip list") + if(NULL == (piece_info = (H5D_piece_info_t *)H5SL_search(dinfo->dset_sel_pieces, &chunk_index))) + HGOTO_ERROR(H5E_DATASPACE, H5E_NOTFOUND, FAIL, "can't locate piece in dataset skip list") /* Check if the chunk already has a memory space */ - if(NULL == chunk_info->mspace) { + if(NULL == piece_info->mspace) { /* Copy the template memory chunk dataspace */ - if(NULL == (chunk_info->mspace = H5S_copy(fm->mchunk_tmpl, FALSE, FALSE))) + if(NULL == (piece_info->mspace = H5S_copy(dinfo->mchunk_tmpl, FALSE, FALSE))) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCOPY, FAIL, "unable to copy file space") } /* end else */ - /* Update the "last chunk seen" information */ - fm->last_index = chunk_index; - fm->last_chunk_info = chunk_info; + /* Update the "last piece seen" information */ + dinfo->last_index = chunk_index; + dinfo->last_piece_info = piece_info; } /* end else */ /* Get coordinates of selection iterator for memory */ - if(H5S_SELECT_ITER_COORDS(&fm->mem_iter, coords_in_mem) < 0) + if(H5S_SELECT_ITER_COORDS(&dinfo->mem_iter, coords_in_mem) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get iterator coordinates") /* Add point to memory selection for chunk */ - if(fm->msel_type == H5S_SEL_POINTS) { - if(H5S_select_elements(chunk_info->mspace, H5S_SELECT_APPEND, (size_t)1, coords_in_mem) < 0) + if(dinfo->msel_type == H5S_SEL_POINTS) { + if(H5S_select_elements(piece_info->mspace, H5S_SELECT_APPEND, (size_t)1, coords_in_mem) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSELECT, FAIL, "unable to select element") } /* end if */ else { - if(H5S_hyper_add_span_element(chunk_info->mspace, fm->m_ndims, coords_in_mem) < 0) + if(H5S_hyper_add_span_element(piece_info->mspace, dinfo->m_ndims, coords_in_mem) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSELECT, FAIL, 
"unable to select element") } /* end else */ /* Move memory selection iterator to next element in selection */ - if(H5S_SELECT_ITER_NEXT(&fm->mem_iter, (size_t)1) < 0) + if(H5S_SELECT_ITER_NEXT(&dinfo->mem_iter, (size_t)1) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTNEXT, FAIL, "unable to move to next iterator location") done: FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D__chunk_mem_cb() */ +} /* end H5D__piece_mem_cb() */ /*------------------------------------------------------------------------- @@ -1839,9 +1840,10 @@ done: *------------------------------------------------------------------------- */ htri_t -H5D__chunk_cacheable(const H5D_io_info_t *io_info, haddr_t caddr, hbool_t write_op) +H5D__chunk_cacheable(const H5D_io_info_t *io_info, H5D_dset_info_t *dset_info, + haddr_t caddr, hbool_t write_op) { - const H5D_t *dataset = io_info->dset; /* Local pointer to dataset info */ + const H5D_t *dataset = NULL; /* Local pointer to dataset */ hbool_t has_filters = FALSE; /* Whether there are filters on the chunk or not */ htri_t ret_value = FAIL; /* Return value */ @@ -1849,6 +1851,7 @@ H5D__chunk_cacheable(const H5D_io_info_t *io_info, haddr_t caddr, hbool_t write_ /* Sanity check */ HDassert(io_info); + dataset = dset_info->dset; HDassert(dataset); /* Must bring the whole chunk in if there are any filters on the chunk. 
@@ -1858,10 +1861,10 @@ H5D__chunk_cacheable(const H5D_io_info_t *io_info, haddr_t caddr, hbool_t write_ if(dataset->shared->layout.u.chunk.flags & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) { has_filters = !H5D__chunk_is_partial_edge_chunk( - io_info->dset->shared->ndims, - io_info->dset->shared->layout.u.chunk.dim, - io_info->store->chunk.scaled, - io_info->dset->shared->curr_dims); + dataset->shared->ndims, + dataset->shared->layout.u.chunk.dim, + dset_info->store->chunk.scaled, + dataset->shared->curr_dims); } /* end if */ else has_filters = TRUE; @@ -1932,15 +1935,22 @@ done: static herr_t H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t H5_ATTR_UNUSED nelmts, const H5S_t H5_ATTR_UNUSED *file_space, const H5S_t H5_ATTR_UNUSED *mem_space, - H5D_chunk_map_t *fm) + H5D_dset_info_t *dset_info) { H5SL_node_t *chunk_node; /* Current node in chunk skip list */ - H5D_io_info_t nonexistent_io_info; /* "nonexistent" I/O info object */ + + H5D_io_info_t nonexistent_io_info; /* "nonexistent" I/O info object */ + H5D_dset_info_t nonexistent_dset_info; /* "nonexistent" I/O dset info object */ + H5D_io_info_t ctg_io_info; /* Contiguous I/O info object */ + H5D_dset_info_t ctg_dset_info; /* Contiguous I/O dset info object */ H5D_storage_t ctg_store; /* Chunk storage information as contiguous dataset */ + H5D_io_info_t cpt_io_info; /* Compact I/O info object */ + H5D_dset_info_t cpt_dset_info; /* Compact I/O dset info object */ H5D_storage_t cpt_store; /* Chunk storage information as compact dataset */ hbool_t cpt_dirty; /* Temporary placeholder for compact storage "dirty" flag */ + uint32_t src_accessed_bytes = 0; /* Total accessed size in a chunk */ hbool_t skip_missing_chunks = FALSE; /* Whether to skip missing chunks */ herr_t ret_value = SUCCEED; /*return value */ @@ -1949,32 +1959,39 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, /* Sanity check */ HDassert(io_info); - HDassert(io_info->u.rbuf); + 
HDassert(dset_info); + HDassert(dset_info->u.rbuf); HDassert(type_info); - HDassert(fm); + HDassert(dset_info == io_info->dsets_info); /* Set up "nonexistent" I/O info object */ HDmemcpy(&nonexistent_io_info, io_info, sizeof(nonexistent_io_info)); - nonexistent_io_info.layout_ops = *H5D_LOPS_NONEXISTENT; + HDmemcpy(&nonexistent_dset_info, dset_info, sizeof(nonexistent_dset_info)); + nonexistent_dset_info.layout_ops = *H5D_LOPS_NONEXISTENT; + nonexistent_io_info.dsets_info = &nonexistent_dset_info; /* Set up contiguous I/O info object */ HDmemcpy(&ctg_io_info, io_info, sizeof(ctg_io_info)); - ctg_io_info.store = &ctg_store; - ctg_io_info.layout_ops = *H5D_LOPS_CONTIG; + HDmemcpy(&ctg_dset_info, dset_info, sizeof(ctg_dset_info)); + ctg_dset_info.store = &ctg_store; + ctg_dset_info.layout_ops = *H5D_LOPS_CONTIG; + ctg_io_info.dsets_info = &ctg_dset_info; /* Initialize temporary contiguous storage info */ - H5_CHECKED_ASSIGN(ctg_store.contig.dset_size, hsize_t, io_info->dset->shared->layout.u.chunk.size, uint32_t); + H5_CHECKED_ASSIGN(ctg_store.contig.dset_size, hsize_t, dset_info->dset->shared->layout.u.chunk.size, uint32_t); /* Set up compact I/O info object */ HDmemcpy(&cpt_io_info, io_info, sizeof(cpt_io_info)); - cpt_io_info.store = &cpt_store; - cpt_io_info.layout_ops = *H5D_LOPS_COMPACT; + HDmemcpy(&cpt_dset_info, dset_info, sizeof(cpt_dset_info)); + cpt_dset_info.store = &cpt_store; + cpt_dset_info.layout_ops = *H5D_LOPS_COMPACT; + cpt_io_info.dsets_info = &cpt_dset_info; /* Initialize temporary compact storage info */ cpt_store.compact.dirty = &cpt_dirty; { - const H5O_fill_t *fill = &(io_info->dset->shared->dcpl_cache.fill); /* Fill value info */ + const H5O_fill_t *fill = &(dset_info->dset->shared->dcpl_cache.fill); /* Fill value info */ H5D_fill_value_t fill_status; /* Fill value status */ /* Check the fill value status */ @@ -1992,16 +2009,17 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, } /* Iterate through nodes in chunk 
skip list */ - chunk_node = H5D_CHUNK_GET_FIRST_NODE(fm); + chunk_node = H5D_CHUNK_GET_FIRST_NODE(dset_info); + while(chunk_node) { - H5D_chunk_info_t *chunk_info; /* Chunk information */ + H5D_piece_info_t *chunk_info; /* Chunk information */ H5D_chunk_ud_t udata; /* Chunk index pass-through */ /* Get the actual chunk information from the skip list node */ - chunk_info = H5D_CHUNK_GET_NODE_INFO(fm, chunk_node); + chunk_info = H5D_CHUNK_GET_NODE_INFO(dset_info, chunk_node); /* Get the info for the chunk in the file */ - if(H5D__chunk_lookup(io_info->dset, io_info->md_dxpl_id, chunk_info->scaled, &udata) < 0) + if(H5D__chunk_lookup(dset_info->dset, io_info->md_dxpl_id, chunk_info->scaled, &udata) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address") /* Sanity check */ @@ -2016,17 +2034,20 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, htri_t cacheable; /* Whether the chunk is cacheable */ /* Set chunk's [scaled] coordinates */ - io_info->store->chunk.scaled = chunk_info->scaled; + dset_info->store->chunk.scaled = chunk_info->scaled; /* Determine if we should use the chunk cache */ - if((cacheable = H5D__chunk_cacheable(io_info, udata.chunk_block.offset, FALSE)) < 0) + if((cacheable = H5D__chunk_cacheable(io_info, dset_info, udata.chunk_block.offset, FALSE)) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't tell if chunk is cacheable") if(cacheable) { /* Load the chunk into cache and lock it. 
*/ /* Compute # of bytes accessed in chunk */ H5_CHECK_OVERFLOW(type_info->src_type_size, /*From:*/ size_t, /*To:*/ uint32_t); - src_accessed_bytes = chunk_info->chunk_points * (uint32_t)type_info->src_type_size; + src_accessed_bytes = chunk_info->piece_points * (uint32_t)type_info->src_type_size; + + /* Set chunk's [scaled] coordinates */ + dset_info->store->chunk.scaled = chunk_info->scaled; /* Lock the chunk into the cache */ if(NULL == (chunk = H5D__chunk_lock(io_info, &udata, FALSE, FALSE))) @@ -2052,7 +2073,7 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, /* Perform the actual read operation */ if((io_info->io_ops.single_read)(chk_io_info, type_info, - (hsize_t)chunk_info->chunk_points, chunk_info->fspace, chunk_info->mspace) < 0) + (hsize_t)chunk_info->piece_points, chunk_info->fspace, chunk_info->mspace) < 0) HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked read failed") /* Release the cache lock on the chunk. */ @@ -2061,7 +2082,7 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, } /* end if */ /* Advance to next chunk in list */ - chunk_node = H5D_CHUNK_GET_NEXT_NODE(fm, chunk_node); + chunk_node = H5D_CHUNK_GET_NEXT_NODE(dset_info, chunk_node); } /* end while */ done: @@ -2084,12 +2105,14 @@ done: static herr_t H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t H5_ATTR_UNUSED nelmts, const H5S_t H5_ATTR_UNUSED *file_space, const H5S_t H5_ATTR_UNUSED *mem_space, - H5D_chunk_map_t *fm) + H5D_dset_info_t *dset_info) { H5SL_node_t *chunk_node; /* Current node in chunk skip list */ H5D_io_info_t ctg_io_info; /* Contiguous I/O info object */ + H5D_dset_info_t ctg_dset_info; /* Contiguous I/O dset info object */ H5D_storage_t ctg_store; /* Chunk storage information as contiguous dataset */ H5D_io_info_t cpt_io_info; /* Compact I/O info object */ + H5D_dset_info_t cpt_dset_info; /* Compact I/O dset info object */ H5D_storage_t cpt_store; /* Chunk storage information as 
compact dataset */ hbool_t cpt_dirty; /* Temporary placeholder for compact storage "dirty" flag */ uint32_t dst_accessed_bytes = 0; /* Total accessed size in a chunk */ @@ -2099,30 +2122,35 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, /* Sanity check */ HDassert(io_info); - HDassert(io_info->u.wbuf); + HDassert(dset_info); + HDassert(dset_info->u.wbuf); HDassert(type_info); - HDassert(fm); /* Set up contiguous I/O info object */ HDmemcpy(&ctg_io_info, io_info, sizeof(ctg_io_info)); - ctg_io_info.store = &ctg_store; - ctg_io_info.layout_ops = *H5D_LOPS_CONTIG; + HDmemcpy(&ctg_dset_info, dset_info, sizeof(ctg_dset_info)); + ctg_dset_info.store = &ctg_store; + ctg_dset_info.layout_ops = *H5D_LOPS_CONTIG; + ctg_io_info.dsets_info = &ctg_dset_info; /* Initialize temporary contiguous storage info */ - H5_CHECKED_ASSIGN(ctg_store.contig.dset_size, hsize_t, io_info->dset->shared->layout.u.chunk.size, uint32_t); + H5_CHECKED_ASSIGN(ctg_store.contig.dset_size, hsize_t, dset_info->dset->shared->layout.u.chunk.size, uint32_t); /* Set up compact I/O info object */ HDmemcpy(&cpt_io_info, io_info, sizeof(cpt_io_info)); - cpt_io_info.store = &cpt_store; - cpt_io_info.layout_ops = *H5D_LOPS_COMPACT; + HDmemcpy(&cpt_dset_info, dset_info, sizeof(cpt_dset_info)); + cpt_dset_info.store = &cpt_store; + cpt_dset_info.layout_ops = *H5D_LOPS_COMPACT; + cpt_io_info.dsets_info = &cpt_dset_info; /* Initialize temporary compact storage info */ cpt_store.compact.dirty = &cpt_dirty; /* Iterate through nodes in chunk skip list */ - chunk_node = H5D_CHUNK_GET_FIRST_NODE(fm); + chunk_node = H5D_CHUNK_GET_FIRST_NODE(dset_info); + while(chunk_node) { - H5D_chunk_info_t *chunk_info; /* Chunk information */ + H5D_piece_info_t *chunk_info; /* Chunk information */ H5D_chk_idx_info_t idx_info; /* Chunked index info */ H5D_io_info_t *chk_io_info; /* Pointer to I/O info object for this chunk */ void *chunk; /* Pointer to locked chunk buffer */ @@ -2131,10 +2159,10 @@ 
H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hbool_t need_insert = FALSE; /* Whether the chunk needs to be inserted into the index */ /* Get the actual chunk information from the skip list node */ - chunk_info = H5D_CHUNK_GET_NODE_INFO(fm, chunk_node); + chunk_info = H5D_CHUNK_GET_NODE_INFO(dset_info, chunk_node); /* Look up the chunk */ - if(H5D__chunk_lookup(io_info->dset, io_info->md_dxpl_id, chunk_info->scaled, &udata) < 0) + if(H5D__chunk_lookup(dset_info->dset, io_info->md_dxpl_id, chunk_info->scaled, &udata) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address") /* Sanity check */ @@ -2142,10 +2170,10 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, (!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0)); /* Set chunk's [scaled] coordinates */ - io_info->store->chunk.scaled = chunk_info->scaled; + dset_info->store->chunk.scaled = chunk_info->scaled; /* Determine if we should use the chunk cache */ - if((cacheable = H5D__chunk_cacheable(io_info, udata.chunk_block.offset, TRUE)) < 0) + if((cacheable = H5D__chunk_cacheable(io_info, dset_info, udata.chunk_block.offset, TRUE)) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't tell if chunk is cacheable") if(cacheable) { /* Load the chunk into cache. 
But if the whole chunk is written, @@ -2154,14 +2182,17 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, /* Compute # of bytes accessed in chunk */ H5_CHECK_OVERFLOW(type_info->dst_type_size, /*From:*/ size_t, /*To:*/ uint32_t); - dst_accessed_bytes = chunk_info->chunk_points * (uint32_t)type_info->dst_type_size; + dst_accessed_bytes = chunk_info->piece_points * (uint32_t)type_info->dst_type_size; /* Determine if we will access all the data in the chunk */ if(dst_accessed_bytes != ctg_store.contig.dset_size || - (chunk_info->chunk_points * type_info->src_type_size) != ctg_store.contig.dset_size || - fm->fsel_type == H5S_SEL_POINTS) + (chunk_info->piece_points * type_info->src_type_size) != ctg_store.contig.dset_size || + dset_info->fsel_type == H5S_SEL_POINTS) entire_chunk = FALSE; + /* Set chunk's [scaled] coordinates */ + dset_info->store->chunk.scaled = chunk_info->scaled; + /* Lock the chunk into the cache */ if(NULL == (chunk = H5D__chunk_lock(io_info, &udata, entire_chunk, FALSE))) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk") @@ -2176,14 +2207,14 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, /* If the chunk hasn't been allocated on disk, do so now. 
*/ if(!H5F_addr_defined(udata.chunk_block.offset)) { /* Compose chunked index info struct */ - idx_info.f = io_info->dset->oloc.file; + idx_info.f = dset_info->dset->oloc.file; idx_info.dxpl_id = io_info->md_dxpl_id; - idx_info.pline = &(io_info->dset->shared->dcpl_cache.pline); - idx_info.layout = &(io_info->dset->shared->layout.u.chunk); - idx_info.storage = &(io_info->dset->shared->layout.storage.u.chunk); + idx_info.pline = &(dset_info->dset->shared->dcpl_cache.pline); + idx_info.layout = &(dset_info->dset->shared->layout.u.chunk); + idx_info.storage = &(dset_info->dset->shared->layout.storage.u.chunk); /* Set up the size of chunk for user data */ - udata.chunk_block.length = io_info->dset->shared->layout.u.chunk.size; + udata.chunk_block.length = dset_info->dset->shared->layout.u.chunk.size; /* Allocate the chunk */ if(H5D__chunk_file_alloc(&idx_info, NULL, &udata.chunk_block, &need_insert) < 0) @@ -2194,7 +2225,7 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "chunk address isn't defined") /* Cache the new chunk information */ - H5D__chunk_cinfo_cache_update(&io_info->dset->shared->cache.chunk.last, &udata); + H5D__chunk_cinfo_cache_update(&dset_info->dset->shared->cache.chunk.last, &udata); } /* end if */ /* Set up the storage address information for this chunk */ @@ -2207,9 +2238,13 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, chk_io_info = &ctg_io_info; } /* end else */ + HDassert(TRUE == H5P_isa_class(io_info->md_dxpl_id, H5P_DATASET_XFER)); + HDassert(TRUE == H5P_isa_class(ctg_io_info.md_dxpl_id, H5P_DATASET_XFER)); + HDassert(TRUE == H5P_isa_class(chk_io_info->md_dxpl_id, H5P_DATASET_XFER)); + /* Perform the actual write operation */ if((io_info->io_ops.single_write)(chk_io_info, type_info, - (hsize_t)chunk_info->chunk_points, chunk_info->fspace, chunk_info->mspace) < 0) + (hsize_t)chunk_info->piece_points, chunk_info->fspace, chunk_info->mspace) 
< 0) HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked write failed") /* Release the cache lock on the chunk, or insert chunk into index. */ @@ -2218,19 +2253,20 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk") } /* end if */ else { - if(need_insert && io_info->dset->shared->layout.storage.u.chunk.ops->insert) - if((io_info->dset->shared->layout.storage.u.chunk.ops->insert)(&idx_info, &udata, NULL) < 0) + if(need_insert && dset_info->dset->shared->layout.storage.u.chunk.ops->insert) + if((dset_info->dset->shared->layout.storage.u.chunk.ops->insert)(&idx_info, &udata, NULL) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert chunk addr into index") } /* end else */ /* Advance to next chunk in list */ - chunk_node = H5D_CHUNK_GET_NEXT_NODE(fm, chunk_node); + chunk_node = H5D_CHUNK_GET_NEXT_NODE(dset_info, chunk_node); } /* end while */ done: FUNC_LEAVE_NOAPI(ret_value) } /* H5D__chunk_write() */ + /*------------------------------------------------------------------------- * Function: H5D__chunk_flush @@ -2277,56 +2313,57 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__chunk_flush() */ - /*------------------------------------------------------------------------- - * Function: H5D__chunk_io_term + * Function: H5D__piece_io_term * * Purpose: Destroy I/O operation information. * * Return: Non-negative on success/Negative on failure * - * Programmer: Quincey Koziol - * Saturday, May 17, 2003 - * + * Programmer: Jonathan Kim Nov, 2013 *------------------------------------------------------------------------- */ -static herr_t -H5D__chunk_io_term(const H5D_chunk_map_t *fm) +herr_t +H5D__piece_io_term(H5D_io_info_t *io_info, H5D_dset_info_t *di) { herr_t ret_value = SUCCEED; /*return value */ FUNC_ENTER_STATIC /* Single element I/O vs. 
multiple element I/O cleanup */ - if(fm->use_single) { + if(di->use_single) { /* Sanity checks */ - HDassert(fm->sel_chunks == NULL); - HDassert(fm->single_chunk_info); - HDassert(fm->single_chunk_info->fspace_shared); - HDassert(fm->single_chunk_info->mspace_shared); + HDassert(di->dset_sel_pieces == NULL); + HDassert(di->last_piece_info == NULL); + HDassert(di->single_piece_info); + HDassert(di->single_piece_info->fspace_shared); + HDassert(di->single_piece_info->mspace_shared); /* Reset the selection for the single element I/O */ - H5S_select_all(fm->single_space, TRUE); + H5S_select_all(di->single_space, TRUE); } /* end if */ else { - /* Release the nodes on the list of selected chunks */ - if(fm->sel_chunks) - if(H5SL_free(fm->sel_chunks, H5D__free_chunk_info, NULL) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTNEXT, FAIL, "can't iterate over chunks") + /* Release the nodes on the list of selected pieces, or the last (only) + * piece if the skiplist is not available */ + if(di->dset_sel_pieces) { + if(H5SL_free(di->dset_sel_pieces, H5D__free_piece_info, NULL) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "can't free dataset skip list") + } /* end if */ + else if(di->last_piece_info) { + if(H5D__free_piece_info(di->last_piece_info, NULL, NULL) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "can't free piece info") + di->last_piece_info = NULL; + } /* end if */ } /* end else */ - /* Free the memory chunk dataspace template */ - if(fm->mchunk_tmpl) - if(H5S_close(fm->mchunk_tmpl) < 0) - HGOTO_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "can't release memory chunk dataspace template") -#ifdef H5_HAVE_PARALLEL - if(fm->select_chunk) - H5MM_xfree(fm->select_chunk); -#endif /* H5_HAVE_PARALLEL */ + /* Free the memory piece dataspace template */ + if(di->mchunk_tmpl) + if(H5S_close(di->mchunk_tmpl) < 0) + HGOTO_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "can't release memory piece dataspace template") done: FUNC_LEAVE_NOAPI(ret_value) -} /* end 
H5D__chunk_io_term() */ +} /* end H5D__piece_io_term() */ /*------------------------------------------------------------------------- @@ -3170,7 +3207,7 @@ static void * H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t relax, hbool_t prev_unfilt_chunk) { - const H5D_t *dset = io_info->dset; /* Local pointer to the dataset info */ + const H5D_t *dset = io_info->dsets_info[0].dset; /* Local pointer to the dataset info */ const H5O_pline_t *pline = &(dset->shared->dcpl_cache.pline); /* I/O pipeline info - always equal to the pline passed to H5D__chunk_mem_alloc */ const H5O_pline_t *old_pline = pline; /* Old pipeline, i.e. pipeline used to read the chunk */ const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */ @@ -3188,7 +3225,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, HDassert(io_info); HDassert(io_info->dxpl_cache); - HDassert(io_info->store); + HDassert(io_info->dsets_info[0].store); HDassert(udata); HDassert(dset); HDassert(TRUE == H5P_isa_class(io_info->md_dxpl_id, H5P_DATASET_XFER)); @@ -3215,7 +3252,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, /* Make sure this is the right chunk */ for(u = 0; u < layout->u.chunk.ndims - 1; u++) - HDassert(io_info->store->chunk.scaled[u] == ent->scaled[u]); + HDassert(io_info->dsets_info[0].store->chunk.scaled[u] == ent->scaled[u]); } #endif /* NDEBUG */ @@ -3334,9 +3371,9 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, else if(layout->u.chunk.flags & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) { /* Check if this is an edge chunk */ - if(H5D__chunk_is_partial_edge_chunk(io_info->dset->shared->ndims, - layout->u.chunk.dim, io_info->store->chunk.scaled, - io_info->dset->shared->curr_dims)) { + if(H5D__chunk_is_partial_edge_chunk(dset->shared->ndims, + layout->u.chunk.dim, io_info->dsets_info[0].store->chunk.scaled, + dset->shared->curr_dims)) { /* Disable the filters for both writing and 
reading */ disable_filters = TRUE; old_pline = NULL; @@ -3451,17 +3488,17 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, /* See if the chunk can be cached */ if(rdcc->nslots > 0 && chunk_size <= rdcc->nbytes_max) { /* Calculate the index */ - udata->idx_hint = H5D__chunk_hash_val(io_info->dset->shared, udata->common.scaled); + udata->idx_hint = H5D__chunk_hash_val(dset->shared, udata->common.scaled); /* Add the chunk to the cache only if the slot is not already locked */ ent = rdcc->slot[udata->idx_hint]; if(!ent || !ent->locked) { /* Preempt enough things from the cache to make room */ if(ent) { - if(H5D__chunk_cache_evict(io_info->dset, io_info->md_dxpl_id, io_info->dxpl_cache, ent, TRUE) < 0) + if(H5D__chunk_cache_evict(io_info->dsets_info[0].dset, io_info->md_dxpl_id, io_info->dxpl_cache, ent, TRUE) < 0) HGOTO_ERROR(H5E_IO, H5E_CANTINIT, NULL, "unable to preempt chunk from cache") } /* end if */ - if(H5D__chunk_cache_prune(io_info->dset, io_info->md_dxpl_id, io_info->dxpl_cache, chunk_size) < 0) + if(H5D__chunk_cache_prune(io_info->dsets_info[0].dset, io_info->md_dxpl_id, io_info->dxpl_cache, chunk_size) < 0) HGOTO_ERROR(H5E_IO, H5E_CANTINIT, NULL, "unable to preempt chunk(s) from cache") /* Create a new entry */ @@ -3562,11 +3599,12 @@ done: *------------------------------------------------------------------------- */ static herr_t -H5D__chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_ud_t *udata, - hbool_t dirty, void *chunk, uint32_t naccessed) +H5D__chunk_unlock(const H5D_io_info_t *io_info, + const H5D_chunk_ud_t *udata, hbool_t dirty, void *chunk, uint32_t naccessed) { - const H5O_layout_t *layout = &(io_info->dset->shared->layout); /* Dataset layout */ - const H5D_rdcc_t *rdcc = &(io_info->dset->shared->cache.chunk); + const H5O_layout_t *layout = &(io_info->dsets_info[0].dset->shared->layout); /* Dataset layout */ + const H5D_rdcc_t *rdcc = &(io_info->dsets_info[0].dset->shared->cache.chunk); + const H5D_t *dset = 
io_info->dsets_info[0].dset; /* Local pointer to the dataset info */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_STATIC @@ -3591,8 +3629,8 @@ H5D__chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_ud_t *udata, else if(layout->u.chunk.flags & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) { /* Check if the chunk is an edge chunk, and disable filters if so */ is_unfiltered_edge_chunk = H5D__chunk_is_partial_edge_chunk( - io_info->dset->shared->ndims, layout->u.chunk.dim, - io_info->store->chunk.scaled, io_info->dset->shared->curr_dims); + dset->shared->ndims, layout->u.chunk.dim, + io_info->dsets_info[0].store->chunk.scaled, dset->shared->curr_dims); } /* end if */ if(dirty) { @@ -3611,13 +3649,13 @@ H5D__chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_ud_t *udata, fake_ent.chunk_block.length = udata->chunk_block.length; fake_ent.chunk = (uint8_t *)chunk; - if(H5D__chunk_flush_entry(io_info->dset, io_info->md_dxpl_id, io_info->dxpl_cache, &fake_ent, TRUE) < 0) + if(H5D__chunk_flush_entry(io_info->dsets_info[0].dset, io_info->md_dxpl_id, io_info->dxpl_cache, &fake_ent, TRUE) < 0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "cannot flush indexed storage buffer") } /* end if */ else { if(chunk) chunk = H5D__chunk_mem_xfree(chunk, (is_unfiltered_edge_chunk ? 
NULL - : &(io_info->dset->shared->dcpl_cache.pline))); + : &(io_info->dsets_info[0].dset->shared->dcpl_cache.pline))); } /* end else */ } /* end if */ else { @@ -4216,6 +4254,7 @@ H5D__chunk_update_old_edge_chunks(H5D_t *dset, hid_t dxpl_id, hsize_t old_dim[]) H5D_io_info_t chk_io_info; /* Chunked I/O info object */ H5D_chunk_ud_t chk_udata; /* User data for locking chunk */ H5D_storage_t chk_store; /* Chunk storage information */ + H5D_dset_info_t chk_dset_info; /* Chunked I/O dset info object */ H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */ H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */ void *chunk; /* The file chunk */ @@ -4261,7 +4300,16 @@ H5D__chunk_update_old_edge_chunks(H5D_t *dset, hid_t dxpl_id, hsize_t old_dim[]) * Note that we only need to set chunk_offset once, as the array's address * will never change. */ chk_store.chunk.scaled = chunk_sc; - H5D_BUILD_IO_INFO_RD(&chk_io_info, dset, dxpl_cache, dxpl_id, H5AC_rawdata_dxpl_id, &chk_store, NULL); + + chk_io_info.dxpl_cache = dxpl_cache; + chk_io_info.md_dxpl_id = dxpl_id; + chk_io_info.op_type = H5D_IO_OP_READ; + chk_io_info.raw_dxpl_id = H5AC_rawdata_dxpl_id; + + chk_dset_info.dset = dset; + chk_dset_info.store = &chk_store; + chk_dset_info.u.rbuf = NULL; + chk_io_info.dsets_info = &chk_dset_info; /* * Determine the edges of the dataset which need to be modified @@ -4535,7 +4583,7 @@ static herr_t H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata, hbool_t new_unfilt_chunk) { const H5D_io_info_t *io_info = udata->io_info; /* Local pointer to I/O info */ - const H5D_t *dset = io_info->dset; /* Local pointer to the dataset info */ + const H5D_t *dset = io_info->dsets_info[0].dset; /* Local pointer to the dataset info */ const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset's layout */ unsigned rank = udata->common.layout->ndims - 1; /* Dataset rank */ const hsize_t *scaled = udata->common.scaled; /* Scaled chunk offset */ @@ -4749,6 
+4797,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim) hbool_t new_unfilt_dim[H5O_LAYOUT_NDIMS]; /* Whether the plane of edge chunks in this dimension are newly unfiltered */ H5D_chk_idx_info_t idx_info; /* Chunked index info */ H5D_io_info_t chk_io_info; /* Chunked I/O info object */ + H5D_dset_info_t chk_dset_info; /* Chunked I/O dset info object */ H5D_storage_t chk_store; /* Chunk storage information */ H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */ H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */ @@ -4823,9 +4872,17 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim) * Note that we only need to set scaled once, as the array's address * will never change. */ chk_store.chunk.scaled = scaled; - H5D_BUILD_IO_INFO_RD(&chk_io_info, dset, dxpl_cache, dxpl_id, H5AC_rawdata_dxpl_id, &chk_store, NULL); + + chk_io_info.dxpl_cache = dxpl_cache; + chk_io_info.md_dxpl_id = dxpl_id; + chk_io_info.op_type = H5D_IO_OP_READ; chk_io_info.raw_dxpl_id = H5AC_rawdata_dxpl_id; + chk_dset_info.dset = dset; + chk_dset_info.store = &chk_store; + chk_dset_info.u.rbuf = NULL; + chk_io_info.dsets_info = &chk_dset_info; + /* Compose chunked index info struct */ idx_info.f = dset->oloc.file; idx_info.dxpl_id = dxpl_id; @@ -5119,7 +5176,7 @@ herr_t H5D__chunk_addrmap(const H5D_io_info_t *io_info, haddr_t chunk_addr[]) { H5D_chk_idx_info_t idx_info; /* Chunked index info */ - const H5D_t *dset = io_info->dset; /* Local pointer to dataset info */ + const H5D_t *dset = io_info->dsets_info[0].dset; /* Local pointer to dataset info */ H5D_chunk_it_ud2_t udata; /* User data for iteration callback */ H5O_storage_chunk_t *sc = &(dset->shared->layout.storage.u.chunk); herr_t ret_value = SUCCEED; /* Return value */ @@ -6144,8 +6201,8 @@ H5D__nonexistent_readvv(const H5D_io_info_t *io_info, HDassert(mem_off_arr); /* Set up user data for H5VM_opvv() */ - udata.rbuf = (unsigned char 
*)io_info->u.rbuf; - udata.dset = io_info->dset; + udata.rbuf = (unsigned char *)io_info->dsets_info[0].u.rbuf; + udata.dset = io_info->dsets_info[0].dset; udata.dxpl_id = io_info->md_dxpl_id; /* Call generic sequence operation routine */ diff --git a/src/H5Dcompact.c b/src/H5Dcompact.c index 224a1d1..4716bca 100644 --- a/src/H5Dcompact.c +++ b/src/H5Dcompact.c @@ -60,9 +60,9 @@ /* Layout operation callbacks */ static herr_t H5D__compact_construct(H5F_t *f, H5D_t *dset); static hbool_t H5D__compact_is_space_alloc(const H5O_storage_t *storage); -static herr_t H5D__compact_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info, - hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space, - H5D_chunk_map_t *cm); +static herr_t H5D__compact_io_init(H5D_io_info_t *io_info, + const H5D_type_info_t *type_info, hsize_t nelmts, const H5S_t *file_space, + const H5S_t *mem_space, H5D_dset_info_t *dinfo); static ssize_t H5D__compact_readvv(const H5D_io_info_t *io_info, size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_size_arr[], hsize_t dset_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_size_arr[], hsize_t mem_offset_arr[]); @@ -251,14 +251,14 @@ H5D__compact_is_space_alloc(const H5O_storage_t H5_ATTR_UNUSED *storage) *------------------------------------------------------------------------- */ static herr_t -H5D__compact_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t H5_ATTR_UNUSED *type_info, +H5D__compact_io_init(H5D_io_info_t H5_ATTR_UNUSED *io_info, const H5D_type_info_t H5_ATTR_UNUSED *type_info, hsize_t H5_ATTR_UNUSED nelmts, const H5S_t H5_ATTR_UNUSED *file_space, const H5S_t H5_ATTR_UNUSED *mem_space, - H5D_chunk_map_t H5_ATTR_UNUSED *cm) + H5D_dset_info_t *dinfo) { FUNC_ENTER_STATIC_NOERR - io_info->store->compact.buf = io_info->dset->shared->layout.storage.u.compact.buf; - io_info->store->compact.dirty = &io_info->dset->shared->layout.storage.u.compact.dirty; + dinfo->store->compact.buf = 
dinfo->dset->shared->layout.storage.u.compact.buf; + dinfo->store->compact.dirty = &dinfo->dset->shared->layout.storage.u.compact.dirty; FUNC_LEAVE_NOAPI(SUCCEED) } /* end H5D__compact_io_init() */ @@ -294,7 +294,9 @@ H5D__compact_readvv(const H5D_io_info_t *io_info, HDassert(io_info); /* Use the vectorized memory copy routine to do actual work */ - if((ret_value = H5VM_memcpyvv(io_info->u.rbuf, mem_max_nseq, mem_curr_seq, mem_size_arr, mem_offset_arr, io_info->store->compact.buf, dset_max_nseq, dset_curr_seq, dset_size_arr, dset_offset_arr)) < 0) + if((ret_value = H5VM_memcpyvv(io_info->dsets_info[0].u.rbuf, mem_max_nseq, mem_curr_seq, + mem_size_arr, mem_offset_arr, io_info->dsets_info[0].store->compact.buf, + dset_max_nseq, dset_curr_seq, dset_size_arr, dset_offset_arr)) < 0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "vectorized memcpy failed") done: @@ -335,11 +337,14 @@ H5D__compact_writevv(const H5D_io_info_t *io_info, HDassert(io_info); /* Use the vectorized memory copy routine to do actual work */ - if((ret_value = H5VM_memcpyvv(io_info->store->compact.buf, dset_max_nseq, dset_curr_seq, dset_size_arr, dset_offset_arr, io_info->u.wbuf, mem_max_nseq, mem_curr_seq, mem_size_arr, mem_offset_arr)) < 0) + if((ret_value = H5VM_memcpyvv(io_info->dsets_info[0].store->compact.buf, dset_max_nseq, + dset_curr_seq, dset_size_arr, dset_offset_arr, + io_info->dsets_info[0].u.wbuf, mem_max_nseq, mem_curr_seq, + mem_size_arr, mem_offset_arr)) < 0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "vectorized memcpy failed") /* Mark the compact dataset's buffer as dirty */ - *io_info->store->compact.dirty = TRUE; + *io_info->dsets_info[0].store->compact.dirty = TRUE; done: FUNC_LEAVE_NOAPI(ret_value) diff --git a/src/H5Dcontig.c b/src/H5Dcontig.c index a24abe6..6f7f05e 100644 --- a/src/H5Dcontig.c +++ b/src/H5Dcontig.c @@ -98,9 +98,8 @@ typedef struct H5D_contig_writevv_ud_t { static herr_t H5D__contig_construct(H5F_t *f, H5D_t *dset); static herr_t H5D__contig_init(H5F_t *f, hid_t 
dxpl_id, const H5D_t *dset, hid_t dapl_id); -static herr_t H5D__contig_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info, - hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space, - H5D_chunk_map_t *cm); +static herr_t H5D__contig_io_init(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, + hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space, H5D_dset_info_t *dinfo); static ssize_t H5D__contig_readvv(const H5D_io_info_t *io_info, size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[]); @@ -127,13 +126,13 @@ const H5D_layout_ops_t H5D_LOPS_CONTIG[1] = {{ H5D__contig_read, H5D__contig_write, #ifdef H5_HAVE_PARALLEL - H5D__contig_collective_read, - H5D__contig_collective_write, + H5D__collective_read, + H5D__collective_write, #endif /* H5_HAVE_PARALLEL */ H5D__contig_readvv, H5D__contig_writevv, H5D__contig_flush, - NULL, + H5D__piece_io_term, NULL }}; @@ -148,6 +147,8 @@ H5FL_BLK_DEFINE(sieve_buf); /* Declare extern the free list to manage blocks of type conversion data */ H5FL_BLK_EXTERN(type_conv); +/* Declare extern the free list to manage the H5D_piece_info_t struct */ +H5FL_EXTERN(H5D_piece_info_t); /*------------------------------------------------------------------------- @@ -199,6 +200,7 @@ H5D__contig_fill(const H5D_io_info_t *io_info) { const H5D_t *dset = io_info->dset; /* the dataset pointer */ H5D_io_info_t ioinfo; /* Dataset I/O info */ + H5D_dset_info_t dset_info; /* Dset info */ H5D_storage_t store; /* Union of storage info for dataset */ H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */ H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */ @@ -270,7 +272,15 @@ H5D__contig_fill(const H5D_io_info_t *io_info) offset = 0; /* Simple setup for dataset I/O info struct */ - H5D_BUILD_IO_INFO_WRT(&ioinfo, dset, dxpl_cache, 
H5AC_ind_read_dxpl_id, raw_dxpl_id, &store, fb_info.fill_buf); + ioinfo.dxpl_cache = dxpl_cache; + ioinfo.op_type = H5D_IO_OP_WRITE; + ioinfo.md_dxpl_id = H5AC_ind_read_dxpl_id; + ioinfo.raw_dxpl_id = raw_dxpl_id; + + dset_info.dset = (H5D_t *)dset; + dset_info.store = &store; + dset_info.u.wbuf = fb_info.fill_buf; + ioinfo.dsets_info = &dset_info; /* * Fill the entire current extent with the fill value. We can do @@ -403,7 +413,7 @@ H5D__contig_construct(H5F_t *f, H5D_t *dset) /* * The maximum size of the dataset cannot exceed the storage size. - * Also, only the slowest varying dimension of a simple data space + * Also, only the slowest varying dimension of a simple dataspace * can be extendible (currently only for external data storage). */ @@ -555,22 +565,205 @@ H5D__contig_is_space_alloc(const H5O_storage_t *storage) * * Return: Non-negative on success/Negative on failure * - * Programmer: Quincey Koziol - * Thursday, March 20, 2008 - * + * Programmer: Jonathan Kim *------------------------------------------------------------------------- */ static herr_t -H5D__contig_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t H5_ATTR_UNUSED *type_info, - hsize_t H5_ATTR_UNUSED nelmts, const H5S_t H5_ATTR_UNUSED *file_space, const H5S_t H5_ATTR_UNUSED *mem_space, - H5D_chunk_map_t H5_ATTR_UNUSED *cm) +H5D__contig_io_init(H5D_io_info_t *io_info, + const H5D_type_info_t H5_ATTR_UNUSED *type_info, hsize_t nelmts, + const H5S_t *file_space, const H5S_t *mem_space, H5D_dset_info_t *dinfo) { - FUNC_ENTER_STATIC_NOERR + H5D_t *dataset = dinfo->dset; /* Local pointer to dataset info */ + + hssize_t old_offset[H5O_LAYOUT_NDIMS]; /* Old selection offset */ + htri_t file_space_normalized = FALSE; /* File dataspace was normalized */ + + int sm_ndims; /* The number of dimensions of the memory buffer's dataspace (signed) */ + int sf_ndims; /* The number of dimensions of the file dataspace (signed) */ + H5S_class_t fsclass_type; /* file space class type */ + H5S_sel_type 
fsel_type; /* file space selection type */ + hbool_t sel_hyper_flag; + + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + dinfo->store->contig.dset_addr = dataset->shared->layout.storage.u.contig.addr; + dinfo->store->contig.dset_size = dataset->shared->layout.storage.u.contig.size; + + /* Get layout for dataset */ + dinfo->layout = &(dataset->shared->layout); + /* num of element selected */ + dinfo->nelmts = nelmts; + + /* Check if the memory space is scalar & make equivalent memory space */ + if((sm_ndims = H5S_GET_EXTENT_NDIMS(mem_space)) < 0) + HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimension number") + /* Set the number of dimensions for the memory dataspace */ + H5_CHECKED_ASSIGN(dinfo->m_ndims, unsigned, sm_ndims, int); + + /* Get dim number and dimensionality for each dataspace */ + if((sf_ndims = H5S_GET_EXTENT_NDIMS(file_space)) < 0) + HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimension number") + /* Set the number of dimensions for the file dataspace */ + H5_CHECKED_ASSIGN(dinfo->f_ndims, unsigned, sf_ndims, int); + + if(H5S_get_simple_extent_dims(file_space, dinfo->f_dims, NULL) < 0) + HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimensionality") + + /* Normalize hyperslab selections by adjusting them by the offset */ + /* (It might be worthwhile to normalize both the file and memory dataspaces + * before any (contiguous, chunked, etc) file I/O operation, in order to + * speed up hyperslab calculations by removing the extra checks and/or + * additions involving the offset and the hyperslab selection -QAK) + */ + if((file_space_normalized = H5S_hyper_normalize_offset((H5S_t *)file_space, old_offset)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_BADSELECT, FAIL, "unable to normalize dataspace by offset") + + /* Initialize "last chunk" information */ + dinfo->last_index = (hsize_t)-1; + dinfo->last_piece_info = NULL; + + /* Point at the dataspaces */ + dinfo->file_space = file_space; + 
dinfo->mem_space = mem_space; + + /* Only need single skip list point over multiple read/write IO + * and multiple dsets until H5D_close. Thus check both + * since io_info->sel_pieces only lives single write/read IO, + * even cache.sel_pieces lives until Dclose */ + if(NULL == dataset->shared->cache.sel_pieces && + NULL == io_info->sel_pieces) { + if(NULL == (dataset->shared->cache.sel_pieces = H5SL_create(H5SL_TYPE_HADDR, NULL))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCREATE, FAIL, "can't create skip list for piece selections") + + /* keep the skip list in cache, so do not need to recreate until close */ + io_info->sel_pieces = dataset->shared->cache.sel_pieces; + } /* end if */ + + /* this is need when multiple write/read occurs on the same dsets, + * just pass the previously created pointer */ + if (NULL == io_info->sel_pieces) + io_info->sel_pieces = dataset->shared->cache.sel_pieces; + + HDassert(io_info->sel_pieces); + + /* We are not using single element mode */ + dinfo->use_single = FALSE; + + /* Get type of space class on disk */ + if((fsclass_type = H5S_GET_EXTENT_TYPE(file_space)) < H5S_SCALAR) + HGOTO_ERROR(H5E_FSPACE, H5E_BADTYPE, FAIL, "unable to get fspace class type") + + /* Get type of selection on disk & in memory */ + if((fsel_type = H5S_GET_SELECT_TYPE(file_space)) < H5S_SEL_NONE) + HGOTO_ERROR(H5E_DATASET, H5E_BADSELECT, FAIL, "unable to get type of selection") + if((dinfo->msel_type = H5S_GET_SELECT_TYPE(mem_space)) < H5S_SEL_NONE) + HGOTO_ERROR(H5E_DATASET, H5E_BADSELECT, FAIL, "unable to get type of selection") + + /* if class type is scalar or null for contiguous dset */ + if(fsclass_type == H5S_SCALAR || fsclass_type == H5S_NULL) + sel_hyper_flag = FALSE; + /* if class type is H5S_SIMPLE & if selection is NONE or POINTS */ + else if(fsel_type == H5S_SEL_POINTS || fsel_type == H5S_SEL_NONE) + sel_hyper_flag = FALSE; + else + sel_hyper_flag = TRUE; + + /* if selected elements exist */ + if (dinfo->nelmts) { + unsigned u; + H5D_piece_info_t 
*new_piece_info; /* piece information to insert into skip list */ + + /* Get copy of dset file_space, so it can be changed temporarily + * purpose + * This tmp_fspace allows multiple write before close dset */ + H5S_t *tmp_fspace; /* Temporary file dataspace */ + /* Create "temporary" chunk for selection operations (copy file space) */ + if(NULL == (tmp_fspace = H5S_copy(dinfo->file_space, TRUE, FALSE))) + HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCOPY, FAIL, "unable to copy memory space") + + /* Actions specific to hyperslab selections */ + if(sel_hyper_flag) { + /* Sanity check */ + HDassert(dinfo->f_ndims > 0); + + /* Make certain selections are stored in span tree form (not "optimized hyperslab" or "all") */ + if(H5S_hyper_convert(tmp_fspace) < 0) { + (void)H5S_close(tmp_fspace); + HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to convert selection to span trees") + } /* end if */ + } /* end if */ + + /* Add temporary chunk to the list of pieces */ + /* collect piece_info into Skip List */ + /* Allocate the file & memory chunk information */ + if (NULL==(new_piece_info = H5FL_MALLOC (H5D_piece_info_t))) { + (void)H5S_close(tmp_fspace); + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate chunk info") + } /* end if */ + + /* Set the piece index */ + new_piece_info->index = 0; + + /* Set the file chunk dataspace */ + new_piece_info->fspace = tmp_fspace; + new_piece_info->fspace_shared = FALSE; + + /* Set the memory chunk dataspace */ + /* same as one chunk, just use dset mem space */ + new_piece_info->mspace = mem_space; + + /* set true for sharing mem space with dset, which means + * fspace gets free by applicaton H5Sclose(), and + * doesn't require providing layout_ops.io_term() for H5D_LOPS_CONTIG. 
+ */ + new_piece_info->mspace_shared = TRUE; + + /* Copy the piece's coordinates */ + for(u = 0; u < dinfo->f_ndims; u++) + new_piece_info->scaled[u] = 0; + new_piece_info->scaled[dinfo->f_ndims] = 0; + + /* make connection to related dset info from this piece_info */ + new_piece_info->dset_info = dinfo; + + /* get dset file address for piece */ + new_piece_info->faddr = dinfo->dset->shared->layout.storage.u.contig.addr; + + /* Save piece to last_piece_info so it is freed at the end of the + * operation */ + dinfo->last_piece_info = new_piece_info; + + /* insert piece info */ + if(H5SL_insert(io_info->sel_pieces, new_piece_info, &new_piece_info->faddr) < 0) { + /* mimic H5D__free_piece_info */ + H5S_select_all(new_piece_info->fspace, TRUE); + H5FL_FREE(H5D_piece_info_t, new_piece_info); + HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINSERT, FAIL, "can't insert chunk into skip list") + } /* end if */ + H5_CHECKED_ASSIGN(new_piece_info->piece_points, uint32_t, nelmts, hssize_t); + + /* only scratch for this dset */ + /* Clean hyperslab span's "scratch" information */ + if(sel_hyper_flag) + if(H5S_hyper_reset_scratch(new_piece_info->fspace) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to reset span scratch info") + } /* end if */ + +done: + if(ret_value < 0) { + if(H5D__piece_io_term(io_info, dinfo) < 0) + HDONE_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release chunk mapping") + } /* end if */ - io_info->store->contig.dset_addr = io_info->dset->shared->layout.storage.u.contig.addr; - io_info->store->contig.dset_size = io_info->dset->shared->layout.storage.u.contig.size; + if(file_space_normalized) { + /* (Casting away const OK -QAK) */ + if(H5S_hyper_denormalize_offset((H5S_t *)file_space, old_offset) < 0) + HDONE_ERROR(H5E_DATASET, H5E_BADSELECT, FAIL, "unable to normalize dataspace by offset") + } /* end if */ - FUNC_LEAVE_NOAPI(SUCCEED) + FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__contig_io_init() */ @@ -589,7 +782,7 @@ H5D__contig_io_init(const 
H5D_io_info_t *io_info, const H5D_type_info_t H5_ATTR_ herr_t H5D__contig_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space, - H5D_chunk_map_t H5_ATTR_UNUSED *fm) + H5D_dset_info_t *dinfo) { herr_t ret_value = SUCCEED; /*return value */ @@ -597,11 +790,13 @@ H5D__contig_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, /* Sanity check */ HDassert(io_info); - HDassert(io_info->u.rbuf); + HDassert(dinfo->u.rbuf); HDassert(type_info); HDassert(mem_space); HDassert(file_space); + io_info->dset = io_info->dsets_info[0].dset; + /* Read data */ if((io_info->io_ops.single_read)(io_info, type_info, nelmts, file_space, mem_space) < 0) HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "contiguous read failed") @@ -626,7 +821,7 @@ done: herr_t H5D__contig_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space, - H5D_chunk_map_t H5_ATTR_UNUSED *fm) + H5D_dset_info_t *dinfo) { herr_t ret_value = SUCCEED; /*return value */ @@ -634,11 +829,13 @@ H5D__contig_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, /* Sanity check */ HDassert(io_info); - HDassert(io_info->u.wbuf); + HDassert(dinfo->u.wbuf); HDassert(type_info); HDassert(mem_space); HDassert(file_space); + io_info->dset = io_info->dsets_info[0].dset; + /* Write data */ if((io_info->io_ops.single_write)(io_info, type_info, nelmts, file_space, mem_space) < 0) HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "contiguous write failed") @@ -912,6 +1109,7 @@ H5D__contig_readvv(const H5D_io_info_t *io_info, size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_off_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_off_arr[]) { + H5D_dset_info_t dset_info; ssize_t ret_value = -1; /* Return value */ FUNC_ENTER_STATIC @@ -925,15 +1123,17 @@ H5D__contig_readvv(const H5D_io_info_t *io_info, 
HDassert(mem_len_arr); HDassert(mem_off_arr); + dset_info = io_info->dsets_info[0]; + /* Check if data sieving is enabled */ - if(H5F_HAS_FEATURE(io_info->dset->oloc.file, H5FD_FEAT_DATA_SIEVE)) { + if(H5F_HAS_FEATURE(dset_info.dset->oloc.file, H5FD_FEAT_DATA_SIEVE)) { H5D_contig_readvv_sieve_ud_t udata; /* User data for H5VM_opvv() operator */ /* Set up user data for H5VM_opvv() */ - udata.file = io_info->dset->oloc.file; - udata.dset_contig = &(io_info->dset->shared->cache.contig); - udata.store_contig = &(io_info->store->contig); - udata.rbuf = (unsigned char *)io_info->u.rbuf; + udata.file = dset_info.dset->oloc.file; + udata.dset_contig = &(dset_info.dset->shared->cache.contig); + udata.store_contig = &(dset_info.store->contig); + udata.rbuf = (unsigned char *)dset_info.u.rbuf; udata.dxpl_id = io_info->raw_dxpl_id; /* Call generic sequence operation routine */ @@ -946,9 +1146,9 @@ H5D__contig_readvv(const H5D_io_info_t *io_info, H5D_contig_readvv_ud_t udata; /* User data for H5VM_opvv() operator */ /* Set up user data for H5VM_opvv() */ - udata.file = io_info->dset->oloc.file; - udata.dset_addr = io_info->store->contig.dset_addr; - udata.rbuf = (unsigned char *)io_info->u.rbuf; + udata.file = dset_info.dset->oloc.file; + udata.dset_addr = dset_info.store->contig.dset_addr; + udata.rbuf = (unsigned char *)dset_info.u.rbuf; udata.dxpl_id = io_info->raw_dxpl_id; /* Call generic sequence operation routine */ @@ -1238,6 +1438,7 @@ H5D__contig_writevv(const H5D_io_info_t *io_info, size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_off_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_off_arr[]) { + H5D_dset_info_t dset_info; ssize_t ret_value = -1; /* Return value (Size of sequence in bytes) */ FUNC_ENTER_STATIC @@ -1251,15 +1452,17 @@ H5D__contig_writevv(const H5D_io_info_t *io_info, HDassert(mem_len_arr); HDassert(mem_off_arr); + dset_info = io_info->dsets_info[0]; + /* Check if data sieving is 
enabled */ - if(H5F_HAS_FEATURE(io_info->dset->oloc.file, H5FD_FEAT_DATA_SIEVE)) { + if(H5F_HAS_FEATURE(dset_info.dset->oloc.file, H5FD_FEAT_DATA_SIEVE)) { H5D_contig_writevv_sieve_ud_t udata; /* User data for H5VM_opvv() operator */ /* Set up user data for H5VM_opvv() */ - udata.file = io_info->dset->oloc.file; - udata.dset_contig = &(io_info->dset->shared->cache.contig); - udata.store_contig = &(io_info->store->contig); - udata.wbuf = (const unsigned char *)io_info->u.wbuf; + udata.file = dset_info.dset->oloc.file; + udata.dset_contig = &(dset_info.dset->shared->cache.contig); + udata.store_contig = &(dset_info.store->contig); + udata.wbuf = (const unsigned char *)dset_info.u.wbuf; udata.dxpl_id = io_info->raw_dxpl_id; /* Call generic sequence operation routine */ @@ -1272,9 +1475,9 @@ H5D__contig_writevv(const H5D_io_info_t *io_info, H5D_contig_writevv_ud_t udata; /* User data for H5VM_opvv() operator */ /* Set up user data for H5VM_opvv() */ - udata.file = io_info->dset->oloc.file; - udata.dset_addr = io_info->store->contig.dset_addr; - udata.wbuf = (const unsigned char *)io_info->u.wbuf; + udata.file = dset_info.dset->oloc.file; + udata.dset_addr = dset_info.store->contig.dset_addr; + udata.wbuf = (const unsigned char *)dset_info.u.wbuf; udata.dxpl_id = io_info->raw_dxpl_id; /* Call generic sequence operation routine */ diff --git a/src/H5Defl.c b/src/H5Defl.c index cf1b36c..bf08568 100644 --- a/src/H5Defl.c +++ b/src/H5Defl.c @@ -67,9 +67,9 @@ typedef struct H5D_efl_writevv_ud_t { /* Layout operation callbacks */ static herr_t H5D__efl_construct(H5F_t *f, H5D_t *dset); -static herr_t H5D__efl_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info, - hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space, - H5D_chunk_map_t *cm); +static herr_t H5D__efl_io_init(H5D_io_info_t *io_info, + const H5D_type_info_t *type_info, hsize_t nelmts, const H5S_t *file_space, + const H5S_t *mem_space, H5D_dset_info_t *dinfo); static ssize_t 
H5D__efl_readvv(const H5D_io_info_t *io_info, size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[]); @@ -222,13 +222,13 @@ H5D__efl_is_space_alloc(const H5O_storage_t H5_ATTR_UNUSED *storage) *------------------------------------------------------------------------- */ static herr_t -H5D__efl_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t H5_ATTR_UNUSED *type_info, +H5D__efl_io_init(H5D_io_info_t H5_ATTR_UNUSED *io_info, const H5D_type_info_t H5_ATTR_UNUSED *type_info, hsize_t H5_ATTR_UNUSED nelmts, const H5S_t H5_ATTR_UNUSED *file_space, const H5S_t H5_ATTR_UNUSED *mem_space, - H5D_chunk_map_t H5_ATTR_UNUSED *cm) + H5D_dset_info_t *dinfo) { FUNC_ENTER_STATIC_NOERR - HDmemcpy(&io_info->store->efl, &(io_info->dset->shared->dcpl_cache.efl), sizeof(H5O_efl_t)); + HDmemcpy(&dinfo->store->efl, &(dinfo->dset->shared->dcpl_cache.efl), sizeof(H5O_efl_t)); FUNC_LEAVE_NOAPI(SUCCEED) } /* end H5D__efl_io_init() */ @@ -474,8 +474,8 @@ H5D__efl_readvv(const H5D_io_info_t *io_info, /* Check args */ HDassert(io_info); - HDassert(io_info->store->efl.nused > 0); - HDassert(io_info->u.rbuf); + HDassert(io_info->dsets_info[0].store->efl.nused > 0); + HDassert(io_info->dsets_info[0].u.rbuf); HDassert(io_info->dset); HDassert(io_info->dset->shared); HDassert(io_info->dset->shared->extfile_prefix); @@ -487,9 +487,9 @@ H5D__efl_readvv(const H5D_io_info_t *io_info, HDassert(mem_off_arr); /* Set up user data for H5VM_opvv() */ - udata.efl = &(io_info->store->efl); + udata.efl = &(io_info->dsets_info[0].store->efl); udata.dset = io_info->dset; - udata.rbuf = (unsigned char *)io_info->u.rbuf; + udata.rbuf = (unsigned char *)io_info->dsets_info[0].u.rbuf; /* Call generic sequence operation routine */ if((ret_value = H5VM_opvv(dset_max_nseq, dset_curr_seq, dset_len_arr, dset_off_arr, @@ -558,8 +558,8 @@ H5D__efl_writevv(const H5D_io_info_t *io_info, 
/* Check args */ HDassert(io_info); - HDassert(io_info->store->efl.nused > 0); - HDassert(io_info->u.wbuf); + HDassert(io_info->dsets_info[0].store->efl.nused > 0); + HDassert(io_info->dsets_info[0].u.wbuf); HDassert(io_info->dset); HDassert(io_info->dset->shared); HDassert(io_info->dset->shared->extfile_prefix); @@ -571,9 +571,9 @@ H5D__efl_writevv(const H5D_io_info_t *io_info, HDassert(mem_off_arr); /* Set up user data for H5VM_opvv() */ - udata.efl = &(io_info->store->efl); + udata.efl = &(io_info->dsets_info[0].store->efl); udata.dset = io_info->dset; - udata.wbuf = (const unsigned char *)io_info->u.wbuf; + udata.wbuf = (const unsigned char *)io_info->dsets_info[0].u.wbuf; /* Call generic sequence operation routine */ if((ret_value = H5VM_opvv(dset_max_nseq, dset_curr_seq, dset_len_arr, dset_off_arr, diff --git a/src/H5Dint.c b/src/H5Dint.c index 0355656..18be105 100644 --- a/src/H5Dint.c +++ b/src/H5Dint.c @@ -100,8 +100,8 @@ H5FL_DEFINE_STATIC(H5D_shared_t); /* Declare the external PQ free list for the sieve buffer information */ H5FL_BLK_EXTERN(sieve_buf); -/* Declare the external free list to manage the H5D_chunk_info_t struct */ -H5FL_EXTERN(H5D_chunk_info_t); +/* Declare the external free list to manage the H5D_piece_info_t struct */ +H5FL_EXTERN(H5D_piece_info_t); /* Declare extern the free list to manage blocks of type conversion data */ H5FL_BLK_EXTERN(type_conv); @@ -503,7 +503,7 @@ done: /*------------------------------------------------------------------------- * Function: H5D__get_space_status * - * Purpose: Returns the status of data space allocation. + * Purpose: Returns the status of dataspace allocation. 
* * Return: * Success: Non-negative @@ -1709,9 +1709,17 @@ H5D_close(H5D_t *dataset) /* Free cached information for each kind of dataset */ switch(dataset->shared->layout.type) { case H5D_CONTIGUOUS: + /* Check for skip list for iterating over pieces during I/O to close */ + if(dataset->shared->cache.sel_pieces) { + HDassert(H5SL_count(dataset->shared->cache.sel_pieces) == 0); + H5SL_close(dataset->shared->cache.sel_pieces); + dataset->shared->cache.sel_pieces = NULL; + } /* end if */ + /* Free the data sieve buffer, if it's been allocated */ if(dataset->shared->cache.contig.sieve_buf) dataset->shared->cache.contig.sieve_buf = (unsigned char *)H5FL_BLK_FREE(sieve_buf,dataset->shared->cache.contig.sieve_buf); + break; case H5D_CHUNKED: @@ -1722,6 +1730,13 @@ H5D_close(H5D_t *dataset) dataset->shared->cache.chunk.sel_chunks = NULL; } /* end if */ + /* Check for skip list for iterating over pieces during I/O to close */ + if(dataset->shared->cache.sel_pieces) { + HDassert(H5SL_count(dataset->shared->cache.sel_pieces) == 0); + H5SL_close(dataset->shared->cache.sel_pieces); + dataset->shared->cache.sel_pieces = NULL; + } /* end if */ + /* Check for cached single chunk dataspace */ if(dataset->shared->cache.chunk.single_space) { (void)H5S_close(dataset->shared->cache.chunk.single_space); @@ -1729,10 +1744,9 @@ H5D_close(H5D_t *dataset) } /* end if */ /* Check for cached single element chunk info */ - if(dataset->shared->cache.chunk.single_chunk_info) { - dataset->shared->cache.chunk.single_chunk_info = H5FL_FREE(H5D_chunk_info_t, dataset->shared->cache.chunk.single_chunk_info); - dataset->shared->cache.chunk.single_chunk_info = NULL; - } /* end if */ + if(dataset->shared->cache.chunk.single_piece_info) + dataset->shared->cache.chunk.single_piece_info = + H5FL_FREE(H5D_piece_info_t, dataset->shared->cache.chunk.single_piece_info); break; case H5D_COMPACT: @@ -2393,6 +2407,7 @@ herr_t H5D__vlen_get_buf_size(void H5_ATTR_UNUSED *elem, hid_t type_id, unsigned H5_ATTR_UNUSED 
ndim, const hsize_t *point, void *op_data) { H5D_vlen_bufsize_t *vlen_bufsize = (H5D_vlen_bufsize_t *)op_data; + H5D_dset_info_t *dset_info = NULL; /* Internal multi-dataset info placeholder */ H5T_t *dt; /* Datatype for operation */ herr_t ret_value = SUCCEED; /* Return value */ @@ -2413,11 +2428,30 @@ H5D__vlen_get_buf_size(void H5_ATTR_UNUSED *elem, hid_t type_id, unsigned H5_ATT if(H5S_select_elements(vlen_bufsize->fspace, H5S_SELECT_SET, (size_t)1, point) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTCREATE, FAIL, "can't select point") - /* Read in the point (with the custom VL memory allocator) */ - if(H5D__read(vlen_bufsize->dset, type_id, vlen_bufsize->mspace, vlen_bufsize->fspace, vlen_bufsize->xfer_pid, vlen_bufsize->fl_tbuf) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read point") + { + hid_t file_id; /* File ID for operation */ + + /* Alloc dset_info */ + if(NULL == (dset_info = (H5D_dset_info_t *)H5MM_calloc(sizeof(H5D_dset_info_t)))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate dset info array buffer") + + dset_info->dset = vlen_bufsize->dset; + dset_info->mem_space = vlen_bufsize->mspace; + dset_info->file_space = vlen_bufsize->fspace; + dset_info->u.rbuf = vlen_bufsize->fl_tbuf; + dset_info->mem_type_id = type_id; + + /* Retrieve file_id */ + file_id = H5F_FILE_ID(dset_info->dset->oloc.file); + + /* Read in the point (with the custom VL memory allocator) */ + if(H5D__read(file_id, vlen_bufsize->xfer_pid, 1, dset_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read data") + } done: + if(dset_info) + H5MM_xfree(dset_info); FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__vlen_get_buf_size() */ diff --git a/src/H5Dio.c b/src/H5Dio.c index 7b3f553..255d4e4 100644 --- a/src/H5Dio.c +++ b/src/H5Dio.c @@ -51,25 +51,27 @@ /* Local Prototypes */ /********************/ -/* Internal I/O routines */ -static herr_t H5D__pre_write(H5D_t *dset, hbool_t direct_write, hid_t mem_type_id, - const H5S_t *mem_space, const H5S_t 
*file_space, hid_t dxpl_id, const void *buf); +/* Internal I/O routines for single-dset */ +static herr_t H5D__pre_read(hid_t file_id, hid_t dxpl_id, size_t count, + H5D_dset_info_t *dset_info); +static herr_t H5D__pre_write(hid_t file_id, hid_t dxpl_id, size_t count, + H5D_dset_info_t *dset_info); + +/* Internal I/O routines for multi-dset */ + /* Setup/teardown routines */ static herr_t H5D__ioinfo_init(H5D_t *dset, #ifndef H5_HAVE_PARALLEL -const + const #endif /* H5_HAVE_PARALLEL */ - H5D_dxpl_cache_t *dxpl_cache, - hid_t dxpl_id, const H5D_type_info_t *type_info, H5D_storage_t *store, - H5D_io_info_t *io_info); + H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, H5D_dset_info_t *dset_info, + H5D_storage_t *store, H5D_io_info_t *io_info); static herr_t H5D__typeinfo_init(const H5D_t *dset, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, hid_t mem_type_id, hbool_t do_write, H5D_type_info_t *type_info); #ifdef H5_HAVE_PARALLEL -static herr_t H5D__ioinfo_adjust(H5D_io_info_t *io_info, const H5D_t *dset, - hid_t dxpl_id, const H5S_t *file_space, const H5S_t *mem_space, - const H5D_type_info_t *type_info, const H5D_chunk_map_t *fm); +static herr_t H5D__ioinfo_adjust(const size_t count, H5D_io_info_t *io_info, hid_t dxpl_id); static herr_t H5D__ioinfo_term(H5D_io_info_t *io_info); #endif /* H5_HAVE_PARALLEL */ static herr_t H5D__typeinfo_term(const H5D_type_info_t *type_info); @@ -90,6 +92,95 @@ H5FL_BLK_DEFINE(type_conv); /*------------------------------------------------------------------------- + * Function: H5D__init_dset_info + * + * Purpose: Initializes a H5D_dset_info_t from a set of user parameters, + * while checking parameters too. 
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * Friday, August 29, 2014 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__init_dset_info(H5D_dset_info_t *dset_info, hid_t dset_id, + hid_t mem_type_id, hid_t mem_space_id, hid_t dset_space_id, + const H5D_dset_buf_t *u_buf) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Get dataset */ + if(NULL == (dset_info->dset = (H5D_t *)H5I_object_verify(dset_id, H5I_DATASET))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset") + if(NULL == dset_info->dset->oloc.file) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a file") + + /* Check for invalid space IDs */ + if(mem_space_id < 0 || dset_space_id < 0) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataspace") + + /* Get file dataspace */ + if(H5S_ALL != dset_space_id) { + if(NULL == (dset_info->file_space = (const H5S_t *)H5I_object_verify(dset_space_id, H5I_DATASPACE))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataspace") + + /* Check for valid selection */ + if(H5S_SELECT_VALID(dset_info->file_space) != TRUE) + HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "file selection+offset not within extent") + } /* end if */ + else + dset_info->file_space = dset_info->dset->shared->space; + + /* Get memory dataspace */ + if(H5S_ALL != mem_space_id) { + if(NULL == (dset_info->mem_space = (const H5S_t *)H5I_object_verify(mem_space_id, H5I_DATASPACE))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataspace") + + /* Check for valid selection */ + if(H5S_SELECT_VALID(dset_info->mem_space) != TRUE) + HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "memory selection+offset not within extent") + } /* end if */ + else + dset_info->mem_space = dset_info->file_space; + + /* Get memory datatype */ + dset_info->mem_type_id = mem_type_id; + + /* Get buffer */ + dset_info->u = *u_buf; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end 
H5D__init_dset_info() */ + +static hid_t +H5D__verify_location(size_t count, const H5D_dset_info_t *info) +{ + hid_t file_id; + size_t u; + hid_t ret_value = FAIL; /* Return value */ + + FUNC_ENTER_STATIC + + file_id = H5F_FILE_ID(info[0].dset->oloc.file); + + for(u = 1; u < count; u++) { + if(file_id != H5F_FILE_ID(info[u].dset->oloc.file)) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "dataset's file ID doesn't match file_id parameter") + } /* end for */ + + ret_value = file_id; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__verify_location */ + + +/*------------------------------------------------------------------------- * Function: H5Dread * * Purpose: Reads (part of) a DSET from the file into application @@ -100,14 +191,14 @@ H5FL_BLK_DEFINE(type_conv); * passed to this function with the PLIST_ID argument. * * The FILE_SPACE_ID can be the constant H5S_ALL which indicates - * that the entire file data space is to be referenced. + * that the entire file dataspace is to be referenced. * * The MEM_SPACE_ID can be the constant H5S_ALL in which case - * the memory data space is the same as the file data space + * the memory dataspace is the same as the file dataspace * defined when the dataset was created. * - * The number of elements in the memory data space must match - * the number of elements in the file data space. + * The number of elements in the memory dataspace must match + * the number of elements in the file dataspace. * * The PLIST_ID can be the constant H5P_DEFAULT in which * case the default data transfer properties are used. 
@@ -121,57 +212,193 @@ H5FL_BLK_DEFINE(type_conv); */ herr_t H5Dread(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id, - hid_t file_space_id, hid_t plist_id, void *buf/*out*/) + hid_t file_space_id, hid_t dxpl_id, void *buf/*out*/) { - H5D_t *dset = NULL; - const H5S_t *mem_space = NULL; - const H5S_t *file_space = NULL; - herr_t ret_value = SUCCEED; /* Return value */ + H5D_dset_info_t *dset_info = NULL; /* Internal multi-dataset info placeholder */ + H5D_dset_buf_t u_buf; /* Buffer pointer */ + hid_t file_id; /* File ID for operation */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) H5TRACE6("e", "iiiiix", dset_id, mem_type_id, mem_space_id, file_space_id, - plist_id, buf); + dxpl_id, buf); - /* check arguments */ - if(NULL == (dset = (H5D_t *)H5I_object_verify(dset_id, H5I_DATASET))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset") - if(NULL == dset->oloc.file) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset") + /* Get the default dataset transfer property list if the user didn't provide one */ + if(H5P_DEFAULT == dxpl_id) + dxpl_id = H5P_DATASET_XFER_DEFAULT; + else + if(TRUE != H5P_isa_class(dxpl_id, H5P_DATASET_XFER)) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not xfer parms") - if(mem_space_id < 0 || file_space_id < 0) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data space") + /* Alloc dset_info */ + if(NULL == (dset_info = (H5D_dset_info_t *)H5MM_calloc(sizeof(H5D_dset_info_t)))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate dset info array buffer") - if(H5S_ALL != mem_space_id) { - if(NULL == (mem_space = (const H5S_t *)H5I_object_verify(mem_space_id, H5I_DATASPACE))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data space") + /* Translate public multi-dataset info to internal structure */ + /* (And check parameters) */ + u_buf.rbuf = buf; + if(H5D__init_dset_info(dset_info, dset_id, mem_type_id, mem_space_id, file_space_id, &u_buf) < 0) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, 
FAIL, "can't init dataset info") - /* Check for valid selection */ - if(H5S_SELECT_VALID(mem_space) != TRUE) - HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "selection+offset not within extent") - } /* end if */ - if(H5S_ALL != file_space_id) { - if(NULL == (file_space = (const H5S_t *)H5I_object_verify(file_space_id, H5I_DATASPACE))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data space") + /* Retrieve file_id */ + file_id = H5F_FILE_ID(dset_info->dset->oloc.file); - /* Check for valid selection */ - if(H5S_SELECT_VALID(file_space) != TRUE) - HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "selection+offset not within extent") - } /* end if */ + /* Call common pre-read routine */ + if(H5D__pre_read(file_id, dxpl_id, 1, dset_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't prepare for reading data") + +done: + if(dset_info) + H5MM_xfree(dset_info); + + FUNC_LEAVE_API(ret_value) +} /* end H5Dread() */ + + +/*------------------------------------------------------------------------- + * Function: H5Dread_multi + * + * Purpose: Multi-version of H5Dread(), which reads selections from + * multiple datasets from a file into application memory BUFS. 
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Jonathan Kim Nov, 2013 + * + *------------------------------------------------------------------------- + */ +herr_t +H5Dread_multi(hid_t dxpl_id, size_t count, H5D_rw_multi_t *info) +{ + H5D_dset_info_t *dset_info = NULL; /* Pointer to internal list of multi-dataset info */ + size_t u; /* Local index variable */ + hid_t file_id; /* file ID where datasets are located */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_API(FAIL) + H5TRACE3("e", "iz*Dm", dxpl_id, count, info); + + if(count <= 0) + HGOTO_DONE(SUCCEED) /* Get the default dataset transfer property list if the user didn't provide one */ - if (H5P_DEFAULT == plist_id) - plist_id= H5P_DATASET_XFER_DEFAULT; + if(H5P_DEFAULT == dxpl_id) + dxpl_id = H5P_DATASET_XFER_DEFAULT; else - if(TRUE != H5P_isa_class(plist_id, H5P_DATASET_XFER)) + if(TRUE != H5P_isa_class(dxpl_id, H5P_DATASET_XFER)) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not xfer parms") - /* read raw data */ - if(H5D__read(dset, mem_type_id, mem_space, file_space, plist_id, buf/*out*/) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read data") + /* Alloc dset_info */ + if(NULL == (dset_info = (H5D_dset_info_t *)H5MM_calloc(count * sizeof(H5D_dset_info_t)))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate dset info array buffer") + + /* Translate public multi-dataset info to internal structure */ + /* (And check parameters) */ + for(u = 0; u < count; u++) { + if(H5D__init_dset_info(&dset_info[u], info[u].dset_id, info[u].mem_type_id, info[u].mem_space_id, + info[u].dset_space_id, &(info[u].u.rbuf)) < 0) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't init dataset info") + } /* end for */ + + if((file_id = H5D__verify_location(count, dset_info)) < 0) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "datasets are not in the same file") + + /* Call common pre-read routine */ + if(H5D__pre_read(file_id, dxpl_id, count, dset_info) < 
0) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't prepare for reading data") done: + if(dset_info) + H5MM_xfree(dset_info); + FUNC_LEAVE_API(ret_value) -} /* end H5Dread() */ +} /* end H5Dread_multi() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__pre_read + * + * Purpose: Sets up a read operation. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Neil Fortner Apr, 2014 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__pre_read(hid_t file_id, hid_t dxpl_id, size_t count, + H5D_dset_info_t *dset_info) +{ + H5P_genplist_t *plist; /* DXPL property list pointer */ + H5FD_mpio_xfer_t xfer_mode; /* Parallel I/O transfer mode */ + hbool_t broke_mdset = FALSE; /* Whether to break multi-dataset option */ + size_t u; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* check args */ + HDassert(dxpl_id > 0); + HDassert(count > 0); + HDassert(dset_info); + + /* Retrieve DXPL for queries below */ + if(NULL == (plist = H5P_object_verify(dxpl_id, H5P_DATASET_XFER))) + HGOTO_ERROR(H5E_PLIST, H5E_BADTYPE, FAIL, "not a dxpl") + + /* Get the transfer mode */ + if(H5P_get(plist, H5D_XFER_IO_XFER_MODE_NAME, &xfer_mode) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "unable to get value") + + /* In independent mode or with an unsupported layout, for now just + read each dataset individually */ + if(xfer_mode == H5FD_MPIO_INDEPENDENT) + broke_mdset = TRUE; + else { + /* Multi-dset I/O currently supports CHUNKED and internal CONTIGUOUS + * only, not external CONTIGUOUS (EFL) or COMPACT. Fall back to + * individual dataset reads if any dataset uses an unsupported layout. 
+ */ + for(u = 0; u < count; u++) { + if(!(dset_info[u].dset->shared->layout.type == H5D_CHUNKED || + (dset_info[u].dset->shared->layout.type == H5D_CONTIGUOUS && + dset_info[u].dset->shared->layout.ops != H5D_LOPS_EFL))) { + broke_mdset = TRUE; + break; + } + } /* end for */ + } + + if(broke_mdset) { + /* Read raw data from each dataset by itself */ + for(u = 0; u < count; u++) + if(H5D__read(file_id, dxpl_id, 1, &dset_info[u]) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read data") + } /* end if */ + else { + HDassert(xfer_mode == H5FD_MPIO_COLLECTIVE); + + if(count > 0) { + if(H5D__read(file_id, dxpl_id, count, dset_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read data") + } /* end if */ +#ifdef H5_HAVE_PARALLEL + /* MSC - I do not think we should allow for this. I think we + should make the multi dataset APIs enforce a uniform list + of datasets among all processes, and users would enter a + NULL selection when a process does not have anything to + write to a particular dataset. */ + else { + if(H5D__match_coll_calls(file_id, plist, TRUE) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "failed in matching collective MPI calls") + } /* end else */ +#endif /* H5_HAVE_PARALLEL */ + } /* end else */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__pre_read() */ /*------------------------------------------------------------------------- @@ -186,14 +413,14 @@ done: * PLIST_ID argument. * * The FILE_SPACE_ID can be the constant H5S_ALL which indicates - * that the entire file data space is to be referenced. + * that the entire file dataspace is to be referenced. * * The MEM_SPACE_ID can be the constant H5S_ALL in which case - * the memory data space is the same as the file data space + * the memory dataspace is the same as the file dataspace * defined when the dataset was created. * - * The number of elements in the memory data space must match - * the number of elements in the file data space.
+ * The number of elements in the memory dataspace must match + * the number of elements in the file dataspace. * * The PLIST_ID can be the constant H5P_DEFAULT in which * case the default data transfer properties are used. @@ -207,107 +434,159 @@ done: */ herr_t H5Dwrite(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id, - hid_t file_space_id, hid_t dxpl_id, const void *buf) + hid_t file_space_id, hid_t dxpl_id, const void *buf) { - H5D_t *dset = NULL; - H5P_genplist_t *plist; /* Property list pointer */ - const H5S_t *mem_space = NULL; - const H5S_t *file_space = NULL; - hbool_t direct_write = FALSE; - herr_t ret_value = SUCCEED; /* Return value */ + H5D_dset_info_t *dset_info = NULL; /* Internal multi-dataset info placeholder */ + H5D_dset_buf_t u_buf; /* Buffer pointer */ + hid_t file_id; /* File ID for operation */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API(FAIL) H5TRACE6("e", "iiiii*x", dset_id, mem_type_id, mem_space_id, file_space_id, dxpl_id, buf); - /* check arguments */ - if(NULL == (dset = (H5D_t *)H5I_object_verify(dset_id, H5I_DATASET))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset") - if(NULL == dset->oloc.file) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a file") - /* Get the default dataset transfer property list if the user didn't provide one */ if(H5P_DEFAULT == dxpl_id) - dxpl_id= H5P_DATASET_XFER_DEFAULT; + dxpl_id = H5P_DATASET_XFER_DEFAULT; else if(TRUE != H5P_isa_class(dxpl_id, H5P_DATASET_XFER)) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not xfer parms") - /* Get the dataset transfer property list */ - if(NULL == (plist = (H5P_genplist_t *)H5I_object(dxpl_id))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset transfer property list") + /* Alloc dset_info */ + if(NULL == (dset_info = (H5D_dset_info_t *)H5MM_calloc(sizeof(H5D_dset_info_t)))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate dset info array buffer") - /* Retrieve the 'direct write' flag */ - if(H5P_get(plist, 
H5D_XFER_DIRECT_CHUNK_WRITE_FLAG_NAME, &direct_write) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "error getting flag for direct chunk write") + /* Translate public multi-dataset info to internal structure */ + /* (And check parameters) */ + u_buf.wbuf = buf; + if(H5D__init_dset_info(dset_info, dset_id, mem_type_id, mem_space_id, file_space_id, &u_buf) < 0) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't init dataset info") - /* Check dataspace selections if this is not a direct write */ - if(!direct_write) { - if(mem_space_id < 0 || file_space_id < 0) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataspace") - - if(H5S_ALL != mem_space_id) { - if(NULL == (mem_space = (const H5S_t *)H5I_object_verify(mem_space_id, H5I_DATASPACE))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataspace") - - /* Check for valid selection */ - if(H5S_SELECT_VALID(mem_space) != TRUE) - HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "memory selection+offset not within extent") - } /* end if */ - if(H5S_ALL != file_space_id) { - if(NULL == (file_space = (const H5S_t *)H5I_object_verify(file_space_id, H5I_DATASPACE))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataspace") - - /* Check for valid selection */ - if(H5S_SELECT_VALID(file_space) != TRUE) - HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "file selection+offset not within extent") - } /* end if */ - } + /* Retrieve file_id */ + file_id = H5F_FILE_ID(dset_info->dset->oloc.file); - if(H5D__pre_write(dset, direct_write, mem_type_id, mem_space, file_space, dxpl_id, buf) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't prepare for writing data") + /* Call common pre-write routine */ + if(H5D__pre_write(file_id, dxpl_id, 1, dset_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't prepare for writing data") done: + if(dset_info) + H5MM_xfree(dset_info); FUNC_LEAVE_API(ret_value) } /* end H5Dwrite() */ /*------------------------------------------------------------------------- - * 
Function: H5D__pre_write + * Function: H5Dwrite_multi * - * Purpose: Preparation for writing data. + * Purpose: Multi-version of H5Dwrite(), which writes selections from + * application memory BUFs into multiple datasets in a file. * * Return: Non-negative on success/Negative on failure * - * Programmer: Raymond Lu - * 2 November 2012 + * Programmer: Jonathan Kim Nov, 2013 + * + *------------------------------------------------------------------------- + */ +herr_t +H5Dwrite_multi(hid_t dxpl_id, size_t count, const H5D_rw_multi_t *info) +{ + H5D_dset_info_t *dset_info = NULL; /* Pointer to internal list of multi-dataset info */ + size_t u; /* Local index variable */ + hid_t file_id; /* file ID where datasets are located */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_API(FAIL) + H5TRACE3("e", "iz*Dm", dxpl_id, count, info); + + if(count <= 0) + HGOTO_DONE(SUCCEED) + + /* Get the default dataset transfer property list if the user didn't provide one */ + if(H5P_DEFAULT == dxpl_id) + dxpl_id = H5P_DATASET_XFER_DEFAULT; + else + if(TRUE != H5P_isa_class(dxpl_id, H5P_DATASET_XFER)) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not xfer parms") + + /* Alloc dset_info */ + if(NULL == (dset_info = (H5D_dset_info_t *)H5MM_calloc(count * sizeof(H5D_dset_info_t)))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate dset info array buffer") + + /* Translate public multi-dataset info to internal structure */ + /* (And check parameters) */ + for(u = 0; u < count; u++) { + if(H5D__init_dset_info(&dset_info[u], info[u].dset_id, info[u].mem_type_id, info[u].mem_space_id, + info[u].dset_space_id, &(info[u].u.wbuf)) < 0) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't init dataset info") + } + + if((file_id = H5D__verify_location(count, dset_info)) < 0) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "datasets are not in the same file") + + /* Call common pre-write routine */ + if(H5D__pre_write(file_id, dxpl_id, count, dset_info) < 0) + 
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't prepare for writing data") + +done: + if(dset_info) + H5MM_xfree(dset_info); + + FUNC_LEAVE_API(ret_value) +} /* end H5Dwrite_multi() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__pre_write + * + * Purpose: Sets up a write operation. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Jonathan Kim Nov, 2013 * *------------------------------------------------------------------------- */ static herr_t -H5D__pre_write(H5D_t *dset, hbool_t direct_write, hid_t mem_type_id, - const H5S_t *mem_space, const H5S_t *file_space, - hid_t dxpl_id, const void *buf) +H5D__pre_write(hid_t file_id, hid_t dxpl_id, size_t count, + H5D_dset_info_t *dset_info) { - herr_t ret_value = SUCCEED; /* Return value */ + H5P_genplist_t *plist; /* DXPL property list pointer */ + hbool_t direct_write = FALSE; /* Flag for direct writing */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_STATIC - /* Direct chunk write */ - if(direct_write) { - H5P_genplist_t *plist; /* Property list pointer */ - uint32_t direct_filters; - hsize_t *direct_offset; - uint32_t direct_datasize; - hsize_t internal_offset[H5O_LAYOUT_NDIMS]; - unsigned u; /* Local index variable */ + /* check args */ + HDassert(dxpl_id > 0); + HDassert(count > 0); + HDassert(dset_info); - /* Get the dataset transfer property list */ - if(NULL == (plist = (H5P_genplist_t *)H5I_object(dxpl_id))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset transfer property list") + /* Retrieve DXPL for queries below */ + if(NULL == (plist = H5P_object_verify(dxpl_id, H5P_DATASET_XFER))) + HGOTO_ERROR(H5E_PLIST, H5E_BADTYPE, FAIL, "not a dxpl") - if(H5D_CHUNKED != dset->shared->layout.type) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a chunked dataset") + /* Check if direct write or not */ + if(H5P_get(plist, H5D_XFER_DIRECT_CHUNK_WRITE_FLAG_NAME, &direct_write) < 0) + HGOTO_ERROR(H5E_PLIST, 
H5E_CANTGET, FAIL, "error getting flag for direct chunk write") + + /* Direct chunk write */ + if(direct_write) { + uint32_t direct_filters; /* Filters already applied to chunk */ + hsize_t *direct_offset; /* Offset of chunk */ + uint32_t direct_datasize; /* [Pre-compressed] size of chunk */ + int sndims; /* Dataspace rank (signed) */ + unsigned ndims; /* Dataspace rank */ + hsize_t dims[H5O_LAYOUT_NDIMS]; /* Dataspace dimensions */ + hsize_t internal_offset[H5O_LAYOUT_NDIMS]; /* Internal copy of the chunk offset */ + unsigned u; /* Local index variable */ + + /* Sanity check */ + HDassert(count == 1); + + /* Verify dataset is chunked */ + if(H5D_CHUNKED != dset_info[0].dset->shared->layout.type) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a chunked dataset") /* Retrieve parameters for direct chunk write */ if(H5P_get(plist, H5D_XFER_DIRECT_CHUNK_WRITE_FILTERS_NAME, &direct_filters) < 0) @@ -317,31 +596,87 @@ H5D__pre_write(H5D_t *dset, hbool_t direct_write, hid_t mem_type_id, if(H5P_get(plist, H5D_XFER_DIRECT_CHUNK_WRITE_DATASIZE_NAME, &direct_datasize) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "error getting data size for direct chunk write") - /* The library's chunking code requires the offset terminates with a zero. So transfer the - * offset array to an internal offset array */ - for(u = 0; u < dset->shared->ndims; u++) { - /* Make sure the offset doesn't exceed the dataset's dimensions */ - if(direct_offset[u] > dset->shared->curr_dims[u]) - HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "offset exceeds dimensions of dataset") + /* The library's chunking code requires the offset terminates with a + * zero. 
So transfer the offset array to an internal offset array */ + if((sndims = H5S_get_simple_extent_dims(dset_info[0].dset->shared->space, dims, NULL)) < 0) + HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't retrieve dataspace extent dims") + H5_CHECKED_ASSIGN(ndims, unsigned, sndims, int); + + /* Sanity check chunk offset and set up internal offset array */ + for(u = 0; u < ndims; u++) { + /* Make sure the offset doesn't exceed the dataset's dimensions */ + if(direct_offset[u] > dset_info[0].dset->shared->curr_dims[u]) + HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "offset exceeds dimensions of dataset") /* Make sure the offset fall right on a chunk's boundary */ - if(direct_offset[u] % dset->shared->layout.u.chunk.dim[u]) - HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "offset doesn't fall on chunks's boundary") - - internal_offset[u] = direct_offset[u]; - } /* end for */ - - /* Terminate the offset with a zero */ - internal_offset[dset->shared->ndims] = 0; - - /* write raw data */ - if(H5D__chunk_direct_write(dset, dxpl_id, direct_filters, internal_offset, direct_datasize, buf) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't write chunk directly") - } /* end if */ - else { /* Normal write */ + if(direct_offset[u] % dset_info[0].dset->shared->layout.u.chunk.dim[u]) + HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "offset doesn't fall on chunks's boundary") + + internal_offset[u] = direct_offset[u]; + } /* end for */ + + /* Terminate the offset with a zero */ + internal_offset[ndims] = 0; + /* write raw data */ - if(H5D__write(dset, mem_type_id, mem_space, file_space, dxpl_id, buf) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't write data") + if(H5D__chunk_direct_write(dset_info[0].dset, dxpl_id, direct_filters, internal_offset, + direct_datasize, dset_info[0].u.wbuf) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't write chunk directly") + } /* end if */ + else { + size_t u; /* Local index variable */ + hbool_t broke_mdset 
= FALSE; /* Whether to break multi-dataset option */ + H5FD_mpio_xfer_t xfer_mode; /* Parallel I/O transfer mode */ + + /* Get the transfer mode */ + if(H5P_get(plist, H5D_XFER_IO_XFER_MODE_NAME, &xfer_mode) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "unable to get value") + + /* In independent mode or with an unsupported layout, for now + just write each dataset individually */ + if(xfer_mode == H5FD_MPIO_INDEPENDENT) + broke_mdset = TRUE; + else { + /* Multi-dset I/O currently supports CHUNKED and internal CONTIGUOUS + * only, not external CONTIGUOUS (EFL) or COMPACT. Fall back to + * individual dataset writes if any dataset uses an unsupported layout. + */ + for(u = 0; u < count; u++) { + if(!(dset_info[u].dset->shared->layout.type == H5D_CHUNKED || + (dset_info[u].dset->shared->layout.type == H5D_CONTIGUOUS && + dset_info[u].dset->shared->layout.ops != H5D_LOPS_EFL))) { + broke_mdset = TRUE; + break; + } + } /* end for */ + } + + if(broke_mdset) { + /* Write raw data to each dataset by iteself */ + for(u = 0; u < count; u++) + if(H5D__write(file_id, dxpl_id, 1, &dset_info[u]) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't write data") + } /* end if */ + else { + HDassert(xfer_mode == H5FD_MPIO_COLLECTIVE); + + if(count > 0) { + if(H5D__write(file_id, dxpl_id, count, dset_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't write data") + } /* end if */ + +#ifdef H5_HAVE_PARALLEL + /* MSC - I do not think we should allow for this. I think we + should make the multi dataset APIs enforce a uniform list + of datasets among all processes, and users would enter a + NULL selection when a process does not have anything to + write to a particulat dataset. 
*/ + else { + if(H5D__match_coll_calls(file_id, plist, FALSE) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "failed in matching collective MPI calls") + } /* end else */ +#endif /* H5_HAVE_PARALLEL */ + } /* end else */ } /* end else */ done: @@ -352,27 +687,24 @@ done: /*------------------------------------------------------------------------- * Function: H5D__read * - * Purpose: Reads (part of) a DATASET into application memory BUF. See - * H5Dread() for complete details. + * Purpose: Reads multiple (part of) DATASETs into application memory BUFs. + * See H5Dread_multi() for complete details. * * Return: Non-negative on success/Negative on failure * - * Programmer: Robb Matzke - * Thursday, December 4, 1997 + * Programmer: Jonathan Kim Nov, 2013 * *------------------------------------------------------------------------- */ herr_t -H5D__read(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space, - const H5S_t *file_space, hid_t dxpl_id, void *buf/*out*/) +H5D__read(hid_t file_id, hid_t dxpl_id, size_t count, + H5D_dset_info_t *dset_info) { - H5D_chunk_map_t fm; /* Chunk file<->memory mapping */ - H5D_io_info_t io_info; /* Dataset I/O info */ - H5D_type_info_t type_info; /* Datatype info for operation */ - hbool_t type_info_init = FALSE; /* Whether the datatype info has been initialized */ - H5S_t * projected_mem_space = NULL; /* If not NULL, ptr to dataspace containing a */ + H5D_io_info_t io_info; /* Dataset I/O info for multi dsets */ + size_t type_info_init = 0; /* Number of datatype info structs that have been initialized */ + H5S_t ** projected_mem_space; /* If not NULL, ptr to dataspace containing a */ /* projection of the supplied mem_space to a new */ - /* data space with rank equal to that of */ + /* dataspace with rank equal to that of */ /* file_space. 
*/ /* */ /* This field is only used if */ @@ -383,166 +715,236 @@ H5D__read(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space, /* Note that if this variable is used, the */ /* projected mem space must be discarded at the */ /* end of the function to avoid a memory leak. */ - H5D_storage_t store; /*union of EFL and chunk pointer in file space */ - hssize_t snelmts; /*total number of elmts (signed) */ - hsize_t nelmts; /*total number of elmts */ + H5D_storage_t *store = NULL; /* Union of EFL and chunk pointer in file space */ + hssize_t snelmts; /* Total number of elmts (signed) */ + hsize_t nelmts; /* Total number of elmts */ hbool_t io_info_init = FALSE; /* Whether the I/O info has been initialized */ - hbool_t io_op_init = FALSE; /* Whether the I/O op has been initialized */ + size_t io_op_init = 0; /* Number I/O ops that have been initialized */ H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */ H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */ + size_t i; /* Local index variable */ char fake_char; /* Temporary variable for NULL buffer pointers */ herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_PACKAGE_TAG(dxpl_id, dataset->oloc.addr, FAIL) + FUNC_ENTER_NOAPI(FAIL) + + /* init io_info */ + io_info.sel_pieces = NULL; + io_info.store_faddr = 0; + io_info.base_maddr_r = NULL; - /* check args */ - HDassert(dataset && dataset->oloc.file); + /* Create global piece skiplist */ + if(NULL == (io_info.sel_pieces = H5SL_create(H5SL_TYPE_HADDR, NULL))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCREATE, FAIL, "can't create skip list for piece selections") - if(!file_space) - file_space = dataset->shared->space; - if(!mem_space) - mem_space = file_space; - if((snelmts = H5S_GET_SELECT_NPOINTS(mem_space)) < 0) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "dst dataspace has invalid selection") - H5_CHECKED_ASSIGN(nelmts, hsize_t, snelmts, hssize_t); + /* Use provided dset_info */ + io_info.dsets_info = dset_info; - /* 
Fill the DXPL cache values for later use */ - if(H5D__get_dxpl_cache(dxpl_id, &dxpl_cache) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") + /* Allocate other buffers */ + if(NULL == (projected_mem_space = (H5S_t **)H5MM_calloc(count * sizeof(H5S_t*)))) + HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "couldn't allocate dset space array ptr") + if(NULL == (store = (H5D_storage_t *)H5MM_malloc(count * sizeof(H5D_storage_t)))) + HGOTO_ERROR(H5E_STORAGE, H5E_CANTALLOC, FAIL, "couldn't allocate dset storage info array buffer") - /* Set up datatype info for operation */ - if(H5D__typeinfo_init(dataset, dxpl_cache, dxpl_id, mem_type_id, FALSE, &type_info) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to set up type info") - type_info_init = TRUE; + /* init both dxpls to the original one */ + io_info.md_dxpl_id = dxpl_id; + io_info.raw_dxpl_id = dxpl_id; + + /* set the dxpl IO type for sanity checking at the FD layer */ +#ifdef H5_DEBUG_BUILD + if(H5D_set_io_info_dxpls(&io_info, dxpl_id) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "can't set metadata and raw data dxpls") +#endif /* H5_DEBUG_BUILD */ + + /* iterate over all dsets and construct I/O information necessary to do I/O */ + for(i = 0; i < count; i++) { + /* check args */ + if(NULL == dset_info[i].dset) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset") + if(NULL == dset_info[i].dset->oloc.file) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a file") + + /* Fill the DXPL cache values for later use */ + if(H5D__get_dxpl_cache(dxpl_id, &dxpl_cache) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") + + /* Set up datatype info for operation */ + if(H5D__typeinfo_init(dset_info[i].dset, dxpl_cache, dxpl_id, dset_info[i].mem_type_id, + FALSE, &(dset_info[i].type_info)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to set up type info") + type_info_init++; #ifdef H5_HAVE_PARALLEL - /* Collective access is not permissible 
without a MPI based VFD */ - if(dxpl_cache->xfer_mode == H5FD_MPIO_COLLECTIVE && - !(H5F_HAS_FEATURE(dataset->oloc.file, H5FD_FEAT_HAS_MPI))) - HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "collective access for MPI-based drivers only") + /* Collective access is not permissible without a MPI based VFD */ + if(dxpl_cache->xfer_mode == H5FD_MPIO_COLLECTIVE && + !(H5F_HAS_FEATURE(dset_info[i].dset->oloc.file, H5FD_FEAT_HAS_MPI))) + HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "collective access for MPI-based drivers only") #endif /*H5_HAVE_PARALLEL*/ - /* Make certain that the number of elements in each selection is the same */ - if(nelmts != (hsize_t)H5S_GET_SELECT_NPOINTS(file_space)) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "src and dest data spaces have different sizes") + if((snelmts = H5S_GET_SELECT_NPOINTS(dset_info[i].mem_space)) < 0) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "dst dataspace has invalid selection") + H5_CHECKED_ASSIGN(nelmts, hsize_t, snelmts, hssize_t); - /* Check for a NULL buffer, after the H5S_ALL dataspace selection has been handled */ - if(NULL == buf) { - /* Check for any elements selected (which is invalid) */ - if(nelmts > 0) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no output buffer") + /* Make certain that the number of elements in each selection is the same */ + if(nelmts != (hsize_t)H5S_GET_SELECT_NPOINTS(dset_info[i].file_space)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "src and dest dataspace have different sizes") - /* If the buffer is nil, and 0 element is selected, make a fake buffer. - * This is for some MPI package like ChaMPIon on NCSA's tungsten which - * doesn't support this feature. 
- */ - buf = &fake_char; - } /* end if */ + /* Check for a NULL buffer, after the H5S_ALL dataspace selection has been handled */ + if(NULL == dset_info[i].u.rbuf) { + /* Check for any elements selected (which is invalid) */ + if(nelmts > 0) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no output buffer") - /* Make sure that both selections have their extents set */ - if(!(H5S_has_extent(file_space))) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file dataspace does not have extent set") - if(!(H5S_has_extent(mem_space))) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "memory dataspace does not have extent set") - - /* H5S_select_shape_same() has been modified to accept topologically identical - * selections with different rank as having the same shape (if the most - * rapidly changing coordinates match up), but the I/O code still has - * difficulties with the notion. - * - * To solve this, we check to see if H5S_select_shape_same() returns true, - * and if the ranks of the mem and file spaces are different. If the are, - * construct a new mem space that is equivalent to the old mem space, and - * use that instead. - * - * Note that in general, this requires us to touch up the memory buffer as - * well. - */ - if(TRUE == H5S_select_shape_same(mem_space, file_space) && - H5S_GET_EXTENT_NDIMS(mem_space) != H5S_GET_EXTENT_NDIMS(file_space)) { - void *adj_buf = NULL; /* Pointer to the location in buf corresponding */ - /* to the beginning of the projected mem space. 
*/ - - /* Attempt to construct projected dataspace for memory dataspace */ - if(H5S_select_construct_projection(mem_space, &projected_mem_space, - (unsigned)H5S_GET_EXTENT_NDIMS(file_space), buf, (const void **)&adj_buf, type_info.dst_type_size) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to construct projected memory dataspace") - HDassert(projected_mem_space); - HDassert(adj_buf); - - /* Switch to using projected memory dataspace & adjusted buffer */ - mem_space = projected_mem_space; - buf = adj_buf; - } /* end if */ + /* If the buffer is nil, and 0 element is selected, make a fake buffer. + * This is for some MPI package like ChaMPIon on NCSA's tungsten which + * doesn't support this feature. + */ + dset_info[i].u.rbuf = &fake_char; + } /* end if */ + + /* Make sure that both selections have their extents set */ + if(!(H5S_has_extent(dset_info[i].file_space))) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file dataspace does not have extent set") + if(!(H5S_has_extent(dset_info[i].mem_space))) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "memory dataspace does not have extent set") + + /* H5S_select_shape_same() has been modified to accept topologically + * identical selections with different rank as having the same shape + * (if the most rapidly changing coordinates match up), but the I/O + * code still has difficulties with the notion. + * + * To solve this, we check to see if H5S_select_shape_same() returns + * true, and if the ranks of the mem and file spaces are different. + * If the are, construct a new mem space that is equivalent to the + * old mem space, and use that instead. + * + * Note that in general, this requires us to touch up the memory buffer + * as well. 
+ */ + if(TRUE == H5S_select_shape_same(dset_info[i].mem_space, dset_info[i].file_space) && + H5S_GET_EXTENT_NDIMS(dset_info[i].mem_space) != H5S_GET_EXTENT_NDIMS(dset_info[i].file_space)) { + const void *adj_buf = NULL; /* Pointer to the location in buf corresponding */ + /* to the beginning of the projected mem space. */ + + /* Attempt to construct projected dataspace for memory dataspace */ + if(H5S_select_construct_projection(dset_info[i].mem_space, &(projected_mem_space[i]), + (unsigned)H5S_GET_EXTENT_NDIMS(dset_info[i].file_space), dset_info[i].u.rbuf, + (const void **)&adj_buf, + (hsize_t)dset_info[i].type_info.dst_type_size) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to construct projected memory dataspace") + HDassert(projected_mem_space[i]); + HDassert(adj_buf); + + /* Switch to using projected memory dataspace & adjusted buffer */ + dset_info[i].mem_space = projected_mem_space[i]; + dset_info[i].u.rbuf = (void *)adj_buf; + } /* end if */ + /* Retrieve dataset properties */ + /* <none needed in the general case> */ - /* Retrieve dataset properties */ - /* <none needed in the general case> */ + /* If space hasn't been allocated and not using external storage, + * return fill value to buffer if fill time is upon allocation, or + * do nothing if fill time is never. If the dataset is compact and + * fill time is NEVER, there is no way to tell whether part of data + * has been overwritten. So just proceed in reading. 
+ */ + if(nelmts > 0 && dset_info[i].dset->shared->dcpl_cache.efl.nused == 0 && + !(*dset_info[i].dset->shared->layout.ops->is_space_alloc)(&dset_info[i].dset->shared->layout.storage)) { + H5D_fill_value_t fill_status; /* Whether/How the fill value is defined */ + + /* Retrieve dataset's fill-value properties */ + if(H5P_is_fill_value_defined(&dset_info[i].dset->shared->dcpl_cache.fill, &fill_status) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't tell if fill value defined") + + /* Should be impossible, but check anyway... */ + if(fill_status == H5D_FILL_VALUE_UNDEFINED && + (dset_info[i].dset->shared->dcpl_cache.fill.fill_time == H5D_FILL_TIME_ALLOC || + dset_info[i].dset->shared->dcpl_cache.fill.fill_time == H5D_FILL_TIME_IFSET)) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "read failed: dataset doesn't exist, no data can be read") + + /* If we're never going to fill this dataset, just leave the junk in the user's buffer */ + if(dset_info[i].dset->shared->dcpl_cache.fill.fill_time == H5D_FILL_TIME_NEVER) + HGOTO_DONE(SUCCEED) + + /* Go fill the user's selection with the dataset's fill value */ + if(H5D__fill(dset_info[i].dset->shared->dcpl_cache.fill.buf, dset_info[i].dset->shared->type, + dset_info[i].u.rbuf, dset_info[i].type_info.mem_type, dset_info[i].mem_space, dxpl_id) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "filling buf failed") + else + HGOTO_DONE(SUCCEED) + } /* end if */ - /* If space hasn't been allocated and not using external storage, - * return fill value to buffer if fill time is upon allocation, or - * do nothing if fill time is never. If the dataset is compact and - * fill time is NEVER, there is no way to tell whether part of data - * has been overwritten. So just proceed in reading. 
- */ - if(nelmts > 0 && dataset->shared->dcpl_cache.efl.nused == 0 && - !(*dataset->shared->layout.ops->is_space_alloc)(&dataset->shared->layout.storage)) { - H5D_fill_value_t fill_status; /* Whether/How the fill value is defined */ - - /* Retrieve dataset's fill-value properties */ - if(H5P_is_fill_value_defined(&dataset->shared->dcpl_cache.fill, &fill_status) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't tell if fill value defined") - - /* Should be impossible, but check anyway... */ - if(fill_status == H5D_FILL_VALUE_UNDEFINED && - (dataset->shared->dcpl_cache.fill.fill_time == H5D_FILL_TIME_ALLOC || dataset->shared->dcpl_cache.fill.fill_time == H5D_FILL_TIME_IFSET)) - HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "read failed: dataset doesn't exist, no data can be read") - - /* If we're never going to fill this dataset, just leave the junk in the user's buffer */ - if(dataset->shared->dcpl_cache.fill.fill_time == H5D_FILL_TIME_NEVER) - HGOTO_DONE(SUCCEED) - - /* Go fill the user's selection with the dataset's fill value */ - if(H5D__fill(dataset->shared->dcpl_cache.fill.buf, dataset->shared->type, buf, - type_info.mem_type, mem_space, dxpl_id) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "filling buf failed") - else - HGOTO_DONE(SUCCEED) - } /* end if */ + /* Set up I/O operation */ + io_info.op_type = H5D_IO_OP_READ; + if(H5D__ioinfo_init(dset_info[i].dset, dxpl_cache, dxpl_id, &(dset_info[i]), + &(store[i]), &io_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "unable to set up I/O operation") + io_info_init = TRUE; - /* Set up I/O operation */ - io_info.op_type = H5D_IO_OP_READ; - io_info.u.rbuf = buf; - if(H5D__ioinfo_init(dataset, dxpl_cache, dxpl_id, &type_info, &store, &io_info) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "unable to set up I/O operation") - io_info_init = TRUE; - - /* Sanity check that space is allocated, if there are elements */ - if(nelmts > 0) - 
HDassert((*dataset->shared->layout.ops->is_space_alloc)(&dataset->shared->layout.storage) - || dataset->shared->dcpl_cache.efl.nused > 0 - || dataset->shared->layout.type == H5D_COMPACT); - - /* Call storage method's I/O initialization routine */ - HDmemset(&fm, 0, sizeof(H5D_chunk_map_t)); - if(io_info.layout_ops.io_init && (*io_info.layout_ops.io_init)(&io_info, &type_info, nelmts, file_space, mem_space, &fm) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize I/O info") - io_op_init = TRUE; + /* Sanity check that space is allocated, if there are elements */ + if(nelmts > 0) + HDassert((*dset_info[i].dset->shared->layout.ops->is_space_alloc) + (&dset_info[i].dset->shared->layout.storage) + || dset_info[i].dset->shared->dcpl_cache.efl.nused > 0 + || dset_info[i].dset->shared->layout.type == H5D_COMPACT); + + /* Call storage method's I/O initialization routine */ + /* Init io_info.dset_info[] and generate piece_info in skip list */ + if(dset_info[i].layout_ops.io_init && + (*dset_info[i].layout_ops.io_init)(&io_info, &(dset_info[i].type_info), nelmts, + dset_info[i].file_space, dset_info[i].mem_space, + &(dset_info[i])) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize I/O info") + io_op_init++; + } /* end of for loop */ + + assert(type_info_init == count); + assert(io_op_init == count); #ifdef H5_HAVE_PARALLEL /* Adjust I/O info for any parallel I/O */ - if(H5D__ioinfo_adjust(&io_info, dataset, dxpl_id, file_space, mem_space, &type_info, &fm) < 0) + if(H5D__ioinfo_adjust(count, &io_info, dxpl_id) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to adjust I/O info for parallel I/O") +#else + io_info.is_coll_broken = TRUE; #endif /*H5_HAVE_PARALLEL*/ /* Invoke correct "high level" I/O routine */ - if((*io_info.io_ops.multi_read)(&io_info, &type_info, nelmts, file_space, mem_space, &fm) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read data") + /* If collective mode is broken, perform read IO in 
independent mode via + * single-dset path with looping. + * Multiple-dset path can not be called since it is not supported, so make + * detour through single-dset path */ + if(TRUE == io_info.is_coll_broken) { + haddr_t prev_tag = HADDR_UNDEF; + + /* Loop with serial & single-dset read IO path */ + for(i = 0; i < count; i++) { + /* set metadata tagging with dset oheader addr */ + if(H5AC_tag(io_info.md_dxpl_id, dset_info[i].dset->oloc.addr, &prev_tag) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "unable to apply metadata tag") + + io_info.dsets_info = &(dset_info[i]); + + if((*io_info.io_ops.multi_read)(&io_info, &(dset_info[i].type_info), nelmts, dset_info[i].file_space, + dset_info[i].mem_space, &dset_info[i]) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read data") + + /* Reset metadata tagging */ + if(H5AC_tag(io_info.md_dxpl_id, prev_tag, NULL) < 0) + HDONE_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "unable to apply metadata tag") + } + } /* end if */ + else + if((*io_info.io_ops.multi_read_md)(file_id, count, &io_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read data") done: /* Shut down the I/O op information */ - if(io_op_init && io_info.layout_ops.io_term && (*io_info.layout_ops.io_term)(&fm) < 0) - HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to shut down I/O op info") + for(i = 0; i < io_op_init; i++) + if(dset_info[i].layout_ops.io_term && (*dset_info[i].layout_ops.io_term)(&io_info, &(dset_info[i])) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to shut down I/O op info") + if(io_info_init) { #ifdef H5_DEBUG_BUILD @@ -558,42 +960,54 @@ done: } /* Shut down datatype info for operation */ - if(type_info_init && H5D__typeinfo_term(&type_info) < 0) - HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to shut down type info") - - /* discard projected mem space if it was created */ - if(NULL != projected_mem_space) - if(H5S_close(projected_mem_space) < 0) - HDONE_ERROR(H5E_DATASET, 
H5E_CANTCLOSEOBJ, FAIL, "unable to shut down projected memory dataspace") + for(i = 0; i < type_info_init; i++) + if(H5D__typeinfo_term(&(dset_info[i].type_info)) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to shut down type info") + + /* Discard projected mem spaces if they were created */ + for(i = 0; i < count; i++) + if(NULL != projected_mem_space[i]) + if(H5S_close(projected_mem_space[i]) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to shut down projected memory dataspace") + + /* Free global piece skiplist */ + if(io_info.sel_pieces) + if(H5SL_close(io_info.sel_pieces) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "can't close dataset skip list") + + /* io_info.dsets_info was allocated in calling function */ + if(projected_mem_space) + H5MM_xfree(projected_mem_space); + if(store) + H5MM_xfree(store); - FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL) + FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__read() */ /*------------------------------------------------------------------------- * Function: H5D__write * - * Purpose: Writes (part of) a DATASET to a file from application memory - * BUF. See H5Dwrite() for complete details. + * Purpose: Writes multiple (part of) DATASETs to a file from application + * memory BUFs. See H5Dwrite_multi() for complete details. + * + * This was referred from H5D__write for multi-dset work. 
* * Return: Non-negative on success/Negative on failure * - * Programmer: Robb Matzke - * Thursday, December 4, 1997 + * Programmer: Jonathan Kim Nov, 2013 * *------------------------------------------------------------------------- */ herr_t -H5D__write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space, - const H5S_t *file_space, hid_t dxpl_id, const void *buf) -{ - H5D_chunk_map_t fm; /* Chunk file<->memory mapping */ - H5D_io_info_t io_info; /* Dataset I/O info */ - H5D_type_info_t type_info; /* Datatype info for operation */ - hbool_t type_info_init = FALSE; /* Whether the datatype info has been initialized */ - H5S_t * projected_mem_space = NULL; /* If not NULL, ptr to dataspace containing a */ +H5D__write(hid_t file_id, hid_t dxpl_id, size_t count, + H5D_dset_info_t *dset_info) +{ + H5D_io_info_t io_info; /* Dataset I/O info for multi dsets */ + size_t type_info_init = 0; /* Number of datatype info structs that have been initialized */ + H5S_t **projected_mem_space = NULL; /* If not NULL, ptr to dataspace containing a */ /* projection of the supplied mem_space to a new */ - /* data space with rank equal to that of */ + /* dataspace with rank equal to that of */ /* file_space. */ /* */ /* This field is only used if */ @@ -604,182 +1018,259 @@ H5D__write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space, /* Note that if this variable is used, the */ /* projected mem space must be discarded at the */ /* end of the function to avoid a memory leak. 
*/ - H5D_storage_t store; /*union of EFL and chunk pointer in file space */ - hssize_t snelmts; /*total number of elmts (signed) */ - hsize_t nelmts; /*total number of elmts */ + H5D_storage_t *store = NULL; /* Union of EFL and chunk pointer in file space */ + hssize_t snelmts; /* Total number of elmts (signed) */ + hsize_t nelmts; /* Total number of elmts */ hbool_t io_info_init = FALSE; /* Whether the I/O info has been initialized */ - hbool_t io_op_init = FALSE; /* Whether the I/O op has been initialized */ + size_t io_op_init = 0; /* Number I/O ops that have been initialized */ H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */ H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */ + size_t i; /* Local index variable */ char fake_char; /* Temporary variable for NULL buffer pointers */ herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_PACKAGE_TAG(dxpl_id, dataset->oloc.addr, FAIL) + FUNC_ENTER_NOAPI(FAIL) - /* check args */ - HDassert(dataset && dataset->oloc.file); + /* Init io_info */ + io_info.sel_pieces = NULL; + io_info.store_faddr = 0; + io_info.base_maddr_w = NULL; - /* All filters in the DCPL must have encoding enabled. 
*/ - if(!dataset->shared->checked_filters) { - if(H5Z_can_apply(dataset->shared->dcpl_id, dataset->shared->type_id) < 0) - HGOTO_ERROR(H5E_PLINE, H5E_CANAPPLY, FAIL, "can't apply filters") + /* Create global piece skiplist */ + if(NULL == (io_info.sel_pieces = H5SL_create(H5SL_TYPE_HADDR, NULL))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCREATE, FAIL, "can't create skip list for piece selections") - dataset->shared->checked_filters = TRUE; - } /* end if */ + /* Use provided dset_info */ + io_info.dsets_info = dset_info; + + /* Allocate other buffers */ + if(NULL == (projected_mem_space = (H5S_t **)H5MM_calloc(count * sizeof(H5S_t*)))) + HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "couldn't allocate dset space array ptr") + if(NULL == (store = (H5D_storage_t *)H5MM_malloc(count * sizeof(H5D_storage_t)))) + HGOTO_ERROR(H5E_STORAGE, H5E_CANTALLOC, FAIL, "couldn't allocate dset storage info array buffer") + + /* init both dxpls to the original one */ + io_info.md_dxpl_id = dxpl_id; + io_info.raw_dxpl_id = dxpl_id; + + /* set the dxpl IO type for sanity checking at the FD layer */ +#ifdef H5_DEBUG_BUILD + if(H5D_set_io_info_dxpls(&io_info, dxpl_id) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "can't set metadata and raw data dxpls") +#endif /* H5_DEBUG_BUILD */ - /* Check if we are allowed to write to this file */ - if(0 == (H5F_INTENT(dataset->oloc.file) & H5F_ACC_RDWR)) - HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "no write intent on file") + /* iterate over all dsets and construct I/O information */ + for(i = 0; i < count; i++) { + haddr_t prev_tag = HADDR_UNDEF; - /* Fill the DXPL cache values for later use */ - if(H5D__get_dxpl_cache(dxpl_id, &dxpl_cache) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") + /* check args */ + if(NULL == dset_info[i].dset) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset") + if(NULL == dset_info[i].dset->oloc.file) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a file") - /* Set up datatype 
info for operation */ - if(H5D__typeinfo_init(dataset, dxpl_cache, dxpl_id, mem_type_id, TRUE, &type_info) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to set up type info") - type_info_init = TRUE; + /* set metadata tagging with dset oheader addr */ + if(H5AC_tag(io_info.md_dxpl_id, dset_info[i].dset->oloc.addr, &prev_tag) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "unable to apply metadata tag") - /* Various MPI based checks */ + /* All filters in the DCPL must have encoding enabled. */ + if(!dset_info[i].dset->shared->checked_filters) { + if(H5Z_can_apply(dset_info[i].dset->shared->dcpl_id, dset_info[i].dset->shared->type_id) < 0) + HGOTO_ERROR(H5E_PLINE, H5E_CANAPPLY, FAIL, "can't apply filters") + + dset_info[i].dset->shared->checked_filters = TRUE; + } /* end if */ + + /* Check if we are allowed to write to this file */ + if(0 == (H5F_INTENT(dset_info[i].dset->oloc.file) & H5F_ACC_RDWR)) + HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "no write intent on file") + + /* Fill the DXPL cache values for later use */ + if(H5D__get_dxpl_cache(dxpl_id, &dxpl_cache) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") + + /* Set up datatype info for operation */ + if(H5D__typeinfo_init(dset_info[i].dset, dxpl_cache, dxpl_id, dset_info[i].mem_type_id, + TRUE, &(dset_info[i].type_info)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to set up type info") + type_info_init++; + + /* Various MPI based checks */ #ifdef H5_HAVE_PARALLEL - if H5F_HAS_FEATURE(dataset->oloc.file, H5FD_FEAT_HAS_MPI) { - /* If MPI based VFD is used, no VL datatype support yet. */ - /* This is because they use the global heap in the file and we don't */ - /* support parallel access of that yet */ - if(H5T_detect_class(type_info.mem_type, H5T_VLEN, FALSE) > 0) - HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "Parallel IO does not support writing VL datatypes yet") - - /* If MPI based VFD is used, no VL datatype support yet. 
*/ - /* This is because they use the global heap in the file and we don't */ - /* support parallel access of that yet */ - /* We should really use H5T_detect_class() here, but it will be difficult - * to detect the type of the reference if it is nested... -QAK - */ - if(H5T_get_class(type_info.mem_type, TRUE) == H5T_REFERENCE && - H5T_get_ref_type(type_info.mem_type) == H5R_DATASET_REGION) - HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "Parallel IO does not support writing region reference datatypes yet") - - /* Can't write to chunked datasets with filters, in parallel */ - if(dataset->shared->layout.type == H5D_CHUNKED && - dataset->shared->dcpl_cache.pline.nused > 0) - HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "cannot write to chunked storage with filters in parallel") - } /* end if */ - else { - /* Collective access is not permissible without a MPI based VFD */ - if(dxpl_cache->xfer_mode == H5FD_MPIO_COLLECTIVE) - HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "collective access for MPI-based driver only") - } /* end else */ + if H5F_HAS_FEATURE(dset_info[i].dset->oloc.file, H5FD_FEAT_HAS_MPI) { + /* If MPI based VFD is used, no VL datatype support yet. */ + /* This is because they use the global heap in the file and we don't */ + /* support parallel access of that yet */ + if(H5T_detect_class(dset_info[i].type_info.mem_type, H5T_VLEN, FALSE) > 0) + HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "Parallel IO does not support writing VL datatypes yet") + + /* If MPI based VFD is used, no VL datatype support yet. */ + /* This is because they use the global heap in the file and we don't */ + /* support parallel access of that yet */ + /* We should really use H5T_detect_class() here, but it will be difficult + * to detect the type of the reference if it is nested... 
-QAK + */ + if(H5T_get_class(dset_info[i].type_info.mem_type, TRUE) == H5T_REFERENCE && + H5T_get_ref_type(dset_info[i].type_info.mem_type) == H5R_DATASET_REGION) + HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "Parallel IO does not support writing region reference datatypes yet") + + /* Can't write to chunked datasets with filters, in parallel */ + if(dset_info[i].dset->shared->layout.type == H5D_CHUNKED && + dset_info[i].dset->shared->dcpl_cache.pline.nused > 0) + HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "cannot write to chunked storage with filters in parallel") + } /* end if */ + else { + /* Collective access is not permissible without a MPI based VFD */ + if(dxpl_cache->xfer_mode == H5FD_MPIO_COLLECTIVE) + HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "collective access for MPI-based driver only") + } /* end else */ #endif /*H5_HAVE_PARALLEL*/ - /* Initialize dataspace information */ - if(!file_space) - file_space = dataset->shared->space; - if(!mem_space) - mem_space = file_space; + if((snelmts = H5S_GET_SELECT_NPOINTS(dset_info[i].mem_space)) < 0) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "src dataspace has invalid selection") + H5_CHECKED_ASSIGN(nelmts, hsize_t, snelmts, hssize_t); - if((snelmts = H5S_GET_SELECT_NPOINTS(mem_space)) < 0) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "src dataspace has invalid selection") - H5_CHECKED_ASSIGN(nelmts, hsize_t, snelmts, hssize_t); + /* Make certain that the number of elements in each selection is the same */ + if(nelmts != (hsize_t)H5S_GET_SELECT_NPOINTS(dset_info[i].file_space)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "src and dest dataspace have different sizes") - /* Make certain that the number of elements in each selection is the same */ - if(nelmts != (hsize_t)H5S_GET_SELECT_NPOINTS(file_space)) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "src and dest data spaces have different sizes") + /* Check for a NULL buffer, after the H5S_ALL dataspace selection has been handled */ + if(NULL == 
dset_info[i].u.wbuf) { + /* Check for any elements selected (which is invalid) */ + if(nelmts > 0) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no output buffer") - /* Check for a NULL buffer, after the H5S_ALL dataspace selection has been handled */ - if(NULL == buf) { - /* Check for any elements selected (which is invalid) */ - if(nelmts > 0) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no output buffer") + /* If the buffer is nil, and 0 element is selected, make a fake buffer. + * This is for some MPI package like ChaMPIon on NCSA's tungsten which + * doesn't support this feature. + */ + dset_info[i].u.wbuf = &fake_char; + } /* end if */ - /* If the buffer is nil, and 0 element is selected, make a fake buffer. - * This is for some MPI package like ChaMPIon on NCSA's tungsten which - * doesn't support this feature. - */ - buf = &fake_char; - } /* end if */ + /* Make sure that both selections have their extents set */ + if(!(H5S_has_extent(dset_info[i].file_space))) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file dataspace does not have extent set") + if(!(H5S_has_extent(dset_info[i].mem_space))) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "memory dataspace does not have extent set") + + /* H5S_select_shape_same() has been modified to accept topologically + * identical selections with different rank as having the same shape + * (if the most rapidly changing coordinates match up), but the I/O + * code still has difficulties with the notion. + * + * To solve this, we check to see if H5S_select_shape_same() returns + * true, and if the ranks of the mem and file spaces are different. + * If the are, construct a new mem space that is equivalent to the + * old mem space, and use that instead. + * + * Note that in general, this requires us to touch up the memory buffer + * as well. 
+ */ + if(TRUE == H5S_select_shape_same(dset_info[i].mem_space, dset_info[i].file_space) && + H5S_GET_EXTENT_NDIMS(dset_info[i].mem_space) != H5S_GET_EXTENT_NDIMS(dset_info[i].file_space)) { + const void *adj_buf = NULL; /* Pointer to the location in buf corresponding */ + /* to the beginning of the projected mem space. */ + + /* Attempt to construct projected dataspace for memory dataspace */ + if(H5S_select_construct_projection(dset_info[i].mem_space, &(projected_mem_space[i]), + (unsigned)H5S_GET_EXTENT_NDIMS(dset_info[i].file_space), + dset_info[i].u.wbuf, (const void **)&adj_buf, + (hsize_t)dset_info[i].type_info.src_type_size) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to construct projected memory dataspace") + HDassert(projected_mem_space[i]); + HDassert(adj_buf); + + /* Switch to using projected memory dataspace & adjusted buffer */ + dset_info[i].mem_space = projected_mem_space[i]; + dset_info[i].u.wbuf = adj_buf; + } /* end if */ - /* Make sure that both selections have their extents set */ - if(!(H5S_has_extent(file_space))) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file dataspace does not have extent set") - if(!(H5S_has_extent(mem_space))) - HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "memory dataspace does not have extent set") - - /* H5S_select_shape_same() has been modified to accept topologically - * identical selections with different rank as having the same shape - * (if the most rapidly changing coordinates match up), but the I/O - * code still has difficulties with the notion. - * - * To solve this, we check to see if H5S_select_shape_same() returns - * true, and if the ranks of the mem and file spaces are different. - * If the are, construct a new mem space that is equivalent to the - * old mem space, and use that instead. - * - * Note that in general, this requires us to touch up the memory buffer - * as well. 
- */ - if(TRUE == H5S_select_shape_same(mem_space, file_space) && - H5S_GET_EXTENT_NDIMS(mem_space) != H5S_GET_EXTENT_NDIMS(file_space)) { - void *adj_buf = NULL; /* Pointer to the location in buf corresponding */ - /* to the beginning of the projected mem space. */ - - /* Attempt to construct projected dataspace for memory dataspace */ - if(H5S_select_construct_projection(mem_space, &projected_mem_space, - (unsigned)H5S_GET_EXTENT_NDIMS(file_space), buf, (const void **)&adj_buf, type_info.src_type_size) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to construct projected memory dataspace") - HDassert(projected_mem_space); - HDassert(adj_buf); - - /* Switch to using projected memory dataspace & adjusted buffer */ - mem_space = projected_mem_space; - buf = adj_buf; - } /* end if */ + /* Retrieve dataset properties */ + /* <none needed currently> */ - /* Retrieve dataset properties */ - /* <none needed currently> */ - - /* Set up I/O operation */ - io_info.op_type = H5D_IO_OP_WRITE; - io_info.u.wbuf = buf; - if(H5D__ioinfo_init(dataset, dxpl_cache, dxpl_id, &type_info, &store, &io_info) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to set up I/O operation") - io_info_init = TRUE; - - /* Allocate data space and initialize it if it hasn't been. 
*/ - if(nelmts > 0 && dataset->shared->dcpl_cache.efl.nused == 0 && - !(*dataset->shared->layout.ops->is_space_alloc)(&dataset->shared->layout.storage)) { - hssize_t file_nelmts; /* Number of elements in file dataset's dataspace */ - hbool_t full_overwrite; /* Whether we are over-writing all the elements */ - - /* Get the number of elements in file dataset's dataspace */ - if((file_nelmts = H5S_GET_EXTENT_NPOINTS(file_space)) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "can't retrieve number of elements in file dataset") - - /* Always allow fill values to be written if the dataset has a VL datatype */ - if(H5T_detect_class(dataset->shared->type, H5T_VLEN, FALSE)) - full_overwrite = FALSE; - else - full_overwrite = (hbool_t)((hsize_t)file_nelmts == nelmts ? TRUE : FALSE); - - /* Allocate storage */ - if(H5D__alloc_storage(&io_info, H5D_ALLOC_WRITE, full_overwrite, NULL) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize storage") - } /* end if */ + /* Set up I/O operation */ + io_info.op_type = H5D_IO_OP_WRITE; + if(H5D__ioinfo_init(dset_info[i].dset, dxpl_cache, dxpl_id, &(dset_info[i]), + &(store[i]), &io_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to set up I/O operation") +#ifdef H5_HAVE_PARALLEL + io_info_init = TRUE; +#endif /*H5_HAVE_PARALLEL*/ + + /* Allocate dataspace and initialize it if it hasn't been. 
*/ + if(nelmts > 0 && dset_info[i].dset->shared->dcpl_cache.efl.nused == 0 && + !(*dset_info[i].dset->shared->layout.ops->is_space_alloc)(&dset_info[i].dset->shared->layout.storage)) { + hssize_t file_nelmts; /* Number of elements in file dataset's dataspace */ + hbool_t full_overwrite; /* Whether we are over-writing all the elements */ + + /* Get the number of elements in file dataset's dataspace */ + if((file_nelmts = H5S_GET_EXTENT_NPOINTS(dset_info[i].file_space)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "can't retrieve number of elements in file dataset") + + /* Always allow fill values to be written if the dataset has a VL datatype */ + if(H5T_detect_class(dset_info[i].dset->shared->type, H5T_VLEN, FALSE)) + full_overwrite = FALSE; + else + full_overwrite = (hbool_t)((hsize_t)file_nelmts == nelmts ? TRUE : FALSE); + + io_info.dset = dset_info[i].dset; + /* Allocate storage */ + if(H5D__alloc_storage(&io_info, H5D_ALLOC_WRITE, full_overwrite, NULL) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize storage") + } /* end if */ - /* Call storage method's I/O initialization routine */ - HDmemset(&fm, 0, sizeof(H5D_chunk_map_t)); - if(io_info.layout_ops.io_init && (*io_info.layout_ops.io_init)(&io_info, &type_info, nelmts, file_space, mem_space, &fm) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize I/O info") - io_op_init = TRUE; + /* Call storage method's I/O initialization routine */ + /* Init io_info.dset_info[] and generate piece_info in skip list */ + if(dset_info[i].layout_ops.io_init && + (*dset_info[i].layout_ops.io_init)(&io_info, &(dset_info[i].type_info), nelmts, + dset_info[i].file_space, dset_info[i].mem_space, + &(dset_info[i])) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize I/O info") + io_op_init++; + + /* Reset metadata tagging */ + if(H5AC_tag(io_info.md_dxpl_id, prev_tag, NULL) < 0) + HDONE_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "unable to apply metadata tag") + } /* end 
of Count for loop */ + HDassert(type_info_init == count); + HDassert(io_op_init == count); #ifdef H5_HAVE_PARALLEL /* Adjust I/O info for any parallel I/O */ - if(H5D__ioinfo_adjust(&io_info, dataset, dxpl_id, file_space, mem_space, &type_info, &fm) < 0) + if(H5D__ioinfo_adjust(count, &io_info, dxpl_id) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to adjust I/O info for parallel I/O") +#else + io_info.is_coll_broken = TRUE; #endif /*H5_HAVE_PARALLEL*/ - /* Invoke correct "high level" I/O routine */ - if((*io_info.io_ops.multi_write)(&io_info, &type_info, nelmts, file_space, mem_space, &fm) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't write data") + /* If collective mode is broken, perform write IO in independent mode via + * single-dset path with looping. + * Multiple-dset path can not be called since it is not supported, so make + * detour through single-dset path */ + if(TRUE == io_info.is_coll_broken) { + haddr_t prev_tag = HADDR_UNDEF; + + /* loop with serial & single-dset write IO path */ + for(i = 0; i < count; i++) { + /* set metadata tagging with dset oheader addr */ + if(H5AC_tag(io_info.md_dxpl_id, dset_info[i].dset->oloc.addr, &prev_tag) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "unable to apply metadata tag") + + io_info.dsets_info = &(dset_info[i]); + + /* Invoke correct "high level" I/O routine */ + if((*io_info.io_ops.multi_write)(&io_info, &(dset_info[i].type_info), (hsize_t)H5S_GET_SELECT_NPOINTS(dset_info[i].mem_space), dset_info[i].file_space, + dset_info[i].mem_space, &dset_info[i]) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't write data") + /* Reset metadata tagging */ + if(H5AC_tag(io_info.md_dxpl_id, prev_tag, NULL) < 0) + HDONE_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "unable to apply metadata tag") + } + } /* end if */ + else + /* Invoke correct "high level" I/O routine */ + if((*io_info.io_ops.multi_write_md)(file_id, count, &io_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't write data") #ifdef OLD_WAY /* @@ -801,8 +1292,10 @@ 
H5D__write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space, done: /* Shut down the I/O op information */ - if(io_op_init && io_info.layout_ops.io_term && (*io_info.layout_ops.io_term)(&fm) < 0) - HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to shut down I/O op info") + for(i = 0; i < io_op_init; i++) + if(dset_info[i].layout_ops.io_term && (*dset_info[i].layout_ops.io_term)(&io_info, &(dset_info[i])) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to shut down I/O op info") + if(io_info_init) { #ifdef H5_DEBUG_BUILD @@ -818,38 +1311,50 @@ done: } /* Shut down datatype info for operation */ - if(type_info_init && H5D__typeinfo_term(&type_info) < 0) - HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to shut down type info") + for(i = 0; i < type_info_init; i++) + if(H5D__typeinfo_term(&(dset_info[i].type_info)) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to shut down type info") + + /* Discard projected mem spaces if they were created */ + for(i = 0; i < count; i++) + if(NULL != projected_mem_space[i]) + if(H5S_close(projected_mem_space[i]) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to shut down projected memory dataspace") + + /* Free global piece skiplist */ + if(io_info.sel_pieces) + if(H5SL_close(io_info.sel_pieces) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "can't close dataset skip list") + + /* io_info.dsets_info was allocated in calling function */ + if(projected_mem_space) + H5MM_xfree(projected_mem_space); + if(store) + H5MM_xfree(store); - /* discard projected mem space if it was created */ - if(NULL != projected_mem_space) - if(H5S_close(projected_mem_space) < 0) - HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to shut down projected memory dataspace") - - FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL) -} /* end H5D__write() */ + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__write */ /*------------------------------------------------------------------------- * 
Function: H5D__ioinfo_init * - * Purpose: Routine for determining correct I/O operations for - * each I/O action. + * Purpose: Routine for determining correct I/O operations for each I/O action. * - * Return: Non-negative on success/Negative on failure + * This was derived from H5D__ioinfo_init for multi-dset work. * - * Programmer: Quincey Koziol - * Thursday, September 30, 2004 + * Return: Non-negative on success/Negative on failure * + * Programmer: Jonathan Kim Nov, 2013 *------------------------------------------------------------------------- */ static herr_t H5D__ioinfo_init(H5D_t *dset, #ifndef H5_HAVE_PARALLEL -const + const #endif /* H5_HAVE_PARALLEL */ - H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, - const H5D_type_info_t *type_info, H5D_storage_t *store, H5D_io_info_t *io_info) + H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, + H5D_dset_info_t *dset_info, H5D_storage_t *store, H5D_io_info_t *io_info) { herr_t ret_value = SUCCEED; @@ -858,37 +1363,29 @@ const /* check args */ HDassert(dset); HDassert(dset->oloc.file); - HDassert(type_info); - HDassert(type_info->tpath); + HDassert(dset_info); + HDassert(dset_info->type_info.tpath); HDassert(io_info); - /* init both dxpls to the original one */ - io_info->md_dxpl_id = dxpl_id; - io_info->raw_dxpl_id = dxpl_id; - - /* set the dxpl IO type for sanity checking at the FD layer */ -#ifdef H5_DEBUG_BUILD - if(H5D_set_io_info_dxpls(io_info, dxpl_id) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "can't set metadata and raw data dxpls") -#endif /* H5_DEBUG_BUILD */ - /* Set up "normal" I/O fields */ - io_info->dset = dset; + dset_info->dset = dset; io_info->dxpl_cache = dxpl_cache; - io_info->store = store; + io_info->is_coll_broken = FALSE; /* is collective broken? 
*/ + dset_info->store = store; /* Set I/O operations to initial values */ - io_info->layout_ops = *dset->shared->layout.ops; + dset_info->layout_ops = *dset->shared->layout.ops; /* Set the "high-level" I/O operations for the dataset */ io_info->io_ops.multi_read = dset->shared->layout.ops->ser_read; io_info->io_ops.multi_write = dset->shared->layout.ops->ser_write; /* Set the I/O operations for reading/writing single blocks on disk */ - if(type_info->is_xform_noop && type_info->is_conv_noop) { + if(dset_info->type_info.is_xform_noop && dset_info->type_info.is_conv_noop) { /* - * If there is no data transform or type conversion then read directly into - * the application's buffer. This saves at least one mem-to-mem copy. + * If there is no data transform or type conversion then read directly + * into the application's buffer. + * This saves at least one mem-to-mem copy. */ io_info->io_ops.single_read = H5D__select_read; io_info->io_ops.single_write = H5D__select_write; @@ -961,7 +1458,7 @@ H5D__typeinfo_init(const H5D_t *dset, const H5D_dxpl_cache_t *dxpl_cache, } /* end else */ /* - * Locate the type conversion function and data space conversion + * Locate the type conversion function and dataspace conversion * functions, and set up the element numbering information. If a data * type conversion is necessary then register datatype atoms. Data type * conversion is necessary if the user has set the `need_bkg' to a high @@ -1073,18 +1570,17 @@ done: * * Purpose: Adjust operation's I/O info for any parallel I/O * - * Return: Non-negative on success/Negative on failure + * This was derived from H5D__ioinfo_adjust for multi-dset work. 
* - * Programmer: Quincey Koziol - * Thursday, March 27, 2008 + * Return: Non-negative on success/Negative on failure * + * Programmer: Jonathan Kim Nov, 2013 *------------------------------------------------------------------------- */ static herr_t -H5D__ioinfo_adjust(H5D_io_info_t *io_info, const H5D_t *dset, hid_t dxpl_id, - const H5S_t *file_space, const H5S_t *mem_space, - const H5D_type_info_t *type_info, const H5D_chunk_map_t *fm) +H5D__ioinfo_adjust(const size_t count, H5D_io_info_t *io_info, hid_t dxpl_id) { + H5D_t *dset0; /* only the first dset , also for single dsets case */ H5P_genplist_t *dx_plist; /* Data transer property list */ H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode; /* performed chunk optimization */ H5D_mpio_actual_io_mode_t actual_io_mode; /* performed io mode */ @@ -1093,14 +1589,14 @@ H5D__ioinfo_adjust(H5D_io_info_t *io_info, const H5D_t *dset, hid_t dxpl_id, FUNC_ENTER_STATIC /* check args */ - HDassert(dset); - HDassert(dset->oloc.file); - HDassert(mem_space); - HDassert(file_space); - HDassert(type_info); - HDassert(type_info->tpath); + HDassert(count > 0); HDassert(io_info); + /* check the first dset, should exist either single or multi dset cases */ + HDassert(io_info->dsets_info[0].dset); + dset0 = io_info->dsets_info[0].dset; + HDassert(dset0->oloc.file); + /* Get the dataset transfer property list */ if(NULL == (dx_plist = (H5P_genplist_t *)H5I_object(dxpl_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset transfer property list") @@ -1122,24 +1618,29 @@ H5D__ioinfo_adjust(H5D_io_info_t *io_info, const H5D_t *dset, hid_t dxpl_id, /* Record the original state of parallel I/O transfer options */ io_info->orig.xfer_mode = io_info->dxpl_cache->xfer_mode; io_info->orig.coll_opt_mode = io_info->dxpl_cache->coll_opt_mode; + /* single-dset */ io_info->orig.io_ops.single_read = io_info->io_ops.single_read; io_info->orig.io_ops.single_write = io_info->io_ops.single_write; + /* multi-dset */ + 
io_info->orig.io_ops.single_read_md = io_info->io_ops.single_read_md; + io_info->orig.io_ops.single_write_md = io_info->io_ops.single_write_md; - /* Get MPI communicator */ - if(MPI_COMM_NULL == (io_info->comm = H5F_mpi_get_comm(dset->oloc.file))) + /* Get MPI communicator from the first dset */ + if(MPI_COMM_NULL == (io_info->comm = H5F_mpi_get_comm(dset0->oloc.file))) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't retrieve MPI communicator") - /* Check if we can set direct MPI-IO read/write functions */ - if((opt = H5D__mpio_opt_possible(io_info, file_space, mem_space, type_info, fm, dx_plist)) < 0) + if((opt = H5D__mpio_opt_possible(count, io_info, dx_plist)) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "invalid check for direct IO dataspace ") /* Check if we can use the optimized parallel I/O routines */ if(opt == TRUE) { /* Override the I/O op pointers to the MPI-specific routines */ - io_info->io_ops.multi_read = dset->shared->layout.ops->par_read; - io_info->io_ops.multi_write = dset->shared->layout.ops->par_write; - io_info->io_ops.single_read = H5D__mpio_select_read; - io_info->io_ops.single_write = H5D__mpio_select_write; + io_info->io_ops.multi_read = NULL; + io_info->io_ops.multi_write = NULL; + io_info->io_ops.multi_read_md = dset0->shared->layout.ops->par_read; + io_info->io_ops.multi_write_md = dset0->shared->layout.ops->par_write; + io_info->io_ops.single_read_md = H5D__mpio_select_read; + io_info->io_ops.single_write_md = H5D__mpio_select_write; } /* end if */ else { /* If we won't be doing collective I/O, but the user asked for @@ -1151,10 +1652,14 @@ H5D__ioinfo_adjust(H5D_io_info_t *io_info, const H5D_t *dset, hid_t dxpl_id, io_info->dxpl_cache->xfer_mode = H5FD_MPIO_INDEPENDENT; if(H5P_set(dx_plist, H5D_XFER_IO_XFER_MODE_NAME, &io_info->dxpl_cache->xfer_mode) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set transfer mode") + io_info->is_coll_broken = TRUE; } /* end if */ + else if(io_info->dxpl_cache->xfer_mode == 
H5FD_MPIO_INDEPENDENT) + io_info->is_coll_broken = TRUE; } /* end else */ } /* end if */ - + else + io_info->is_coll_broken = TRUE; done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__ioinfo_adjust() */ @@ -1164,13 +1669,13 @@ done: * Function: H5D__ioinfo_term * * Purpose: Common logic for terminating an I/O info object - * (Only used for restoring MPI transfer mode currently) + * (Only used for restoring MPI transfer mode currently) * - * Return: Non-negative on success/Negative on failure + * This was derived from H5D__ioinfo_term for multi-dset work. * - * Programmer: Quincey Koziol - * Friday, February 6, 2004 + * Return: Non-negative on success/Negative on failure * + * Programmer: Jonathan Kim Nov, 2013 *------------------------------------------------------------------------- */ static herr_t @@ -1212,7 +1717,6 @@ H5D__ioinfo_term(H5D_io_info_t *io_info) done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__ioinfo_term() */ - #endif /* H5_HAVE_PARALLEL */ diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c index 441cc96..7911b98 100644 --- a/src/H5Dmpio.c +++ b/src/H5Dmpio.c @@ -53,15 +53,14 @@ /* Macros to represent different IO options */ #define H5D_ONE_LINK_CHUNK_IO 0 -#define H5D_MULTI_CHUNK_IO 1 -#define H5D_ONE_LINK_CHUNK_IO_MORE_OPT 2 -#define H5D_MULTI_CHUNK_IO_MORE_OPT 3 /***** Macros for One linked collective IO case. *****/ /* The default value to do one linked collective IO for all chunks. - If the average number of chunks per process is greater than this value, - the library will create an MPI derived datatype to link all chunks to do collective IO. - The user can set this value through an API. */ + * If the average number of chunks per process is greater than this + * value, the library will create an MPI derived datatype to link all + * chunks to do collective IO. The user can set this value through an + * API. 
+ */ /* Macros to represent options on how to obtain chunk address for one linked-chunk IO case */ #define H5D_OBTAIN_ONE_CHUNK_ADDR_IND 0 @@ -72,14 +71,11 @@ #define H5D_ALL_CHUNK_ADDR_THRES_COL_NUM 10000 /***** Macros for multi-chunk collective IO case. *****/ -/* The default value of the threshold to do collective IO for this chunk. - If the average number of processes per chunk is greater than the default value, - collective IO is done for this chunk. -*/ +/* The default value of the threshold to do collective IO for this + * chunk. If the average number of processes per chunk is greater + * than the default value, collective IO is done for this chunk. + */ -/* Macros to represent different IO modes(NONE, Independent or collective)for multiple chunk IO case */ -#define H5D_CHUNK_IO_MODE_IND 0 -#define H5D_CHUNK_IO_MODE_COL 1 /* Macros to represent the regularity of the selection for multiple chunk IO case. */ #define H5D_CHUNK_SELECT_REG 1 @@ -90,42 +86,26 @@ /******************/ /* Local Typedefs */ /******************/ -/* Combine chunk address and chunk info into a struct for better performance. */ +/* Combine chunk/piece address and chunk/piece info into a struct for + * better performance. 
*/ typedef struct H5D_chunk_addr_info_t { - haddr_t chunk_addr; - H5D_chunk_info_t chunk_info; + /* piece for multi-dset */ + haddr_t piece_addr; + H5D_piece_info_t piece_info; } H5D_chunk_addr_info_t; /********************/ /* Local Prototypes */ /********************/ -static herr_t H5D__chunk_collective_io(H5D_io_info_t *io_info, - const H5D_type_info_t *type_info, H5D_chunk_map_t *fm); -static herr_t H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, - const H5D_type_info_t *type_info, H5D_chunk_map_t *fm, - H5P_genplist_t *dx_plist); -static herr_t H5D__link_chunk_collective_io(H5D_io_info_t *io_info, - const H5D_type_info_t *type_info, H5D_chunk_map_t *fm, int sum_chunk, - H5P_genplist_t *dx_plist); -static herr_t H5D__inter_collective_io(H5D_io_info_t *io_info, - const H5D_type_info_t *type_info, const H5S_t *file_space, - const H5S_t *mem_space); +/* multi-dset IO */ +static herr_t H5D__piece_io(const hid_t file_id, const size_t count, + H5D_io_info_t *io_info); static herr_t H5D__final_collective_io(H5D_io_info_t *io_info, - const H5D_type_info_t *type_info, hsize_t nelmts, MPI_Datatype *mpi_file_type, - MPI_Datatype *mpi_buf_type); -static herr_t H5D__sort_chunk(H5D_io_info_t *io_info, const H5D_chunk_map_t *fm, - H5D_chunk_addr_info_t chunk_addr_info_array[], int many_chunk_opt); -static herr_t H5D__obtain_mpio_mode(H5D_io_info_t *io_info, H5D_chunk_map_t *fm, - H5P_genplist_t *dx_plist, uint8_t assign_io_mode[], haddr_t chunk_addr[]); -static herr_t H5D__ioinfo_xfer_mode(H5D_io_info_t *io_info, H5P_genplist_t *dx_plist, - H5FD_mpio_xfer_t xfer_mode); -static herr_t H5D__ioinfo_coll_opt_mode(H5D_io_info_t *io_info, H5P_genplist_t *dx_plist, - H5FD_mpio_collective_opt_t coll_opt_mode); -static herr_t H5D__mpio_get_min_chunk(const H5D_io_info_t *io_info, - const H5D_chunk_map_t *fm, int *min_chunkf); -static herr_t H5D__mpio_get_sum_chunk(const H5D_io_info_t *io_info, - const H5D_chunk_map_t *fm, int *sum_chunkf); + hsize_t mpi_buf_count, MPI_Datatype 
*mpi_file_type, MPI_Datatype *mpi_buf_type); + +static herr_t H5D__all_piece_collective_io(const hid_t file_id, const size_t count, + H5D_io_info_t *io_info, H5P_genplist_t *dx_plist); /*********************/ @@ -142,90 +122,107 @@ static herr_t H5D__mpio_get_sum_chunk(const H5D_io_info_t *io_info, * Function: H5D__mpio_opt_possible * * Purpose: Checks if an direct I/O transfer is possible between memory and - * the file. + * the file. + * + * This was derived from H5D__mpio_opt_possible for + * multi-dset work. * * Return: Sauccess: Non-negative: TRUE or FALSE * Failure: Negative * - * Programmer: Quincey Koziol - * Wednesday, April 3, 2002 - * *------------------------------------------------------------------------- */ htri_t -H5D__mpio_opt_possible(const H5D_io_info_t *io_info, const H5S_t *file_space, - const H5S_t *mem_space, const H5D_type_info_t *type_info, - const H5D_chunk_map_t *fm, H5P_genplist_t *dx_plist) +H5D__mpio_opt_possible(const size_t count, H5D_io_info_t *io_info, H5P_genplist_t *dx_plist) { - int local_cause = 0; /* Local reason(s) for breaking collective mode */ - int global_cause = 0; /* Global reason(s) for breaking collective mode */ - htri_t ret_value; /* Return value */ + int i; + H5D_t *dset; + H5S_t *file_space; + const H5S_t *mem_space; + H5D_type_info_t type_info; + /* variables to set cause of broken collective I/O */ + int local_cause = 0; + int global_cause = 0; + + int mpi_code; /* MPI error code */ + htri_t ret_value = TRUE; FUNC_ENTER_PACKAGE /* Check args */ HDassert(io_info); - HDassert(mem_space); - HDassert(file_space); - HDassert(type_info); + HDassert(dx_plist); + for (i = 0; i < count; i++) { + HDassert(io_info->dsets_info[i].file_space); + HDassert(io_info->dsets_info[i].mem_space); + } /* For independent I/O, get out quickly and don't try to form consensus */ if(io_info->dxpl_cache->xfer_mode == H5FD_MPIO_INDEPENDENT) local_cause |= H5D_MPIO_SET_INDEPENDENT; - /* Optimized MPI types flag must be set */ - /* (based on 
'HDF5_MPI_OPT_TYPES' environment variable) */ - if(!H5FD_mpi_opt_types_g) - local_cause |= H5D_MPIO_MPI_OPT_TYPES_ENV_VAR_DISABLED; - - /* Don't allow collective operations if datatype conversions need to happen */ - if(!type_info->is_conv_noop) - local_cause |= H5D_MPIO_DATATYPE_CONVERSION; - - /* Don't allow collective operations if data transform operations should occur */ - if(!type_info->is_xform_noop) - local_cause |= H5D_MPIO_DATA_TRANSFORMS; - - /* Check whether these are both simple or scalar dataspaces */ - if(!((H5S_SIMPLE == H5S_GET_EXTENT_TYPE(mem_space) || H5S_SCALAR == H5S_GET_EXTENT_TYPE(mem_space)) - && (H5S_SIMPLE == H5S_GET_EXTENT_TYPE(file_space) || H5S_SCALAR == H5S_GET_EXTENT_TYPE(file_space)))) - local_cause |= H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES; - - /* Dataset storage must be contiguous or chunked */ - if(!(io_info->dset->shared->layout.type == H5D_CONTIGUOUS || - io_info->dset->shared->layout.type == H5D_CHUNKED)) - local_cause |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; - - /* check if external-file storage is used */ - if(io_info->dset->shared->dcpl_cache.efl.nused > 0) - local_cause |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; - - /* The handling of memory space is different for chunking and contiguous - * storage. For contiguous storage, mem_space and file_space won't change - * when it it is doing disk IO. For chunking storage, mem_space will - * change for different chunks. So for chunking storage, whether we can - * use collective IO will defer until each chunk IO is reached. 
- */ - - /* Don't allow collective operations if filters need to be applied */ - if(io_info->dset->shared->layout.type == H5D_CHUNKED && - io_info->dset->shared->dcpl_cache.pline.nused > 0) - local_cause |= H5D_MPIO_FILTERS; + for (i = 0; i < count; i++) + { + dset = io_info->dsets_info[i].dset; + file_space = io_info->dsets_info[i].file_space; + mem_space = io_info->dsets_info[i].mem_space; + type_info = io_info->dsets_info[i].type_info; + + /* Optimized MPI types flag must be set */ + /* (based on 'HDF5_MPI_OPT_TYPES' environment variable) */ + if(!H5FD_mpi_opt_types_g) + local_cause |= H5D_MPIO_MPI_OPT_TYPES_ENV_VAR_DISABLED; + + /* Don't allow collective operations if datatype conversions need to happen */ + if(!type_info.is_conv_noop) + local_cause |= H5D_MPIO_DATATYPE_CONVERSION; + + /* Don't allow collective operations if data transform operations should occur */ + if(!type_info.is_xform_noop) + local_cause |= H5D_MPIO_DATA_TRANSFORMS; + + /* Check whether these are both simple or scalar dataspaces */ + if(!((H5S_SIMPLE == H5S_GET_EXTENT_TYPE(mem_space) || H5S_SCALAR == H5S_GET_EXTENT_TYPE(mem_space)) + && (H5S_SIMPLE == H5S_GET_EXTENT_TYPE(file_space) || H5S_SCALAR == H5S_GET_EXTENT_TYPE(file_space)))) + local_cause |= H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES; + + /* Dataset storage must be contiguous or chunked */ + if(!(dset->shared->layout.type == H5D_CONTIGUOUS || + dset->shared->layout.type == H5D_CHUNKED)) + local_cause |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; + + /* check if external-file storage is used */ + if (dset->shared->dcpl_cache.efl.nused > 0) + local_cause |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; + + /* The handling of memory space is different for chunking and contiguous + * storage. For contiguous storage, mem_space and file_space won't change + * when it is doing disk IO. For chunking storage, mem_space will + * change for different chunks. 
So for chunking storage, whether we can + * use collective IO will defer until each chunk IO is reached. + */ + + /* Don't allow collective operations if filters need to be applied */ + if(dset->shared->layout.type == H5D_CHUNKED && + dset->shared->dcpl_cache.pline.nused > 0) + local_cause |= H5D_MPIO_FILTERS; + } /* end for loop */ /* Check for independent I/O */ if(local_cause & H5D_MPIO_SET_INDEPENDENT) global_cause = local_cause; else { - int mpi_code; /* MPI error code */ - /* Form consensus opinion among all processes about whether to perform * collective I/O */ - if(MPI_SUCCESS != (mpi_code = MPI_Allreduce(&local_cause, &global_cause, 1, MPI_INT, MPI_BOR, io_info->comm))) + if(MPI_SUCCESS != (mpi_code = MPI_Allreduce(&local_cause, &global_cause, 1, MPI_INT, + MPI_BOR, io_info->comm))) HMPI_GOTO_ERROR(FAIL, "MPI_Allreduce failed", mpi_code) } /* end else */ + ret_value = global_cause > 0 ? FALSE : TRUE; + /* Write the local value of no-collective-cause to the DXPL. */ if(H5P_set(dx_plist, H5D_MPIO_LOCAL_NO_COLLECTIVE_CAUSE_NAME, &local_cause) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set local no collective cause property") @@ -234,7 +231,6 @@ H5D__mpio_opt_possible(const H5D_io_info_t *io_info, const H5S_t *file_space, if(H5P_set(dx_plist, H5D_MPIO_GLOBAL_NO_COLLECTIVE_CAUSE_NAME, &global_cause) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set global no collective cause property") - /* Set the return value, based on the global cause */ ret_value = global_cause > 0 ? FALSE : TRUE; done: @@ -247,24 +243,32 @@ done: * * Purpose: MPI-IO function to read directly from app buffer to file. * - * Return: non-negative on success, negative on failure. + * This was referred from H5D__mpio_select_read for + * multi-dset work. * - * Programmer: + * Return: non-negative on success, negative on failure. 
 * *------------------------------------------------------------------------- */ herr_t -H5D__mpio_select_read(const H5D_io_info_t *io_info, const H5D_type_info_t H5_ATTR_UNUSED *type_info, - hsize_t mpi_buf_count, const H5S_t H5_ATTR_UNUSED *file_space, const H5S_t H5_ATTR_UNUSED *mem_space) +H5D__mpio_select_read(const H5D_io_info_t *io_info, hsize_t mpi_buf_count, + const H5S_t H5_ATTR_UNUSED *file_space, const H5S_t H5_ATTR_UNUSED *mem_space) { - const H5D_contig_storage_t *store_contig = &(io_info->store->contig); /* Contiguous storage info for this I/O operation */ + /* all dsets are in the same file, so just get it from the first dset */ + const H5F_t *file = io_info->dsets_info[0].dset->oloc.file; + void *rbuf = NULL; herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE + /* memory addr from a piece with lowest file addr */ + rbuf = io_info->base_maddr_r; + + /*OKAY: CAST DISCARDS CONST QUALIFIER*/ H5_CHECK_OVERFLOW(mpi_buf_count, hsize_t, size_t); - if(H5F_block_read(io_info->dset->oloc.file, H5FD_MEM_DRAW, store_contig->dset_addr, (size_t)mpi_buf_count, io_info->raw_dxpl_id, io_info->u.rbuf) < 0) - HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "can't finish collective parallel read") + if(H5F_block_read(file, H5FD_MEM_DRAW, io_info->store_faddr, (size_t)mpi_buf_count, + io_info->raw_dxpl_id, rbuf) < 0) + HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "can't finish collective parallel read") done: FUNC_LEAVE_NOAPI(ret_value) @@ -276,24 +280,31 @@ done: * * Purpose: MPI-IO function to write directly from app buffer to file. * - * Return: non-negative on success, negative on failure. + * This was referred from H5D__mpio_select_write for + * multi-dset work. * - * Programmer: + * Return: non-negative on success, negative on failure. 
* *------------------------------------------------------------------------- */ herr_t -H5D__mpio_select_write(const H5D_io_info_t *io_info, const H5D_type_info_t H5_ATTR_UNUSED *type_info, - hsize_t mpi_buf_count, const H5S_t H5_ATTR_UNUSED *file_space, const H5S_t H5_ATTR_UNUSED *mem_space) +H5D__mpio_select_write(const H5D_io_info_t *io_info, hsize_t mpi_buf_count, + const H5S_t H5_ATTR_UNUSED *file_space, const H5S_t H5_ATTR_UNUSED *mem_space) { - const H5D_contig_storage_t *store_contig = &(io_info->store->contig); /* Contiguous storage info for this I/O operation */ + /* all dsets are in the same file, so just get it from the first dset */ + const H5F_t *file = io_info->dsets_info[0].dset->oloc.file; + const void *wbuf = NULL; herr_t ret_value = SUCCEED; FUNC_ENTER_PACKAGE + /* memory addr from a piece with lowest file addr */ + wbuf = io_info->base_maddr_w; + /*OKAY: CAST DISCARDS CONST QUALIFIER*/ H5_CHECK_OVERFLOW(mpi_buf_count, hsize_t, size_t); - if(H5F_block_write(io_info->dset->oloc.file, H5FD_MEM_DRAW, store_contig->dset_addr, (size_t)mpi_buf_count, io_info->raw_dxpl_id, io_info->u.wbuf) < 0) + if(H5F_block_write(file, H5FD_MEM_DRAW, io_info->store_faddr, (size_t)mpi_buf_count, + io_info->raw_dxpl_id, wbuf) < 0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "can't finish collective parallel write") done: @@ -302,294 +313,23 @@ done: /*------------------------------------------------------------------------- - * Function: H5D__ioinfo_xfer_mode - * - * Purpose: Switch to between collective & independent MPI I/O - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * Friday, August 12, 2005 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5D__ioinfo_xfer_mode(H5D_io_info_t *io_info, H5P_genplist_t *dx_plist, - H5FD_mpio_xfer_t xfer_mode) -{ - herr_t ret_value = SUCCEED; /* return value */ - - FUNC_ENTER_STATIC - - /* Change the xfer_mode */ - 
io_info->dxpl_cache->xfer_mode = xfer_mode; - if(H5P_set(dx_plist, H5D_XFER_IO_XFER_MODE_NAME, &io_info->dxpl_cache->xfer_mode) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set transfer mode") - - /* Change the "single I/O" function pointers */ - if(xfer_mode == H5FD_MPIO_INDEPENDENT) { - /* Set the pointers to the original, non-MPI-specific routines */ - io_info->io_ops.single_read = io_info->orig.io_ops.single_read; - io_info->io_ops.single_write = io_info->orig.io_ops.single_write; - } /* end if */ - else { - HDassert(xfer_mode == H5FD_MPIO_COLLECTIVE); - - /* Set the pointers to the MPI-specific routines */ - io_info->io_ops.single_read = H5D__mpio_select_read; - io_info->io_ops.single_write = H5D__mpio_select_write; - } /* end else */ - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D__ioinfo_xfer_mode() */ - - -/*------------------------------------------------------------------------- - * Function: H5D__ioinfo_coll_opt_mode - * - * Purpose: Switch between using collective & independent MPI I/O w/file - * set view - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: MuQun Yang - * Oct. 
5th, 2006 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5D__ioinfo_coll_opt_mode(H5D_io_info_t *io_info, H5P_genplist_t *dx_plist, - H5FD_mpio_collective_opt_t coll_opt_mode) -{ - herr_t ret_value = SUCCEED; /* return value */ - - FUNC_ENTER_STATIC - - /* Change the optimal xfer_mode */ - io_info->dxpl_cache->coll_opt_mode = coll_opt_mode; - if(H5P_set(dx_plist, H5D_XFER_MPIO_COLLECTIVE_OPT_NAME, &io_info->dxpl_cache->coll_opt_mode) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set transfer mode") - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D__ioinfo_coll_opt_mode() */ - - -/*------------------------------------------------------------------------- - * Function: H5D__mpio_get_min_chunk - * - * Purpose: Routine for obtaining minimum number of chunks to cover - * hyperslab selection selected by all processors. - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Muqun Yang - * Monday, Feb. 
13th, 2006 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5D__mpio_get_min_chunk(const H5D_io_info_t *io_info, const H5D_chunk_map_t *fm, - int *min_chunkf) -{ - int num_chunkf; /* Number of chunks to iterate over */ - int mpi_code; /* MPI return code */ - herr_t ret_value = SUCCEED; - - FUNC_ENTER_STATIC - - /* Get the number of chunks to perform I/O on */ - num_chunkf = H5SL_count(fm->sel_chunks); - - /* Determine the minimum # of chunks for all processes */ - if(MPI_SUCCESS != (mpi_code = MPI_Allreduce(&num_chunkf, min_chunkf, 1, MPI_INT, MPI_MIN, io_info->comm))) - HMPI_GOTO_ERROR(FAIL, "MPI_Allreduce failed", mpi_code) - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D__mpio_get_min_chunk() */ - - -/*------------------------------------------------------------------------- - * Function: H5D__mpio_get_sum_chunk + * Function: H5D__piece_io * - * Purpose: Routine for obtaining total number of chunks to cover - * hyperslab selection selected by all processors. + * Purpose: Routine for choosing an IO option: + * a) Single collective IO defined by one MPI derived datatype + * to link through all pieces (chunks/contigs). Default. + * Note: previously there were other options, but cutoff as part of multi-dset work. * * Return: Non-negative on success/Negative on failure * - * Programmer: Muqun Yang - * Monday, Feb. 
13th, 2006 - * *------------------------------------------------------------------------- */ static herr_t -H5D__mpio_get_sum_chunk(const H5D_io_info_t *io_info, const H5D_chunk_map_t *fm, - int *sum_chunkf) -{ - int num_chunkf; /* Number of chunks to iterate over */ - size_t ori_num_chunkf; - int mpi_code; /* MPI return code */ - herr_t ret_value = SUCCEED; - - FUNC_ENTER_STATIC - - /* Get the number of chunks to perform I/O on */ - num_chunkf = 0; - ori_num_chunkf = H5SL_count(fm->sel_chunks); - H5_CHECKED_ASSIGN(num_chunkf, int, ori_num_chunkf, size_t); - - /* Determine the summation of number of chunks for all processes */ - if(MPI_SUCCESS != (mpi_code = MPI_Allreduce(&num_chunkf, sum_chunkf, 1, MPI_INT, MPI_SUM, io_info->comm))) - HMPI_GOTO_ERROR(FAIL, "MPI_Allreduce failed", mpi_code) - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D__mpio_get_sum_chunk() */ - - -/*------------------------------------------------------------------------- - * Function: H5D__contig_collective_read - * - * Purpose: Reads directly from contiguous data in file into application - * memory using collective I/O. 
- * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * Tuesday, March 4, 2008 - * - *------------------------------------------------------------------------- - */ -herr_t -H5D__contig_collective_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, - hsize_t H5_ATTR_UNUSED nelmts, const H5S_t *file_space, const H5S_t *mem_space, - H5D_chunk_map_t H5_ATTR_UNUSED *fm) -{ - H5D_mpio_actual_io_mode_t actual_io_mode = H5D_MPIO_CONTIGUOUS_COLLECTIVE; - H5P_genplist_t *dx_plist; /* Pointer to DXPL */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Sanity check */ - HDassert(H5FD_MPIO == H5F_DRIVER_ID(io_info->dset->oloc.file)); - HDassert(TRUE == H5P_isa_class(io_info->raw_dxpl_id, H5P_DATASET_XFER)); - - /* Call generic internal collective I/O routine */ - if(H5D__inter_collective_io(io_info, type_info, file_space, mem_space) < 0) - HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "couldn't finish shared collective MPI-IO") - - /* Obtain the data transfer properties */ - if(NULL == (dx_plist = H5I_object(io_info->raw_dxpl_id))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data transfer property list") - - /* Set the actual I/O mode property. internal_collective_io will not break to - * independent I/O, so we set it here. - */ - if(H5P_set(dx_plist, H5D_MPIO_ACTUAL_IO_MODE_NAME, &actual_io_mode) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set actual io mode property") - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D__contig_collective_read() */ - - -/*------------------------------------------------------------------------- - * Function: H5D__contig_collective_write - * - * Purpose: Write directly to contiguous data in file from application - * memory using collective I/O. 
- * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Quincey Koziol - * Tuesday, March 4, 2008 - * - *------------------------------------------------------------------------- - */ -herr_t -H5D__contig_collective_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, - hsize_t H5_ATTR_UNUSED nelmts, const H5S_t *file_space, const H5S_t *mem_space, - H5D_chunk_map_t H5_ATTR_UNUSED *fm) -{ - H5D_mpio_actual_io_mode_t actual_io_mode = H5D_MPIO_CONTIGUOUS_COLLECTIVE; - H5P_genplist_t *dx_plist; /* Pointer to DXPL */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_PACKAGE - - /* Sanity check */ - HDassert(H5FD_MPIO == H5F_DRIVER_ID(io_info->dset->oloc.file)); - HDassert(TRUE == H5P_isa_class(io_info->raw_dxpl_id, H5P_DATASET_XFER)); - - /* Call generic internal collective I/O routine */ - if(H5D__inter_collective_io(io_info, type_info, file_space, mem_space) < 0) - HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "couldn't finish shared collective MPI-IO") - - /* Obtain the data transfer properties */ - if(NULL == (dx_plist = H5I_object(io_info->raw_dxpl_id))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data transfer property list") - - /* Set the actual I/O mode property. internal_collective_io will not break to - * independent I/O, so we set it here. - */ - if(H5P_set(dx_plist, H5D_MPIO_ACTUAL_IO_MODE_NAME, &actual_io_mode) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set actual io mode property") - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D__contig_collective_write() */ - - -/*------------------------------------------------------------------------- - * Function: H5D__chunk_collective_io - * - * Purpose: Routine for - * 1) choose an IO option: - * a) One collective IO defined by one MPI derived datatype to link through all chunks - * or b) multiple chunk IOs,to do MPI-IO for each chunk, the IO mode may be adjusted - * due to the selection pattern for each chunk. - * For option a) - * 1. 
Sort the chunk address, obtain chunk info according to the sorted chunk address - * 2. Build up MPI derived datatype for each chunk - * 3. Build up the final MPI derived datatype - * 4. Set up collective IO property list - * 5. Do IO - * For option b) - * 1. Use MPI_gather and MPI_Bcast to obtain information of *collective/independent/none* - * IO mode for each chunk of the selection - * 2. Depending on whether the IO mode is collective or independent or none, - * Create either MPI derived datatype for each chunk to do collective IO or - * just do independent IO or independent IO with file set view - * 3. Set up collective IO property list for collective mode - * 4. DO IO - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Muqun Yang - * Monday, Feb. 13th, 2006 - * - * Modification: - * - Refctore to remove multi-chunk-without-opimization feature and update for - * multi-chunk-io accordingly - * Programmer: Jonathan Kim - * Date: 2012-10-10 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5D__chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, - H5D_chunk_map_t *fm) +H5D__piece_io(const hid_t file_id, const size_t count, H5D_io_info_t *io_info) { H5P_genplist_t *dx_plist; /* Pointer to DXPL */ H5FD_mpio_chunk_opt_t chunk_opt_mode; - int io_option = H5D_MULTI_CHUNK_IO_MORE_OPT; - int sum_chunk = -1; -#ifdef H5_HAVE_INSTRUMENTED_LIBRARY - htri_t temp_not_link_io = FALSE; -#endif + int io_option = H5D_ONE_LINK_CHUNK_IO; herr_t ret_value = SUCCEED; FUNC_ENTER_STATIC @@ -597,11 +337,9 @@ H5D__chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_inf /* Sanity checks */ HDassert(io_info); HDassert(io_info->using_mpi_vfd); - HDassert(type_info); - HDassert(fm); /* Obtain the data transfer properties */ - if(NULL == (dx_plist = H5I_object(io_info->raw_dxpl_id))) + if(NULL == (dx_plist = (H5P_genplist_t *)H5I_object(io_info->raw_dxpl_id))) 
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a file access property list") /* Check the optional property list on what to do with collective chunk IO. */ @@ -609,14 +347,9 @@ H5D__chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_inf HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't get chunk optimization option") if(H5FD_MPIO_CHUNK_ONE_IO == chunk_opt_mode) io_option = H5D_ONE_LINK_CHUNK_IO; /*no opt*/ - /* direct request to multi-chunk-io */ - else if(H5FD_MPIO_CHUNK_MULTI_IO == chunk_opt_mode) - io_option = H5D_MULTI_CHUNK_IO; - /* via default path. branch by num threshold */ - else { - unsigned one_link_chunk_io_threshold; /* Threshhold to use single collective I/O for all chunks */ - int mpi_size; /* Number of processes in MPI job */ +/* MSC - From merge.. remove probably */ +#if 0 if(H5D__mpio_get_sum_chunk(io_info, fm, &sum_chunk) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSWAP, FAIL, "unable to obtain the total chunk number of all processes"); if((mpi_size = H5F_mpi_get_size(io_info->dset->oloc.file)) < 0) @@ -630,83 +363,40 @@ H5D__chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_inf /* If the average number of chunk per process is greater than a threshold, we will do one link chunked IO. */ if((unsigned)sum_chunk / mpi_size >= one_link_chunk_io_threshold) io_option = H5D_ONE_LINK_CHUNK_IO_MORE_OPT; -#ifdef H5_HAVE_INSTRUMENTED_LIBRARY - else - temp_not_link_io = TRUE; #endif - } /* end else */ #ifdef H5_HAVE_INSTRUMENTED_LIBRARY -{ - H5P_genplist_t *plist; /* Property list pointer */ - htri_t check_prop; - int new_value; - - /* Get the dataset transfer property list */ - if(NULL == (plist = (H5P_genplist_t *)H5I_object(io_info->raw_dxpl_id))) - HGOTO_ERROR(H5E_IO, H5E_BADTYPE, FAIL, "not a dataset transfer property list") - - /*** Test collective chunk user-input optimization APIs. 
***/ - check_prop = H5P_exist_plist(plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME); - if(check_prop > 0) { - if(H5D_ONE_LINK_CHUNK_IO == io_option) { - new_value = 0; - if(H5P_set(plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, &new_value) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTSET, FAIL, "unable to set property value") - } /* end if */ - } /* end if */ - check_prop = H5P_exist_plist(plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME); - if(check_prop > 0) { - if(H5D_MULTI_CHUNK_IO == io_option) { - new_value = 0; - if(H5P_set(plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, &new_value) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTSET, FAIL, "unable to set property value") - } /* end if */ - } /* end if */ - check_prop = H5P_exist_plist(plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME); - if(check_prop > 0) { - if(H5D_ONE_LINK_CHUNK_IO_MORE_OPT == io_option) { - new_value = 0; - if(H5P_set(plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, &new_value) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTSET, FAIL, "unable to set property value") - } /* end if */ - } /* end if */ - check_prop = H5P_exist_plist(plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME); - if(check_prop > 0) { - if(temp_not_link_io) { - new_value = 0; - if(H5P_set(plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, &new_value) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTSET, FAIL, "unable to set property value") + { + htri_t check_prop; + int new_value; + + /*** Test collective chunk user-input optimization APIs. 
***/ + check_prop = H5Pexist(io_info->raw_dxpl_id, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME); + if(check_prop > 0) { + if(H5D_ONE_LINK_CHUNK_IO == io_option) { + new_value = 0; + if(H5Pset(io_info->raw_dxpl_id, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, &new_value) < 0) + HGOTO_ERROR(H5E_IO, H5E_CANTSET, FAIL, "unable to set property value") + } /* end if */ } /* end if */ - } /* end if */ -} + } /* end block */ #endif /* step 2: Go ahead to do IO.*/ - if(H5D_ONE_LINK_CHUNK_IO == io_option || H5D_ONE_LINK_CHUNK_IO_MORE_OPT == io_option) { - if(H5D__link_chunk_collective_io(io_info, type_info, fm, sum_chunk, dx_plist) < 0) + if(H5D_ONE_LINK_CHUNK_IO == io_option) { + if(H5D__all_piece_collective_io(file_id, count, io_info, dx_plist) < 0) HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish linked chunk MPI-IO") } /* end if */ - /* direct request to multi-chunk-io */ - else if(H5D_MULTI_CHUNK_IO == io_option) { - if(H5D__multi_chunk_collective_io(io_info, type_info, fm, dx_plist) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish optimized multiple chunk MPI-IO") - } /* end if */ - else { /* multiple chunk IO via threshold */ - if(H5D__multi_chunk_collective_io(io_info, type_info, fm, dx_plist) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish optimized multiple chunk MPI-IO") - } /* end else */ - done: FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D__chunk_collective_io */ +} /* end H5D__piece_io */ /*------------------------------------------------------------------------- - * Function: H5D__chunk_collective_read + * Function: H5D__collective_read * - * Purpose: Reads directly from chunks in file into application memory - * using collective I/O. + * Purpose: Read directly from pieces (chunks/contig) in file into + * application memory using collective I/O. 
* * Return: Non-negative on success/Negative on failure * @@ -716,28 +406,26 @@ done: *------------------------------------------------------------------------- */ herr_t -H5D__chunk_collective_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, - hsize_t H5_ATTR_UNUSED nelmts, const H5S_t H5_ATTR_UNUSED *file_space, const H5S_t H5_ATTR_UNUSED *mem_space, - H5D_chunk_map_t *fm) +H5D__collective_read(const hid_t file_id, const size_t count, H5D_io_info_t *io_info) { herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE /* Call generic selection operation */ - if(H5D__chunk_collective_io(io_info, type_info, fm) < 0) + if(H5D__piece_io(file_id, count, io_info) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_READERROR, FAIL, "read error") done: FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D__chunk_collective_read() */ +} /* end H5D__collective_read() */ /*------------------------------------------------------------------------- - * Function: H5D__chunk_collective_write + * Function: H5D__collective_write * - * Purpose: Write directly to chunks in file from application memory - * using collective I/O. + * Purpose: Write directly to pieces (chunks/contig) in file into + * application memory using collective I/O. 
* * Return: Non-negative on success/Negative on failure * @@ -747,60 +435,49 @@ done: *------------------------------------------------------------------------- */ herr_t -H5D__chunk_collective_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, - hsize_t H5_ATTR_UNUSED nelmts, const H5S_t H5_ATTR_UNUSED *file_space, const H5S_t H5_ATTR_UNUSED *mem_space, - H5D_chunk_map_t *fm) +H5D__collective_write(const hid_t file_id, const size_t count, H5D_io_info_t *io_info) { herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE /* Call generic selection operation */ - if(H5D__chunk_collective_io(io_info, type_info, fm) < 0) + if(H5D__piece_io(file_id, count, io_info) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_WRITEERROR, FAIL, "write error") done: FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D__chunk_collective_write() */ +} /* end H5D__collective_write() */ /*------------------------------------------------------------------------- - * Function: H5D__link_chunk_collective_io + * Function: H5D__all_piece_collective_io * - * Purpose: Routine for one collective IO with one MPI derived datatype to link with all chunks + * Purpose: Routine for single collective IO with one MPI derived datatype + * to link with all pieces (chunks + contig) * - * 1. Sort the chunk address and chunk info - * 2. Build up MPI derived datatype for each chunk - * 3. Build up the final MPI derived datatype - * 4. Use common collective IO routine to do MPI-IO + * 1. Use the piece addresses and piece info sorted in skiplist + * 2. Build up MPI derived datatype for each chunk + * 3. Build up the final MPI derived datatype + * 4. Use common collective IO routine to do MPI-IO * * Return: Non-negative on success/Negative on failure * - * Programmer: Muqun Yang - * Monday, Feb. 13th, 2006 - * - * Modification: - * - Set H5D_MPIO_ACTUAL_CHUNK_OPT_MODE_NAME and H5D_MPIO_ACTUAL_IO_MODE_NAME - * dxpl in this. 
- * Programmer: Jonathan Kim - * Date: 2012-10-10 *------------------------------------------------------------------------- */ static herr_t -H5D__link_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, - H5D_chunk_map_t *fm, int sum_chunk, H5P_genplist_t *dx_plist) +H5D__all_piece_collective_io(H5_ATTR_UNUSED const hid_t file_id, const size_t count, + H5D_io_info_t *io_info, H5P_genplist_t *dx_plist) { - H5D_chunk_addr_info_t *chunk_addr_info_array = NULL; MPI_Datatype chunk_final_mtype; /* Final memory MPI datatype for all chunks with seletion */ hbool_t chunk_final_mtype_is_derived = FALSE; MPI_Datatype chunk_final_ftype; /* Final file MPI datatype for all chunks with seletion */ hbool_t chunk_final_ftype_is_derived = FALSE; H5D_storage_t ctg_store; /* Storage info for "fake" contiguous dataset */ - size_t total_chunks; - haddr_t *total_chunk_addr_array = NULL; + size_t i; MPI_Datatype *chunk_mtype = NULL; MPI_Datatype *chunk_ftype = NULL; - MPI_Aint *chunk_disp_array = NULL; + MPI_Aint *chunk_file_disp_array = NULL; MPI_Aint *chunk_mem_disp_array = NULL; hbool_t *chunk_mft_is_derived_array = NULL; /* Flags to indicate each chunk's MPI file datatype is derived */ hbool_t *chunk_mbt_is_derived_array = NULL; /* Flags to indicate each chunk's MPI memory datatype is derived */ @@ -808,11 +485,26 @@ H5D__link_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *typ int *chunk_mpi_mem_counts = NULL; /* Count of MPI memory datatype for each chunk */ int mpi_code; /* MPI return code */ H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode = H5D_MPIO_LINK_CHUNK; - H5D_mpio_actual_io_mode_t actual_io_mode = H5D_MPIO_CHUNK_COLLECTIVE; + H5D_mpio_actual_io_mode_t actual_io_mode = 0; herr_t ret_value = SUCCEED; FUNC_ENTER_STATIC + /* set actual_io_mode */ + for (i=0; i < count; i++) { + if (io_info->dsets_info[i].layout->type == H5D_CHUNKED) + actual_io_mode |= H5D_MPIO_CHUNK_COLLECTIVE; + else if 
(io_info->dsets_info[i].layout->type == H5D_CONTIGUOUS) { + actual_io_mode |= H5D_MPIO_CONTIGUOUS_COLLECTIVE; + + /* if only single-dset */ + if (1 == count) + actual_chunk_opt_mode = H5D_MPIO_NO_CHUNK_OPTIMIZATION; + } + else + HGOTO_ERROR(H5E_IO, H5E_UNSUPPORTED, FAIL, "unsupported storage layout") + } + /* Set the actual-chunk-opt-mode property. */ if(H5P_set(dx_plist, H5D_MPIO_ACTUAL_CHUNK_OPT_MODE_NAME, &actual_chunk_opt_mode) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set actual chunk opt mode property") @@ -822,129 +514,79 @@ H5D__link_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *typ if(H5P_set(dx_plist, H5D_MPIO_ACTUAL_IO_MODE_NAME, &actual_io_mode) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set actual io mode property") - /* Get the sum # of chunks, if not already available */ - if(sum_chunk < 0) { - if(H5D__mpio_get_sum_chunk(io_info, fm, &sum_chunk) < 0) - HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSWAP, FAIL, "unable to obtain the total chunk number of all processes"); - } /* end if */ - - /* Retrieve total # of chunks in dataset */ - H5_CHECKED_ASSIGN(total_chunks, size_t, fm->layout->u.chunk.nchunks, hsize_t); - - /* Handle special case when dataspace dimensions only allow one chunk in - * the dataset. 
[This sometimes is used by developers who want the - * equivalent of compressed contiguous datasets - QAK] - */ - if(total_chunks == 1) { - H5SL_node_t *chunk_node; /* Pointer to chunk node for selection */ - H5S_t *fspace; /* Dataspace describing chunk & selection in it */ - H5S_t *mspace; /* Dataspace describing selection in memory corresponding to this chunk */ - - /* Check for this process having selection in this chunk */ - chunk_node = H5SL_first(fm->sel_chunks); - - if(chunk_node == NULL) { - /* Set the dataspace info for I/O to NULL, this process doesn't have any I/O to perform */ - fspace = mspace = NULL; - - /* Initialize chunk address */ - ctg_store.contig.dset_addr = 0; - } /* end if */ - else { - H5D_chunk_ud_t udata; /* User data for querying chunk info */ - H5D_chunk_info_t *chunk_info; /* Info for chunk in skiplist */ - - /* Get the chunk info, for the selection in the chunk */ - if(NULL == (chunk_info = (H5D_chunk_info_t *)H5SL_item(chunk_node))) - HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL, "couldn't get chunk info from skip list") - - /* Set the dataspace info for I/O */ - fspace = chunk_info->fspace; - mspace = chunk_info->mspace; - - /* Look up address of chunk */ - if(H5D__chunk_lookup(io_info->dset, io_info->md_dxpl_id, chunk_info->scaled, &udata) < 0) - HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL, "couldn't get chunk address") - ctg_store.contig.dset_addr = udata.chunk_block.offset; - } /* end else */ - - /* Set up the base storage address for this chunk */ - io_info->store = &ctg_store; - -#ifdef H5D_DEBUG -if(H5DEBUG(D)) - HDfprintf(H5DEBUG(D),"before inter_collective_io for total chunk = 1 \n"); -#endif - - /* Perform I/O */ - if(H5D__inter_collective_io(io_info, type_info, fspace, mspace) < 0) - HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL, "couldn't finish shared collective MPI-IO") - } /* end if */ - else { + /* Code block for actual actions (Build a MPI Type, IO) */ + { hsize_t mpi_buf_count; /* Number of MPI types */ size_t num_chunk; 
/* Number of chunks for this process */ - size_t u; /* Local index variable */ + size_t u=0; /* Local index variable */ + + H5SL_node_t *piece_node; /* Current node in chunk skip list */ + H5D_piece_info_t *piece_info; + + /* local variable for base address for write buffer */ + const void * base_wbuf_addr = NULL; + void * base_rbuf_addr = NULL; /* Get the number of chunks with a selection */ - num_chunk = H5SL_count(fm->sel_chunks); + num_chunk = H5SL_count(io_info->sel_pieces); H5_CHECK_OVERFLOW(num_chunk, size_t, int); -#ifdef H5D_DEBUG -if(H5DEBUG(D)) - HDfprintf(H5DEBUG(D),"total_chunks = %Zu, num_chunk = %Zu\n", total_chunks, num_chunk); -#endif - /* Set up MPI datatype for chunks selected */ if(num_chunk) { /* Allocate chunking information */ - if(NULL == (chunk_addr_info_array = (H5D_chunk_addr_info_t *)H5MM_malloc(num_chunk * sizeof(H5D_chunk_addr_info_t)))) - HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk array buffer") - if(NULL == (chunk_mtype = (MPI_Datatype *)H5MM_malloc(num_chunk * sizeof(MPI_Datatype)))) + if(NULL == (chunk_mtype = (MPI_Datatype *)H5MM_malloc(num_chunk * sizeof(MPI_Datatype)))) HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk memory datatype buffer") - if(NULL == (chunk_ftype = (MPI_Datatype *)H5MM_malloc(num_chunk * sizeof(MPI_Datatype)))) + if(NULL == (chunk_ftype = (MPI_Datatype *)H5MM_malloc(num_chunk * sizeof(MPI_Datatype)))) HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk file datatype buffer") - if(NULL == (chunk_disp_array = (MPI_Aint *)H5MM_malloc(num_chunk * sizeof(MPI_Aint)))) + if(NULL == (chunk_file_disp_array = (MPI_Aint *)H5MM_malloc(num_chunk * sizeof(MPI_Aint)))) HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk file displacement buffer") - if(NULL == (chunk_mem_disp_array = (MPI_Aint *)H5MM_calloc(num_chunk * sizeof(MPI_Aint)))) + if(NULL == (chunk_mem_disp_array = (MPI_Aint *)H5MM_calloc(num_chunk * sizeof(MPI_Aint)))) 
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk memory displacement buffer") - if(NULL == (chunk_mpi_mem_counts = (int *)H5MM_calloc(num_chunk * sizeof(int)))) + if(NULL == (chunk_mpi_mem_counts = (int *)H5MM_calloc(num_chunk * sizeof(int)))) HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk memory counts buffer") - if(NULL == (chunk_mpi_file_counts = (int *)H5MM_calloc(num_chunk * sizeof(int)))) + if(NULL == (chunk_mpi_file_counts = (int *)H5MM_calloc(num_chunk * sizeof(int)))) HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk file counts buffer") - if(NULL == (chunk_mbt_is_derived_array = (hbool_t *)H5MM_calloc(num_chunk * sizeof(hbool_t)))) + if(NULL == (chunk_mbt_is_derived_array = (hbool_t *)H5MM_calloc(num_chunk * sizeof(hbool_t)))) HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk memory is derived datatype flags buffer") - if(NULL == (chunk_mft_is_derived_array = (hbool_t *)H5MM_calloc(num_chunk * sizeof(hbool_t)))) + if(NULL == (chunk_mft_is_derived_array = (hbool_t *)H5MM_calloc(num_chunk * sizeof(hbool_t)))) HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk file is derived datatype flags buffer") -#ifdef H5D_DEBUG -if(H5DEBUG(D)) - HDfprintf(H5DEBUG(D),"before sorting the chunk address \n"); -#endif - /* Sort the chunk address */ - if(H5D__sort_chunk(io_info, fm, chunk_addr_info_array, sum_chunk) < 0) - HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSWAP, FAIL, "unable to sort chunk address") - ctg_store.contig.dset_addr = chunk_addr_info_array[0].chunk_addr; + /* get first piece, which is sorted in skiplist */ + if(NULL == (piece_node = H5SL_first(io_info->sel_pieces))) + HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL,"couldn't get piece node from skipped list") + if(NULL == (piece_info = (H5D_piece_info_t *)H5SL_item(piece_node))) + HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL,"couldn't get piece info from skipped list") + /* save lowest file address */ + 
ctg_store.contig.dset_addr = piece_info->faddr; + + /* save base mem addr of piece for read/write */ + base_wbuf_addr = piece_info->dset_info->u.wbuf; + base_rbuf_addr = piece_info->dset_info->u.rbuf; #ifdef H5D_DEBUG -if(H5DEBUG(D)) - HDfprintf(H5DEBUG(D),"after sorting the chunk address \n"); + if(H5DEBUG(D)) + HDfprintf(H5DEBUG(D),"before iterate over selected pieces\n"); #endif - /* Obtain MPI derived datatype from all individual chunks */ - for(u = 0; u < num_chunk; u++) { + /* Obtain MPI derived datatype from all individual pieces */ + /* Iterate over selected pieces for this process */ + while(piece_node) { hsize_t *permute_map = NULL; /* array that holds the mapping from the old, out-of-order displacements to the in-order displacements of the MPI datatypes of the point selection of the file space */ hbool_t is_permuted = FALSE; + if(NULL == (piece_info = (H5D_piece_info_t *)H5SL_item(piece_node))) + HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL,"couldn't get piece info from skipped list") + /* Obtain disk and memory MPI derived datatype */ /* NOTE: The permute_map array can be allocated within H5S_mpio_space_type * and will be fed into the next call to H5S_mpio_space_type * where it will be freed. 
*/ - if(H5S_mpio_space_type(chunk_addr_info_array[u].chunk_info.fspace, - type_info->src_type_size, + if(H5S_mpio_space_type(piece_info->fspace, + piece_info->dset_info->type_info.src_type_size, &chunk_ftype[u], /* OUT: datatype created */ &chunk_mpi_file_counts[u], /* OUT */ &(chunk_mft_is_derived_array[u]), /* OUT */ @@ -959,11 +601,13 @@ if(H5DEBUG(D)) are out of order */ &is_permuted /* OUT */) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI file type") + /* Sanity check */ if(is_permuted) HDassert(permute_map); - if(H5S_mpio_space_type(chunk_addr_info_array[u].chunk_info.mspace, - type_info->dst_type_size, &chunk_mtype[u], + if(H5S_mpio_space_type(piece_info->mspace, + piece_info->dset_info->type_info.dst_type_size, + &chunk_mtype[u], &chunk_mpi_mem_counts[u], &(chunk_mbt_is_derived_array[u]), FALSE, /* this is a memory @@ -985,24 +629,38 @@ if(H5DEBUG(D)) if(is_permuted) HDassert(!permute_map); - /* Chunk address relative to the first chunk */ - chunk_addr_info_array[u].chunk_addr -= ctg_store.contig.dset_addr; - - /* Assign chunk address to MPI displacement */ - /* (assume MPI_Aint big enough to hold it) */ - chunk_disp_array[u] = (MPI_Aint)chunk_addr_info_array[u].chunk_addr; - } /* end for */ - - /* Create final MPI derived datatype for the file */ - if(MPI_SUCCESS != (mpi_code = MPI_Type_create_struct((int)num_chunk, chunk_mpi_file_counts, chunk_disp_array, chunk_ftype, &chunk_final_ftype))) + /* Piece address relative to the first piece addr + * Assign piece address to MPI displacement + * (assume MPI_Aint big enough to hold it) */ + chunk_file_disp_array[u] = (MPI_Aint)piece_info->faddr - (MPI_Aint)ctg_store.contig.dset_addr; + + if(io_info->op_type == H5D_IO_OP_WRITE) { + chunk_mem_disp_array[u] = (MPI_Aint)piece_info->dset_info->u.wbuf - (MPI_Aint)base_wbuf_addr; + } + else if (io_info->op_type == H5D_IO_OP_READ) { + chunk_mem_disp_array[u] = (MPI_Aint)piece_info->dset_info->u.rbuf - (MPI_Aint)base_rbuf_addr; + } + + /* Advance 
to next piece in list */ + u++; + piece_node = H5SL_next(piece_node); + } /* end while */ + + if(MPI_SUCCESS != (mpi_code = MPI_Type_create_struct((int)num_chunk, chunk_mpi_file_counts, + chunk_file_disp_array, chunk_ftype, + &chunk_final_ftype))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code) + if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&chunk_final_ftype))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code) chunk_final_ftype_is_derived = TRUE; /* Create final MPI derived datatype for memory */ - if(MPI_SUCCESS != (mpi_code = MPI_Type_create_struct((int)num_chunk, chunk_mpi_mem_counts, chunk_mem_disp_array, chunk_mtype, &chunk_final_mtype))) + if(MPI_SUCCESS != (mpi_code = MPI_Type_create_struct((int)num_chunk, chunk_mpi_mem_counts, + chunk_mem_disp_array, chunk_mtype, + &chunk_final_mtype))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code) + if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&chunk_final_mtype))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code) chunk_final_mtype_is_derived = TRUE; @@ -1022,40 +680,36 @@ if(H5DEBUG(D)) mpi_buf_count = (hsize_t)1; } /* end if */ else { /* no selection at all for this process */ - /* Allocate chunking information */ - if(NULL == (total_chunk_addr_array = (haddr_t *)H5MM_malloc(sizeof(haddr_t) * total_chunks))) - HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate total chunk address arraybuffer") - - /* Retrieve chunk address map */ - if(H5D__chunk_addrmap(io_info, total_chunk_addr_array) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk address") - /* Get chunk with lowest address */ - ctg_store.contig.dset_addr = HADDR_MAX; - for(u = 0; u < total_chunks; u++) - if(total_chunk_addr_array[u] < ctg_store.contig.dset_addr) - ctg_store.contig.dset_addr = total_chunk_addr_array[u]; - HDassert(ctg_store.contig.dset_addr != HADDR_MAX); - - /* Set the MPI datatype */ + /* since this process doesn't do any io, just pass a valid addr. 
+ * at this point dset object header address is available to any + * process, so just pass it. 0x0 also works fine */ + ctg_store.contig.dset_addr = 0x0; + /* or ctg_store.contig.dset_addr = io_info->dsets_info[0].dset->oloc.addr; */ + + /* just provide a valid mem address. no actual IO occurs */ + base_wbuf_addr = io_info->dsets_info[0].u.wbuf; + base_rbuf_addr = io_info->dsets_info[0].u.rbuf; + + /* Set the MPI datatype to just participate */ + chunk_final_ftype = MPI_BYTE; + chunk_final_mtype = MPI_BYTE; - /* No chunks selected for this process */ mpi_buf_count = (hsize_t)0; } /* end else */ #ifdef H5D_DEBUG -if(H5DEBUG(D)) - HDfprintf(H5DEBUG(D),"before coming to final collective IO\n"); + if(H5DEBUG(D)) + HDfprintf(H5DEBUG(D),"before coming to final collective IO\n"); #endif - - /* Set up the base storage address for this chunk */ - io_info->store = &ctg_store; + /* Set up the base storage address for this piece */ + io_info->store_faddr = ctg_store.contig.dset_addr; + io_info->base_maddr_w = base_wbuf_addr; + io_info->base_maddr_r = base_rbuf_addr; /* Perform final collective I/O operation */ - if(H5D__final_collective_io(io_info, type_info, mpi_buf_count, &chunk_final_ftype, &chunk_final_mtype) < 0) + if(H5D__final_collective_io(io_info, mpi_buf_count, &chunk_final_ftype, &chunk_final_mtype) < 0) HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish MPI-IO") - } /* end else */ + } done: #ifdef H5D_DEBUG @@ -1063,16 +717,12 @@ if(H5DEBUG(D)) HDfprintf(H5DEBUG(D),"before freeing memory inside H5D_link_collective_io ret_value = %d\n", ret_value); #endif /* Release resources */ - if(total_chunk_addr_array) - H5MM_xfree(total_chunk_addr_array); - if(chunk_addr_info_array) - H5MM_xfree(chunk_addr_info_array); if(chunk_mtype) H5MM_xfree(chunk_mtype); if(chunk_ftype) H5MM_xfree(chunk_ftype); - if(chunk_disp_array) - H5MM_xfree(chunk_disp_array); + if(chunk_file_disp_array) + H5MM_xfree(chunk_file_disp_array); if(chunk_mem_disp_array) H5MM_xfree(chunk_mem_disp_array); 
if(chunk_mpi_mem_counts) @@ -1091,328 +741,7 @@ if(H5DEBUG(D)) HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code) FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D__link_chunk_collective_io */ - - -/*------------------------------------------------------------------------- - * Function: H5D__multi_chunk_collective_io - * - * Purpose: To do IO per chunk according to IO mode(collective/independent/none) - * - * 1. Use MPI_gather and MPI_Bcast to obtain IO mode in each chunk(collective/independent/none) - * 2. Depending on whether the IO mode is collective or independent or none, - * Create either MPI derived datatype for each chunk or just do independent IO - * 3. Use common collective IO routine to do MPI-IO - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Muqun Yang - * Monday, Feb. 13th, 2006 - * - * Modification: - * - Set H5D_MPIO_ACTUAL_CHUNK_OPT_MODE_NAME dxpl in this to go along with - * setting H5D_MPIO_ACTUAL_IO_MODE_NAME dxpl at the bottom. - * Programmer: Jonathan Kim - * Date: 2012-10-10 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, - H5D_chunk_map_t *fm, H5P_genplist_t *dx_plist) -{ - H5D_io_info_t ctg_io_info; /* Contiguous I/O info object */ - H5D_storage_t ctg_store; /* Chunk storage information as contiguous dataset */ - H5D_io_info_t cpt_io_info; /* Compact I/O info object */ - H5D_storage_t cpt_store; /* Chunk storage information as compact dataset */ - hbool_t cpt_dirty; /* Temporary placeholder for compact storage "dirty" flag */ - uint8_t *chunk_io_option = NULL; - haddr_t *chunk_addr = NULL; - H5D_storage_t store; /* union of EFL and chunk pointer in file space */ - H5FD_mpio_xfer_t last_xfer_mode = H5FD_MPIO_COLLECTIVE; /* Last parallel transfer for this request (H5D_XFER_IO_XFER_MODE_NAME) */ - H5FD_mpio_collective_opt_t last_coll_opt_mode = H5FD_MPIO_COLLECTIVE_IO; /* 
Last parallel transfer with independent IO or collective IO with this mode */ - size_t total_chunk; /* Total # of chunks in dataset */ -#ifdef H5Dmpio_DEBUG - int mpi_rank; -#endif - size_t u; /* Local index variable */ - H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode = H5D_MPIO_MULTI_CHUNK; /* actual chunk optimization mode */ - H5D_mpio_actual_io_mode_t actual_io_mode = H5D_MPIO_NO_COLLECTIVE; /* Local variable for tracking the I/O mode used. */ - herr_t ret_value = SUCCEED; - - FUNC_ENTER_STATIC - - /* Set the actual chunk opt mode property */ - if(H5P_set(dx_plist, H5D_MPIO_ACTUAL_CHUNK_OPT_MODE_NAME, &actual_chunk_opt_mode) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set actual chunk opt mode property") - -#ifdef H5Dmpio_DEBUG - mpi_rank = H5F_mpi_get_rank(io_info->dset->oloc.file); -#endif - - /* Retrieve total # of chunks in dataset */ - H5_CHECKED_ASSIGN(total_chunk, size_t, fm->layout->u.chunk.nchunks, hsize_t); - HDassert(total_chunk != 0); - - /* Allocate memories */ - chunk_io_option = (uint8_t *)H5MM_calloc(total_chunk); - chunk_addr = (haddr_t *)H5MM_calloc(total_chunk * sizeof(haddr_t)); -#ifdef H5D_DEBUG -if(H5DEBUG(D)) - HDfprintf(H5DEBUG(D), "total_chunk %Zu\n", total_chunk); -#endif - - /* Obtain IO option for each chunk */ - if(H5D__obtain_mpio_mode(io_info, fm, dx_plist, chunk_io_option, chunk_addr) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTRECV, FAIL, "unable to obtain MPIO mode") - - /* Set up contiguous I/O info object */ - HDmemcpy(&ctg_io_info, io_info, sizeof(ctg_io_info)); - ctg_io_info.store = &ctg_store; - ctg_io_info.layout_ops = *H5D_LOPS_CONTIG; - - /* Initialize temporary contiguous storage info */ - ctg_store.contig.dset_size = (hsize_t)io_info->dset->shared->layout.u.chunk.size; - - /* Set up compact I/O info object */ - HDmemcpy(&cpt_io_info, io_info, sizeof(cpt_io_info)); - cpt_io_info.store = &cpt_store; - cpt_io_info.layout_ops = *H5D_LOPS_COMPACT; - - /* Initialize temporary compact storage info */ - 
cpt_store.compact.dirty = &cpt_dirty; - - /* Set dataset storage for I/O info */ - io_info->store = &store; - - /* Loop over _all_ the chunks */ - for(u = 0; u < total_chunk; u++) { - H5D_chunk_info_t *chunk_info; /* Chunk info for current chunk */ - H5S_t *fspace; /* Dataspace describing chunk & selection in it */ - H5S_t *mspace; /* Dataspace describing selection in memory corresponding to this chunk */ - -#ifdef H5D_DEBUG -if(H5DEBUG(D)) - HDfprintf(H5DEBUG(D),"mpi_rank = %d, chunk index = %Zu\n", mpi_rank, u); -#endif - /* Get the chunk info for this chunk, if there are elements selected */ - chunk_info = fm->select_chunk[u]; - - /* Set the storage information for chunks with selections */ - if(chunk_info) { - HDassert(chunk_info->index == u); - - /* Pass in chunk's coordinates in a union. */ - store.chunk.scaled = chunk_info->scaled; - } /* end if */ - - /* Collective IO for this chunk, - * Note: even there is no selection for this process, the process still - * needs to contribute MPI NONE TYPE. - */ - if(chunk_io_option[u] == H5D_CHUNK_IO_MODE_COL) { -#ifdef H5D_DEBUG -if(H5DEBUG(D)) - HDfprintf(H5DEBUG(D),"inside collective chunk IO mpi_rank = %d, chunk index = %Zu\n", mpi_rank, u); -#endif - - /* Set the file & memory dataspaces */ - if(chunk_info) { - fspace = chunk_info->fspace; - mspace = chunk_info->mspace; - - /* Update the local variable tracking the dxpl's actual io mode property. - * - * Note: H5D_MPIO_COLLECTIVE_MULTI | H5D_MPIO_INDEPENDENT = H5D_MPIO_MIXED - * to ease switching between to mixed I/O without checking the current - * value of the property. 
You can see the definition in H5Ppublic.h - */ - actual_io_mode = actual_io_mode | H5D_MPIO_CHUNK_COLLECTIVE; - - } /* end if */ - else { - fspace = mspace = NULL; - } /* end else */ - - /* Switch back to collective I/O */ - if(last_xfer_mode != H5FD_MPIO_COLLECTIVE) { - if(H5D__ioinfo_xfer_mode(io_info, dx_plist, H5FD_MPIO_COLLECTIVE) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't switch to collective I/O") - last_xfer_mode = H5FD_MPIO_COLLECTIVE; - } /* end if */ - if(last_coll_opt_mode != H5FD_MPIO_COLLECTIVE_IO) { - if(H5D__ioinfo_coll_opt_mode(io_info, dx_plist, H5FD_MPIO_COLLECTIVE_IO) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't switch to collective I/O") - last_coll_opt_mode = H5FD_MPIO_COLLECTIVE_IO; - } /* end if */ - - /* Initialize temporary contiguous storage address */ - ctg_store.contig.dset_addr = chunk_addr[u]; - - /* Perform the I/O */ - if(H5D__inter_collective_io(&ctg_io_info, type_info, fspace, mspace) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish shared collective MPI-IO") - } /* end if */ - else { /* possible independent IO for this chunk */ -#ifdef H5D_DEBUG -if(H5DEBUG(D)) - HDfprintf(H5DEBUG(D),"inside independent IO mpi_rank = %d, chunk index = %Zu\n", mpi_rank, u); -#endif - - HDassert(chunk_io_option[u] == 0); - - /* Set the file & memory dataspaces */ - if(chunk_info) { - fspace = chunk_info->fspace; - mspace = chunk_info->mspace; - - /* Update the local variable tracking the dxpl's actual io mode. 
*/ - actual_io_mode = actual_io_mode | H5D_MPIO_CHUNK_INDEPENDENT; - } /* end if */ - else { - fspace = mspace = NULL; - } /* end else */ - - /* Using independent I/O with file setview.*/ - if(last_coll_opt_mode != H5FD_MPIO_INDIVIDUAL_IO) { - if(H5D__ioinfo_coll_opt_mode(io_info, dx_plist, H5FD_MPIO_INDIVIDUAL_IO) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't switch to individual I/O") - last_coll_opt_mode = H5FD_MPIO_INDIVIDUAL_IO; - } /* end if */ - - /* Initialize temporary contiguous storage address */ - ctg_store.contig.dset_addr = chunk_addr[u]; - - /* Perform the I/O */ - if(H5D__inter_collective_io(&ctg_io_info, type_info, fspace, mspace) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish shared collective MPI-IO") -#ifdef H5D_DEBUG - if(H5DEBUG(D)) - HDfprintf(H5DEBUG(D),"after inter collective IO\n"); -#endif - } /* end else */ - } /* end for */ - - /* Write the local value of actual io mode to the DXPL. */ - if(H5P_set(dx_plist, H5D_MPIO_ACTUAL_IO_MODE_NAME, &actual_io_mode) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "couldn't set actual io mode property") - -done: - if(chunk_io_option) - H5MM_xfree(chunk_io_option); - if(chunk_addr) - H5MM_xfree(chunk_addr); - - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D__multi_chunk_collective_io */ - - -/*------------------------------------------------------------------------- - * Function: H5D__inter_collective_io - * - * Purpose: Routine for the shared part of collective IO between multiple chunk - * collective IO and contiguous collective IO - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Muqun Yang - * Monday, Feb. 
13th, 2006 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5D__inter_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, - const H5S_t *file_space, const H5S_t *mem_space) -{ - int mpi_buf_count; /* # of MPI types */ - hbool_t mbt_is_derived = FALSE; - hbool_t mft_is_derived = FALSE; - MPI_Datatype mpi_file_type, mpi_buf_type; - int mpi_code; /* MPI return code */ - herr_t ret_value = SUCCEED; /* return value */ - - FUNC_ENTER_STATIC - - if((file_space != NULL) && (mem_space != NULL)) { - int mpi_file_count; /* Number of file "objects" to transfer */ - hsize_t *permute_map = NULL; /* array that holds the mapping from the old, - out-of-order displacements to the in-order - displacements of the MPI datatypes of the - point selection of the file space */ - hbool_t is_permuted = FALSE; - - /* Obtain disk and memory MPI derived datatype */ - /* NOTE: The permute_map array can be allocated within H5S_mpio_space_type - * and will be fed into the next call to H5S_mpio_space_type - * where it will be freed. 
- */ - if(H5S_mpio_space_type(file_space, type_info->src_type_size, - &mpi_file_type, &mpi_file_count, &mft_is_derived, /* OUT: datatype created */ - TRUE, /* this is a file space, so - permute the datatype if the - point selection is out of - order */ - &permute_map, /* OUT: a map to indicate - the permutation of - points selected in - case they are out of - order */ - &is_permuted /* OUT */) < 0) - HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI file type") - /* Sanity check */ - if(is_permuted) - HDassert(permute_map); - if(H5S_mpio_space_type(mem_space, type_info->src_type_size, - &mpi_buf_type, &mpi_buf_count, &mbt_is_derived, /* OUT: datatype created */ - FALSE, /* this is a memory space, so if - the file space is not - permuted, there is no need to - permute the datatype if the - point selections are out of - order*/ - &permute_map /* IN: the permutation map - generated by the - file_space selection - and applied to the - memory selection */, - &is_permuted /* IN */) < 0) - HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI buffer type") - /* Sanity check */ - if(is_permuted) - HDassert(!permute_map); - } /* end if */ - else { - /* For non-selection, participate with a none MPI derived datatype, the count is 0. 
*/ - mpi_buf_type = MPI_BYTE; - mpi_file_type = MPI_BYTE; - mpi_buf_count = 0; - mbt_is_derived = FALSE; - mft_is_derived = FALSE; - } /* end else */ - -#ifdef H5D_DEBUG -if(H5DEBUG(D)) - HDfprintf(H5DEBUG(D),"before final collective IO \n"); -#endif - - /* Perform final collective I/O operation */ - if(H5D__final_collective_io(io_info, type_info, (hsize_t)mpi_buf_count, &mpi_file_type, &mpi_buf_type) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish collective MPI-IO") - -done: - /* Free the MPI buf and file types, if they were derived */ - if(mbt_is_derived && MPI_SUCCESS != (mpi_code = MPI_Type_free(&mpi_buf_type))) - HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code) - if(mft_is_derived && MPI_SUCCESS != (mpi_code = MPI_Type_free(&mpi_file_type))) - HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code) - -#ifdef H5D_DEBUG -if(H5DEBUG(D)) - HDfprintf(H5DEBUG(D),"before leaving inter_collective_io ret_value = %d\n",ret_value); -#endif - - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D__inter_collective_io() */ +} /* end H5D__all_piece_collective_io */ /*------------------------------------------------------------------------- @@ -1428,7 +757,7 @@ if(H5DEBUG(D)) *------------------------------------------------------------------------- */ static herr_t -H5D__final_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, +H5D__final_collective_io(H5D_io_info_t *io_info, hsize_t mpi_buf_count, MPI_Datatype *mpi_file_type, MPI_Datatype *mpi_buf_type) { herr_t ret_value = SUCCEED; @@ -1440,11 +769,11 @@ H5D__final_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_inf HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set MPI-I/O properties") if(io_info->op_type == H5D_IO_OP_WRITE) { - if((io_info->io_ops.single_write)(io_info, type_info, mpi_buf_count, NULL, NULL) < 0) + if((io_info->io_ops.single_write_md)(io_info, mpi_buf_count, NULL, NULL) < 0) HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "optimized write failed") } /* 
end if */ else { - if((io_info->io_ops.single_read)(io_info, type_info, mpi_buf_count, NULL, NULL) < 0) + if((io_info->io_ops.single_read_md)(io_info, mpi_buf_count, NULL, NULL) < 0) HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "optimized read failed") } /* end else */ @@ -1478,364 +807,95 @@ H5D__cmp_chunk_addr(const void *chunk_addr_info1, const void *chunk_addr_info2) FUNC_ENTER_STATIC_NOERR - addr1 = ((const H5D_chunk_addr_info_t *)chunk_addr_info1)->chunk_addr; - addr2 = ((const H5D_chunk_addr_info_t *)chunk_addr_info2)->chunk_addr; + addr1 = ((const H5D_chunk_addr_info_t *)chunk_addr_info1)->piece_addr; + addr2 = ((const H5D_chunk_addr_info_t *)chunk_addr_info2)->piece_addr; FUNC_LEAVE_NOAPI(H5F_addr_cmp(addr1, addr2)) } /* end H5D__cmp_chunk_addr() */ /*------------------------------------------------------------------------- - * Function: H5D__sort_chunk - * - * Purpose: Routine to sort chunks in increasing order of chunk address - * Each chunk address is also obtained. - * - * Description: - * For most cases, the chunk address has already been sorted in increasing order. - * The special sorting flag is used to optimize this common case. - * quick sort is used for necessary sorting. - * - * Parameters: - * Input: H5D_io_info_t* io_info, - * H5D_chunk_map_t *fm(global chunk map struct) - * Input/Output: H5D_chunk_addr_info_t chunk_addr_info_array[] : array to store chunk address and information - * many_chunk_opt : flag to optimize the way to obtain chunk addresses - * for many chunks + * Function: H5D_match_coll_calls * - * Return: Non-negative on success/Negative on failure + * Purpose: For processes that are not reading/writing to a particular + * datasets through the multi-dataset interface, but are participating + * in the collective call, match the collective I/O calls from the + * other processes. * - * Programmer: Muqun Yang - * Monday, Feb. 13th, 2006 + * Return: non-negative on success, negative on failure. 
* *------------------------------------------------------------------------- */ -static herr_t -H5D__sort_chunk(H5D_io_info_t *io_info, const H5D_chunk_map_t *fm, - H5D_chunk_addr_info_t chunk_addr_info_array[], int sum_chunk) +herr_t +H5D__match_coll_calls(hid_t file_id, H5P_genplist_t *plist, hbool_t do_read) { - H5SL_node_t *chunk_node; /* Current node in chunk skip list */ - H5D_chunk_info_t *chunk_info; /* Current chunking info. of this node. */ - haddr_t chunk_addr; /* Current chunking address of this node */ - haddr_t *total_chunk_addr_array = NULL; /* The array of chunk address for the total number of chunk */ - hbool_t do_sort = FALSE; /* Whether the addresses need to be sorted */ - int bsearch_coll_chunk_threshold; - int many_chunk_opt = H5D_OBTAIN_ONE_CHUNK_ADDR_IND; - int mpi_size; /* Number of MPI processes */ - int mpi_code; /* MPI return code */ - int i; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_STATIC - - /* Retrieve # of MPI processes */ - if((mpi_size = H5F_mpi_get_size(io_info->dset->oloc.file)) < 0) - HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi size") - - /* Calculate the actual threshold to obtain all chunk addresses collectively - * The bigger this number is, the more possible the use of obtaining chunk - * address collectively. - */ - /* For non-optimization one-link IO, actual bsearch threshold is always - * 0, we would always want to obtain the chunk addresses individually - * for each process. 
- */ - bsearch_coll_chunk_threshold = (sum_chunk * 100) / ((int)fm->layout->u.chunk.nchunks * mpi_size); - if((bsearch_coll_chunk_threshold > H5D_ALL_CHUNK_ADDR_THRES_COL) - && ((sum_chunk / mpi_size) >= H5D_ALL_CHUNK_ADDR_THRES_COL_NUM)) - many_chunk_opt = H5D_OBTAIN_ALL_CHUNK_ADDR_COL; - -#ifdef H5D_DEBUG -if(H5DEBUG(D)) - HDfprintf(H5DEBUG(D), "many_chunk_opt= %d\n", many_chunk_opt); -#endif - - /* If we need to optimize the way to obtain the chunk address */ - if(many_chunk_opt != H5D_OBTAIN_ONE_CHUNK_ADDR_IND) { - int mpi_rank; - -#ifdef H5D_DEBUG -if(H5DEBUG(D)) - HDfprintf(H5DEBUG(D), "Coming inside H5D_OBTAIN_ALL_CHUNK_ADDR_COL\n"); -#endif - /* Allocate array for chunk addresses */ - if(NULL == (total_chunk_addr_array = H5MM_malloc(sizeof(haddr_t) * (size_t)fm->layout->u.chunk.nchunks))) - HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "unable to allocate memory chunk address array") - - /* Retrieve all the chunk addresses with process 0 */ - if((mpi_rank = H5F_mpi_get_rank(io_info->dset->oloc.file)) < 0) - HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi rank") - - if(mpi_rank == 0) { - if(H5D__chunk_addrmap(io_info, total_chunk_addr_array) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk address") - } /* end if */ - - /* Broadcasting the MPI_IO option info. and chunk address info. 
*/ - if(MPI_SUCCESS != (mpi_code = MPI_Bcast(total_chunk_addr_array, (int)(sizeof(haddr_t) * fm->layout->u.chunk.nchunks), MPI_BYTE, (int)0, io_info->comm))) - HMPI_GOTO_ERROR(FAIL, "MPI_BCast failed", mpi_code) - } /* end if */ - - /* Start at first node in chunk skip list */ - i = 0; - if(NULL == (chunk_node = H5SL_first(fm->sel_chunks))) - HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL,"couldn't get chunk node from skipped list") - - /* Iterate over all chunks for this process */ - while(chunk_node) { - if(NULL == (chunk_info = H5SL_item(chunk_node))) - HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL,"couldn't get chunk info from skipped list") - - if(many_chunk_opt == H5D_OBTAIN_ONE_CHUNK_ADDR_IND) { - H5D_chunk_ud_t udata; /* User data for querying chunk info */ - - /* Get address of chunk */ - if(H5D__chunk_lookup(io_info->dset, io_info->md_dxpl_id, chunk_info->scaled, &udata) < 0) - HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL, "couldn't get chunk info from skipped list") - chunk_addr = udata.chunk_block.offset; - } /* end if */ - else - chunk_addr = total_chunk_addr_array[chunk_info->index]; - - /* Check if chunk addresses are not in increasing order in the file */ - if(i > 0 && chunk_addr < chunk_addr_info_array[i - 1].chunk_addr) - do_sort = TRUE; - - /* Set the address & info for this chunk */ - chunk_addr_info_array[i].chunk_addr = chunk_addr; - chunk_addr_info_array[i].chunk_info = *chunk_info; - - /* Advance to next chunk in list */ - i++; - chunk_node = H5SL_next(chunk_node); - } /* end while */ + int local_cause = 0; + int global_cause = 0; + int mpi_code; + H5F_t *file; + H5FD_mpio_collective_opt_t para_io_mode; + H5FD_mpio_xfer_t xfer_mode; + herr_t ret_value = SUCCEED; -#ifdef H5D_DEBUG -if(H5DEBUG(D)) - HDfprintf(H5DEBUG(D), "before Qsort\n"); -#endif - if(do_sort) { - size_t num_chunks = H5SL_count(fm->sel_chunks); + FUNC_ENTER_PACKAGE - HDqsort(chunk_addr_info_array, num_chunks, sizeof(chunk_addr_info_array[0]), H5D__cmp_chunk_addr); - } /* end if */ + 
HDassert(file_id > 0); -done: - if(total_chunk_addr_array) - H5MM_xfree(total_chunk_addr_array); + /* Get the transfer mode */ + if(H5P_get(plist, H5D_XFER_IO_XFER_MODE_NAME, &xfer_mode) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "unable to get value") + HDassert(xfer_mode == H5FD_MPIO_COLLECTIVE); - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D__sort_chunk() */ + /* get parallel io mode */ + if(H5P_get(plist, H5D_XFER_MPIO_COLLECTIVE_OPT_NAME, ¶_io_mode) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "unable to get value") - -/*------------------------------------------------------------------------- - * Function: H5D__obtain_mpio_mode - * - * Purpose: Routine to obtain each io mode(collective,independent or none) for each chunk; - * Each chunk address is also obtained. - * - * Description: - * - * 1) Each process provides two piece of information for all chunks having selection - * a) chunk index - * b) wheather this chunk is regular(for MPI derived datatype not working case) - * - * 2) Gather all the information to the root process - * - * 3) Root process will do the following: - * a) Obtain chunk addresses for all chunks in this data space - * b) With the consideration of the user option, calculate IO mode for each chunk - * c) Build MPI derived datatype to combine "chunk address" and "assign_io" information - * in order to do MPI Bcast only once - * d) MPI Bcast the IO mode and chunk address information for each chunk. - * 4) Each process then retrieves IO mode and chunk address information to assign_io_mode and chunk_addr. - * - * Parameters: - * - * Input: H5D_io_info_t* io_info, - * H5D_chunk_map_t *fm,(global chunk map struct) - * Output: uint8_t assign_io_mode[], : IO mode, collective, independent or none - * haddr_t chunk_addr[], : chunk address array for each chunk - * - * Return: Non-negative on success/Negative on failure - * - * Programmer: Muqun Yang - * Monday, Feb. 
13th, 2006 - * - *------------------------------------------------------------------------- - */ -static herr_t -H5D__obtain_mpio_mode(H5D_io_info_t* io_info, H5D_chunk_map_t *fm, - H5P_genplist_t *dx_plist, uint8_t assign_io_mode[], haddr_t chunk_addr[]) -{ - int total_chunks; - unsigned percent_nproc_per_chunk, threshold_nproc_per_chunk; - uint8_t* io_mode_info = NULL; - uint8_t* recv_io_mode_info = NULL; - uint8_t* mergebuf = NULL; - uint8_t* tempbuf; - H5SL_node_t* chunk_node; - H5D_chunk_info_t* chunk_info; - int mpi_size, mpi_rank; - MPI_Comm comm; - int ic, root; - int mpi_code; -#ifdef H5_HAVE_INSTRUMENTED_LIBRARY - int new_value; - htri_t check_prop; -#endif - herr_t ret_value = SUCCEED; + if(NULL == (file = (H5F_t *)H5I_object_verify(file_id, H5I_FILE))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a file") - FUNC_ENTER_STATIC + /* just to match up with MPI_Allreduce from H5D__mpio_opt_possible() */ + if(MPI_SUCCESS != (mpi_code = MPI_Allreduce(&local_cause, &global_cause, 1, + MPI_INT, MPI_BOR, H5F_mpi_get_comm(file)))) + HMPI_GOTO_ERROR(FAIL, "MPI_Allreduce failed", mpi_code) - /* Assign the rank 0 to the root */ - root = 0; - comm = io_info->comm; - - /* Obtain the number of process and the current rank of the process */ - if((mpi_rank = H5F_mpi_get_rank(io_info->dset->oloc.file)) < 0) - HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi rank") - if((mpi_size = H5F_mpi_get_size(io_info->dset->oloc.file)) < 0) - HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi size") - - /* Setup parameters */ - H5_CHECKED_ASSIGN(total_chunks, int, fm->layout->u.chunk.nchunks, hsize_t); - if(H5P_get(dx_plist, H5D_XFER_MPIO_CHUNK_OPT_RATIO_NAME, &percent_nproc_per_chunk) < 0) - HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't get percent nproc per chunk") - /* if ratio is 0, perform collective io */ - if(0 == percent_nproc_per_chunk) { - if(H5D__chunk_addrmap(io_info, chunk_addr) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk 
address"); - for(ic = 0; ic < total_chunks; ic++) - assign_io_mode[ic] = H5D_CHUNK_IO_MODE_COL; - - HGOTO_DONE(SUCCEED) - } /* end if */ - threshold_nproc_per_chunk = mpi_size * percent_nproc_per_chunk/100; - - /* Allocate memory */ - if(NULL == (io_mode_info = (uint8_t *)H5MM_calloc(total_chunks))) - HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate I/O mode info buffer") - if(NULL == (mergebuf = H5MM_malloc((sizeof(haddr_t) + 1) * total_chunks))) - HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate mergebuf buffer") - tempbuf = mergebuf + total_chunks; - if(mpi_rank == root) - if(NULL == (recv_io_mode_info = (uint8_t *)H5MM_malloc(total_chunks * mpi_size))) - HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate recv I/O mode info buffer") - - /* Obtain the regularity and selection information for all chunks in this process. */ - chunk_node = H5SL_first(fm->sel_chunks); - while(chunk_node) { - chunk_info = H5SL_item(chunk_node); - - io_mode_info[chunk_info->index] = H5D_CHUNK_SELECT_REG; /* this chunk is selected and is "regular" */ - chunk_node = H5SL_next(chunk_node); - } /* end while */ - - /* Gather all the information */ - if(MPI_SUCCESS != (mpi_code = MPI_Gather(io_mode_info, total_chunks, MPI_BYTE, recv_io_mode_info, total_chunks, MPI_BYTE, root, comm))) - HMPI_GOTO_ERROR(FAIL, "MPI_Gather failed", mpi_code) - - /* Calculate the mode for IO(collective, independent or none) at root process */ - if(mpi_rank == root) { - int nproc; - int* nproc_per_chunk; - - /* pre-computing: calculate number of processes and - regularity of the selection occupied in each chunk */ - if(NULL == (nproc_per_chunk = (int*)H5MM_calloc(total_chunks * sizeof(int)))) - HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate nproc_per_chunk buffer") - - /* calculating the chunk address */ - if(H5D__chunk_addrmap(io_info, chunk_addr) < 0) { - H5MM_free(nproc_per_chunk); - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk 
address") + /* if collective mode is not broken according to the + * H5D__mpio_opt_possible, since the below MPI funcs will be + * called only in collective mode */ + if(!global_cause) { + MPI_Status mpi_stat; + MPI_File mpi_fh_p; + MPI_File mpi_fh; + + if(H5F_get_mpi_handle(file, (MPI_File **)&mpi_fh_p) < 0) + HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "can't get mpi file handle") + mpi_fh = *(MPI_File*)mpi_fh_p; + + /* just to match up with the 1st MPI_File_set_view from H5FD_mpio_read() */ + if(MPI_SUCCESS != (mpi_code = MPI_File_set_view(mpi_fh, (MPI_Offset)0, MPI_BYTE, + MPI_BYTE, "native", MPI_INFO_NULL))) + HMPI_GOTO_ERROR(FAIL, "MPI_File_set_view failed", mpi_code) + + /* just to match up with MPI_File_write_at_all from H5FD_mpio_read() */ + if(para_io_mode == H5FD_MPIO_COLLECTIVE_IO) { + HDmemset(&mpi_stat, 0, sizeof(MPI_Status)); + if(do_read) { + if(MPI_SUCCESS != (mpi_code = MPI_File_read_at_all(mpi_fh, 0, NULL, 0, MPI_BYTE, &mpi_stat))) + HMPI_GOTO_ERROR(FAIL, "MPI_File_read_at_all failed", mpi_code) + } + else { + if(MPI_SUCCESS != (mpi_code = MPI_File_write_at_all(mpi_fh, 0, NULL, 0, MPI_BYTE, &mpi_stat))) + HMPI_GOTO_ERROR(FAIL, "MPI_File_write_at_all failed", mpi_code) + } } /* end if */ - /* checking for number of process per chunk and regularity of the selection*/ - for(nproc = 0; nproc < mpi_size; nproc++) { - uint8_t *tmp_recv_io_mode_info = recv_io_mode_info + (nproc * total_chunks); - - /* Calculate the number of process per chunk and adding irregular selection option */ - for(ic = 0; ic < total_chunks; ic++, tmp_recv_io_mode_info++) { - if(*tmp_recv_io_mode_info != 0) { - nproc_per_chunk[ic]++; - } /* end if */ - } /* end for */ - } /* end for */ - - /* Calculating MPIO mode for each chunk (collective, independent, none) */ - for(ic = 0; ic < total_chunks; ic++) { - if(nproc_per_chunk[ic] > MAX(1, threshold_nproc_per_chunk)) { - assign_io_mode[ic] = H5D_CHUNK_IO_MODE_COL; - } /* end if */ - } /* end for */ - - - /* merge buffer io_mode info 
and chunk addr into one */ - HDmemcpy(mergebuf, assign_io_mode, total_chunks); - HDmemcpy(tempbuf, chunk_addr, sizeof(haddr_t) * total_chunks); - - H5MM_free(nproc_per_chunk); - } /* end if */ - - /* Broadcasting the MPI_IO option info. and chunk address info. */ - if(MPI_SUCCESS != (mpi_code = MPI_Bcast(mergebuf, ((sizeof(haddr_t) + 1) * total_chunks), MPI_BYTE, root, comm))) - HMPI_GOTO_ERROR(FAIL, "MPI_BCast failed", mpi_code) - - HDmemcpy(assign_io_mode, mergebuf, total_chunks); - HDmemcpy(chunk_addr, tempbuf, sizeof(haddr_t) * total_chunks); - -#ifdef H5_HAVE_INSTRUMENTED_LIBRARY -{ - H5P_genplist_t *plist; /* Property list pointer */ - - /* Get the dataset transfer property list */ - if(NULL == (plist = (H5P_genplist_t *)H5I_object(io_info->raw_dxpl_id))) - HGOTO_ERROR(H5E_IO, H5E_BADTYPE, FAIL, "not a dataset transfer property list") - - check_prop = H5P_exist_plist(plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME); - if(check_prop > 0) { - for(ic = 0; ic < total_chunks; ic++) { - if(assign_io_mode[ic] == H5D_CHUNK_IO_MODE_COL) { - new_value = 0; - if(H5P_set(plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, &new_value) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_UNSUPPORTED, FAIL, "unable to set property value") - break; - } /* end if */ - } /* end for */ - } /* end if */ - - check_prop = H5P_exist_plist(plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME); - if(check_prop > 0) { - int temp_count = 0; - - for(ic = 0; ic < total_chunks; ic++) { - if(assign_io_mode[ic] == H5D_CHUNK_IO_MODE_COL) { - temp_count++; - break; - } /* end if */ - } /* end for */ - if(temp_count == 0) { - new_value = 0; - if(H5P_set(plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, &new_value) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_UNSUPPORTED, FAIL, "unable to set property value") - } /* end if */ - } /* end if */ -} -#endif + /* just to match up with the 2nd MPI_File_set_view (reset) in H5FD_mpio_read() */ + if(MPI_SUCCESS != (mpi_code = MPI_File_set_view(mpi_fh, (MPI_Offset)0, MPI_BYTE, MPI_BYTE, + 
"native", MPI_INFO_NULL))) + HMPI_GOTO_ERROR(FAIL, "MPI_File_set_view failed", mpi_code) + } /* end if !global_cause */ done: - if(io_mode_info) - H5MM_free(io_mode_info); - if(mergebuf) - H5MM_free(mergebuf); - if(recv_io_mode_info) { - HDassert(mpi_rank == root); - H5MM_free(recv_io_mode_info); - } /* end if */ - FUNC_LEAVE_NOAPI(ret_value) -} /* end H5D__obtain_mpio_mode() */ -#endif /* H5_HAVE_PARALLEL */ +} /* H5D__match_coll_calls */ +#endif /* H5_HAVE_PARALLEL */ diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h index 2609412..4d8461f 100644 --- a/src/H5Dpkg.h +++ b/src/H5Dpkg.h @@ -45,7 +45,7 @@ /* Set the minimum object header size to create objects with */ #define H5D_MINHDR_SIZE 256 - +#if 0 /* [Simple] Macro to construct a H5D_io_info_t from it's components */ #define H5D_BUILD_IO_INFO_WRT(io_info, ds, dxpl_c, dxpl_m, dxpl_r, str, buf) \ (io_info)->dset = ds; \ @@ -63,6 +63,7 @@ (io_info)->store = str; \ (io_info)->op_type = H5D_IO_OP_READ; \ (io_info)->u.rbuf = buf +#endif /* Flags for marking aspects of a dataset dirty */ #define H5D_MARK_SPACE 0x01 @@ -120,23 +121,27 @@ typedef struct H5D_type_info_t { /* Forward declaration of structs used below */ struct H5D_io_info_t; -struct H5D_chunk_map_t; +struct H5D_dset_info_t; /* Function pointers for I/O on particular types of dataset layouts */ typedef herr_t (*H5D_layout_construct_func_t)(H5F_t *f, H5D_t *dset); typedef herr_t (*H5D_layout_init_func_t)(H5F_t *f, hid_t dxpl_id, const H5D_t *dset, hid_t dapl_id); typedef hbool_t (*H5D_layout_is_space_alloc_func_t)(const H5O_storage_t *storage); -typedef herr_t (*H5D_layout_io_init_func_t)(const struct H5D_io_info_t *io_info, +typedef herr_t (*H5D_layout_io_init_func_t)(struct H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space, - struct H5D_chunk_map_t *cm); + struct H5D_dset_info_t *dinfo); typedef herr_t (*H5D_layout_read_func_t)(struct H5D_io_info_t *io_info, const H5D_type_info_t *type_info, 
hsize_t nelmts, const H5S_t *file_space, - const H5S_t *mem_space, struct H5D_chunk_map_t *fm); + const H5S_t *mem_space, struct H5D_dset_info_t *dinfo); typedef herr_t (*H5D_layout_write_func_t)(struct H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, const H5S_t *file_space, - const H5S_t *mem_space, struct H5D_chunk_map_t *fm); + const H5S_t *mem_space, struct H5D_dset_info_t *dinfo); +typedef herr_t (*H5D_layout_read_md_func_t)(const hid_t file_id, const size_t count, + struct H5D_io_info_t *io_info); +typedef herr_t (*H5D_layout_write_md_func_t)(const hid_t file_id, const size_t count, + struct H5D_io_info_t *io_info); typedef ssize_t (*H5D_layout_readvv_func_t)(const struct H5D_io_info_t *io_info, size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[]); @@ -144,7 +149,7 @@ typedef ssize_t (*H5D_layout_writevv_func_t)(const struct H5D_io_info_t *io_info size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[]); typedef herr_t (*H5D_layout_flush_func_t)(H5D_t *dataset, hid_t dxpl_id); -typedef herr_t (*H5D_layout_io_term_func_t)(const struct H5D_chunk_map_t *cm); +typedef herr_t (*H5D_layout_io_term_func_t)(struct H5D_io_info_t *io_info, struct H5D_dset_info_t *di); typedef herr_t (*H5D_layout_dest_func_t)(H5D_t *dataset, hid_t dxpl_id); /* Typedef for grouping layout I/O routines */ @@ -156,13 +161,13 @@ typedef struct H5D_layout_ops_t { H5D_layout_read_func_t ser_read; /* High-level I/O routine for reading data in serial */ H5D_layout_write_func_t ser_write; /* High-level I/O routine for writing data in serial */ #ifdef H5_HAVE_PARALLEL - H5D_layout_read_func_t par_read; /* High-level I/O routine for reading data in parallel */ - H5D_layout_write_func_t par_write; /* High-level I/O 
routine for writing data in parallel */ + H5D_layout_read_md_func_t par_read; /* High-level I/O routine for reading data in parallel */ + H5D_layout_write_md_func_t par_write; /* High-level I/O routine for writing data in parallel */ #endif /* H5_HAVE_PARALLEL */ H5D_layout_readvv_func_t readvv; /* Low-level I/O routine for reading data */ H5D_layout_writevv_func_t writevv; /* Low-level I/O routine for writing data */ H5D_layout_flush_func_t flush; /* Low-level I/O routine for flushing raw data */ - H5D_layout_io_term_func_t io_term; /* I/O shutdown routine */ + H5D_layout_io_term_func_t io_term; /* I/O shutdown routine for multi-dset */ H5D_layout_dest_func_t dest; /* Destroy layout info */ } H5D_layout_ops_t; @@ -174,12 +179,21 @@ typedef herr_t (*H5D_io_single_write_func_t)(const struct H5D_io_info_t *io_info const H5D_type_info_t *type_info, hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space); +typedef herr_t (*H5D_io_single_read_md_func_t)(const struct H5D_io_info_t *io_info, hsize_t nelmts, + const H5S_t *file_space, const H5S_t *mem_space); +typedef herr_t (*H5D_io_single_write_md_func_t)(const struct H5D_io_info_t *io_info, hsize_t nelmts, + const H5S_t *file_space, const H5S_t *mem_space); + /* Typedef for raw data I/O framework info */ typedef struct H5D_io_ops_t { H5D_layout_read_func_t multi_read; /* High-level I/O routine for reading data */ H5D_layout_write_func_t multi_write; /* High-level I/O routine for writing data */ H5D_io_single_read_func_t single_read; /* I/O routine for reading single block */ H5D_io_single_write_func_t single_write; /* I/O routine for writing single block */ + H5D_layout_read_md_func_t multi_read_md; /* High-level I/O routine for reading data for multi-dset */ + H5D_layout_write_md_func_t multi_write_md; /* High-level I/O routine for writing data for multi-dset */ + H5D_io_single_read_md_func_t single_read_md; /* I/O routine for reading single block for multi-dset */ + H5D_io_single_write_md_func_t 
single_write_md; /* I/O routine for writing single block for multi-dset */ } H5D_io_ops_t; /* Typedefs for dataset storage information */ @@ -210,8 +224,63 @@ typedef enum H5D_io_op_type_t { H5D_IO_OP_WRITE /* Write operation */ } H5D_io_op_type_t; +/* piece info for multiple dsets. */ +typedef struct H5D_piece_info_t { + haddr_t faddr; /* file addr. key of skip list */ + hsize_t index; /* "Index" of chunk in dataset */ + uint32_t piece_points; /* Number of elements selected in piece */ + hsize_t scaled[H5O_LAYOUT_NDIMS]; /* Scaled coordinates of chunk (in file dataset's dataspace) */ + const H5S_t *fspace; /* Dataspace describing chunk & selection in it */ + unsigned fspace_shared; /* Indicate that the file space for a chunk is shared and shouldn't be freed */ + const H5S_t *mspace; /* Dataspace describing selection in memory corresponding to this chunk */ + unsigned mspace_shared; /* Indicate that the memory space for a chunk is shared and shouldn't be freed */ + struct H5D_dset_info_t *dset_info; /* Pointer to dset_info */ +} H5D_piece_info_t; + +/* Union for read/write dataset buffers */ +typedef union H5D_dset_buf_t { + void *rbuf; /* Pointer to buffer for read */ + const void *wbuf; /* Pointer to buffer to write */ +} H5D_dset_buf_t; + +/* dset info for multiple dsets */ +typedef struct H5D_dset_info_t { + H5D_t *dset; /* Pointer to dataset being operated on */ + H5D_storage_t *store; /* Dataset storage info */ + H5D_layout_ops_t layout_ops; /* Dataset layout I/O operation function pointers */ + H5D_dset_buf_t u; /* Buffer pointer */ + + H5O_layout_t *layout; /* Dataset layout information*/ + hsize_t nelmts; /* Number of elements selected in file & memory dataspaces */ + + const H5S_t *file_space; /* Pointer to the file dataspace */ + H5S_sel_type fsel_type; /* Selection type in file */ + unsigned f_ndims; /* Number of dimensions for file dataspace */ + hsize_t f_dims[H5O_LAYOUT_NDIMS]; /* File dataspace dimensions */ + + const H5S_t *mem_space; /* Pointer to 
the memory dataspace */ + H5S_t *mchunk_tmpl; /* Dataspace template for new memory chunks */ + H5S_sel_iter_t mem_iter; /* Iterator for elements in memory selection */ + unsigned m_ndims; /* Number of dimensions for memory dataspace */ + H5S_sel_type msel_type; /* Selection type in memory */ + + H5SL_t *dset_sel_pieces; /* Skiplist of selected pieces in this dataset, indexed by index */ + + H5S_t *single_space; /* Dataspace for single chunk */ + H5D_piece_info_t *single_piece_info; + hbool_t use_single; /* Whether I/O is on a single element */ + + hsize_t last_index; /* Index of last chunk operated on */ + H5D_piece_info_t *last_piece_info; /* Pointer to last piece's info */ + + hsize_t chunk_dim[H5O_LAYOUT_NDIMS]; /* Size of chunk in each dimension */ + + hid_t mem_type_id; /* memory datatype ID */ + H5D_type_info_t type_info; + hbool_t type_info_init; +} H5D_dset_info_t; + typedef struct H5D_io_info_t { - const H5D_t *dset; /* Pointer to dataset being operated on */ #ifndef H5_HAVE_PARALLEL const #endif /* H5_HAVE_PARALLEL */ @@ -227,16 +296,26 @@ typedef struct H5D_io_info_t { H5D_io_ops_t io_ops; /* I/O operation function pointers */ } orig; #endif /* H5_HAVE_PARALLEL */ - H5D_storage_t *store; /* Dataset storage info */ - H5D_layout_ops_t layout_ops; /* Dataset layout I/O operation function pointers */ H5D_io_ops_t io_ops; /* I/O operation function pointers */ H5D_io_op_type_t op_type; - union { - void *rbuf; /* Pointer to buffer for read */ - const void *wbuf; /* Pointer to buffer to write */ - } u; + + H5D_t *dset; /* Pointer to dataset being operated on */ + H5D_dset_info_t *dsets_info; /* dsets info where I/O is done to/from */ + H5SL_t *sel_pieces; /* Skip list containing information for each piece selected */ + + haddr_t store_faddr; /* lowest file addr for read/write */ + const void * base_maddr_w; /* start mem addr for write */ + void * base_maddr_r; /* start mem addr for read */ + + hbool_t is_coll_broken; /* is collective mode broken? 
*/ } H5D_io_info_t; +/* created to pass both at once for callback func */ +typedef struct H5D_io_info_wrap_t { + H5D_io_info_t *io_info; + H5D_dset_info_t *dinfo; +} H5D_io_info_wrap_t; + /******************/ /* Chunk typedefs */ @@ -342,48 +421,6 @@ typedef struct H5D_chunk_ops_t { H5D_chunk_dest_func_t dest; /* Routine to destroy indexing information in memory */ } H5D_chunk_ops_t; -/* Structure holding information about a chunk's selection for mapping */ -typedef struct H5D_chunk_info_t { - hsize_t index; /* "Index" of chunk in dataset */ - uint32_t chunk_points; /* Number of elements selected in chunk */ - hsize_t scaled[H5O_LAYOUT_NDIMS]; /* Scaled coordinates of chunk (in file dataset's dataspace) */ - H5S_t *fspace; /* Dataspace describing chunk & selection in it */ - hbool_t fspace_shared; /* Indicate that the file space for a chunk is shared and shouldn't be freed */ - H5S_t *mspace; /* Dataspace describing selection in memory corresponding to this chunk */ - hbool_t mspace_shared; /* Indicate that the memory space for a chunk is shared and shouldn't be freed */ -} H5D_chunk_info_t; - -/* Main structure holding the mapping between file chunks and memory */ -typedef struct H5D_chunk_map_t { - H5O_layout_t *layout; /* Dataset layout information*/ - hsize_t nelmts; /* Number of elements selected in file & memory dataspaces */ - - const H5S_t *file_space; /* Pointer to the file dataspace */ - unsigned f_ndims; /* Number of dimensions for file dataspace */ - - const H5S_t *mem_space; /* Pointer to the memory dataspace */ - H5S_t *mchunk_tmpl; /* Dataspace template for new memory chunks */ - H5S_sel_iter_t mem_iter; /* Iterator for elements in memory selection */ - unsigned m_ndims; /* Number of dimensions for memory dataspace */ - H5S_sel_type msel_type; /* Selection type in memory */ - H5S_sel_type fsel_type; /* Selection type in file */ - - H5SL_t *sel_chunks; /* Skip list containing information for each chunk selected */ - - H5S_t *single_space; /* Dataspace 
for single chunk */ - H5D_chunk_info_t *single_chunk_info; /* Pointer to single chunk's info */ - hbool_t use_single; /* Whether I/O is on a single element */ - - hsize_t last_index; /* Index of last chunk operated on */ - H5D_chunk_info_t *last_chunk_info; /* Pointer to last chunk's info */ - - hsize_t chunk_dim[H5O_LAYOUT_NDIMS]; /* Size of chunk in each dimension */ - -#ifdef H5_HAVE_PARALLEL - H5D_chunk_info_t **select_chunk; /* Store the information about whether this chunk is selected or not */ -#endif /* H5_HAVE_PARALLEL */ -} H5D_chunk_map_t; - /* Cached information about a particular chunk */ typedef struct H5D_chunk_cached_t { hbool_t valid; /*whether cache info is valid*/ @@ -415,7 +452,7 @@ typedef struct H5D_rdcc_t { struct H5D_rdcc_ent_t **slot; /* Chunk slots, each points to a chunk*/ H5SL_t *sel_chunks; /* Skip list containing information for each chunk selected */ H5S_t *single_space; /* Dataspace for single element I/O on chunks */ - H5D_chunk_info_t *single_chunk_info; /* Pointer to single chunk's info */ + H5D_piece_info_t *single_piece_info; /* Pointer to single piece's info */ /* Cached information about scaled dataspace dimensions */ hsize_t scaled_dims[H5S_MAX_RANK]; /* The scaled dim sizes */ @@ -463,6 +500,7 @@ typedef struct H5D_shared_t { * dataset in certain circumstances) */ H5D_rdcc_t chunk; /* Information about chunked data */ + H5SL_t *sel_pieces; /* Skip list containing information for each piece selected */ } cache; char *extfile_prefix; /* expanded external file prefix */ @@ -580,13 +618,11 @@ H5_DLL herr_t H5D__mark(const H5D_t *dataset, hid_t dxpl_id, unsigned flags); H5_DLL herr_t H5D_set_io_info_dxpls(H5D_io_info_t *io_info, hid_t dxpl_id); #endif /* H5_DEBUG_BUILD */ -/* Internal I/O routines */ -H5_DLL herr_t H5D__read(H5D_t *dataset, hid_t mem_type_id, - const H5S_t *mem_space, const H5S_t *file_space, hid_t dset_xfer_plist, - void *buf/*out*/); -H5_DLL herr_t H5D__write(H5D_t *dataset, hid_t mem_type_id, - const H5S_t 
*mem_space, const H5S_t *file_space, hid_t dset_xfer_plist, - const void *buf); +/* Functions to do I/O */ +H5_DLL herr_t H5D__read(hid_t file_id, hid_t dxpl_id, size_t count, + H5D_dset_info_t *dset_info); +H5_DLL herr_t H5D__write(hid_t file_id, hid_t dxpl_id, size_t count, + H5D_dset_info_t *dset_info); /* Functions that perform direct serial I/O operations */ H5_DLL herr_t H5D__select_read(const H5D_io_info_t *io_info, @@ -629,10 +665,10 @@ H5_DLL hbool_t H5D__contig_is_space_alloc(const H5O_storage_t *storage); H5_DLL herr_t H5D__contig_fill(const H5D_io_info_t *io_info); H5_DLL herr_t H5D__contig_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space, - H5D_chunk_map_t *fm); + H5D_dset_info_t *dinfo); H5_DLL herr_t H5D__contig_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space, - H5D_chunk_map_t *fm); + H5D_dset_info_t *dinfo); H5_DLL herr_t H5D__contig_copy(H5F_t *f_src, const H5O_storage_contig_t *storage_src, H5F_t *f_dst, H5O_storage_contig_t *storage_dst, H5T_t *src_dtype, H5O_copy_t *cpy_info, hid_t dxpl_id); @@ -640,8 +676,8 @@ H5_DLL herr_t H5D__contig_delete(H5F_t *f, hid_t dxpl_id, const H5O_storage_t *store); /* Functions that operate on chunked dataset storage */ -H5_DLL htri_t H5D__chunk_cacheable(const H5D_io_info_t *io_info, haddr_t caddr, - hbool_t write_op); +H5_DLL htri_t H5D__chunk_cacheable(const H5D_io_info_t *io_info, H5D_dset_info_t *dset_info, + haddr_t caddr, hbool_t write_op); H5_DLL herr_t H5D__chunk_create(const H5D_t *dset /*in,out*/, hid_t dxpl_id); H5_DLL herr_t H5D__chunk_set_info(const H5D_t *dset); H5_DLL hbool_t H5D__chunk_is_space_alloc(const H5O_storage_t *storage); @@ -713,41 +749,35 @@ H5_DLL herr_t H5D__fill_term(H5D_fill_buf_info_t *fb_info); #define H5Dmpio_DEBUG #endif /*H5Dmpio_DEBUG*/ #endif/*H5S_DEBUG*/ -/* MPI-IO function to read, it will select either regular or 
irregular read */ + +/* MPI-IO function to read multi-dsets (Chunk, Contig), it will select either + * regular or irregular read */ H5_DLL herr_t H5D__mpio_select_read(const H5D_io_info_t *io_info, - const H5D_type_info_t *type_info, hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space); - -/* MPI-IO function to write, it will select either regular or irregular read */ +/* MPI-IO function to write multi-dsets (Chunk, Contig), it will select either + * regular or irregular write */ H5_DLL herr_t H5D__mpio_select_write(const H5D_io_info_t *io_info, - const H5D_type_info_t *type_info, hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space); -/* MPI-IO functions to handle contiguous collective IO */ -H5_DLL herr_t H5D__contig_collective_read(H5D_io_info_t *io_info, - const H5D_type_info_t *type_info, hsize_t nelmts, const H5S_t *file_space, - const H5S_t *mem_space, H5D_chunk_map_t *fm); -H5_DLL herr_t H5D__contig_collective_write(H5D_io_info_t *io_info, - const H5D_type_info_t *type_info, hsize_t nelmts, const H5S_t *file_space, - const H5S_t *mem_space, H5D_chunk_map_t *fm); +/* MPI-IO functions to handle collective IO for multiple dsets (CONTIG, CHUNK) */ +H5_DLL herr_t H5D__collective_read(const hid_t file_id, const size_t count, H5D_io_info_t *io_info); +H5_DLL herr_t H5D__collective_write(const hid_t file_id, const size_t count, H5D_io_info_t *io_info); -/* MPI-IO functions to handle chunked collective IO */ -H5_DLL herr_t H5D__chunk_collective_read(H5D_io_info_t *io_info, - const H5D_type_info_t *type_info, hsize_t nelmts, const H5S_t *file_space, - const H5S_t *mem_space, H5D_chunk_map_t *fm); -H5_DLL herr_t H5D__chunk_collective_write(H5D_io_info_t *io_info, - const H5D_type_info_t *type_info, hsize_t nelmts, const H5S_t *file_space, - const H5S_t *mem_space, H5D_chunk_map_t *fm); /* MPI-IO function to check if a direct I/O transfer is possible between * memory and the file */ -H5_DLL htri_t H5D__mpio_opt_possible(const H5D_io_info_t 
*io_info, - const H5S_t *file_space, const H5S_t *mem_space, - const H5D_type_info_t *type_info, const H5D_chunk_map_t *fm, - H5P_genplist_t *dx_plist); +H5_DLL htri_t H5D__mpio_opt_possible(const size_t count, H5D_io_info_t *io_info, + H5P_genplist_t *dx_plist); + +/* function to invoke collective I/O calls for ranks that have no I/O + on a dataset to match other ranks' collective calls */ +H5_DLL herr_t H5D__match_coll_calls(hid_t file_id, H5P_genplist_t *plist, hbool_t do_read); #endif /* H5_HAVE_PARALLEL */ +/* for both CHUNK and CONTIG dset skiplist free (sel_pieces) for layout_ops.io_term. */ +H5_DLL herr_t H5D__piece_io_term(H5D_io_info_t *io_info, H5D_dset_info_t *di); + /* Testing functions */ #ifdef H5D_TESTING H5_DLL herr_t H5D__layout_version_test(hid_t did, unsigned *version); diff --git a/src/H5Dprivate.h b/src/H5Dprivate.h index ab60a50..03a573c 100644 --- a/src/H5Dprivate.h +++ b/src/H5Dprivate.h @@ -85,11 +85,6 @@ #ifdef H5_HAVE_INSTRUMENTED_LIBRARY /* Collective chunk instrumentation properties */ #define H5D_XFER_COLL_CHUNK_LINK_HARD_NAME "coll_chunk_link_hard" -#define H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME "coll_chunk_multi_hard" -#define H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME "coll_chunk_link_true" -#define H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME "coll_chunk_link_false" -#define H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME "coll_chunk_multi_coll" -#define H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME "coll_chunk_multi_ind" /* Definitions for all collective chunk instrumentation properties */ #define H5D_XFER_COLL_CHUNK_SIZE sizeof(unsigned) diff --git a/src/H5Dpublic.h b/src/H5Dpublic.h index 07f8dbe..3d61ac6 100644 --- a/src/H5Dpublic.h +++ b/src/H5Dpublic.h @@ -25,6 +25,7 @@ #include "H5public.h" #include "H5Ipublic.h" + /*****************/ /* Public Macros */ /*****************/ @@ -43,6 +44,7 @@ #define H5D_XFER_DIRECT_CHUNK_WRITE_OFFSET_NAME "direct_chunk_offset" #define H5D_XFER_DIRECT_CHUNK_WRITE_DATASIZE_NAME "direct_chunk_datasize" + 
/*******************/ /* Public Typedefs */ /*******************/ @@ -100,6 +102,19 @@ typedef enum H5D_fill_value_t { H5D_FILL_VALUE_USER_DEFINED =2 } H5D_fill_value_t; +/* parameter struct for multi-dset Read/Write */ +typedef struct H5D_rw_multi_t +{ + hid_t dset_id; /* dataset ID */ + hid_t dset_space_id; /* dataset selection dataspace ID */ + hid_t mem_type_id; /* memory datatype ID */ + hid_t mem_space_id; /* memory selection dataspace ID */ + union { + void *rbuf; /* pointer to read buffer */ + const void *wbuf; /* pointer to write buffer */ + } u; +} H5D_rw_multi_t; + /* Values for VDS bounds option */ typedef enum H5D_vds_view_t { H5D_VDS_ERROR = -1, @@ -111,6 +126,7 @@ typedef enum H5D_vds_view_t { /* Public Variables */ /********************/ + /*********************/ /* Public Prototypes */ /*********************/ @@ -145,15 +161,17 @@ H5_DLL hid_t H5Dget_access_plist(hid_t dset_id); H5_DLL hsize_t H5Dget_storage_size(hid_t dset_id); H5_DLL haddr_t H5Dget_offset(hid_t dset_id); H5_DLL herr_t H5Dread(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id, - hid_t file_space_id, hid_t plist_id, void *buf/*out*/); + hid_t file_space_id, hid_t plist_id, void *buf/*out*/); +H5_DLL herr_t H5Dread_multi(hid_t dxpl_id, size_t count, H5D_rw_multi_t *info); H5_DLL herr_t H5Dwrite(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id, - hid_t file_space_id, hid_t plist_id, const void *buf); + hid_t file_space_id, hid_t plist_id, const void *buf); +H5_DLL herr_t H5Dwrite_multi(hid_t dxpl_id, size_t count, const H5D_rw_multi_t *info); H5_DLL herr_t H5Diterate(void *buf, hid_t type_id, hid_t space_id, - H5D_operator_t op, void *operator_data); + H5D_operator_t op, void *operator_data); H5_DLL herr_t H5Dvlen_reclaim(hid_t type_id, hid_t space_id, hid_t plist_id, void *buf); H5_DLL herr_t H5Dvlen_get_buf_size(hid_t dataset_id, hid_t type_id, hid_t space_id, hsize_t *size); H5_DLL herr_t H5Dfill(const void *fill, hid_t fill_type, void *buf, - hid_t buf_type, hid_t space); 
+ hid_t buf_type, hid_t space); H5_DLL herr_t H5Dset_extent(hid_t dset_id, const hsize_t size[]); H5_DLL herr_t H5Dscatter(H5D_scatter_func_t op, void *op_data, hid_t type_id, hid_t dst_space_id, void *dst_buf); diff --git a/src/H5Dscatgath.c b/src/H5Dscatgath.c index 7c1abca..0395e52 100644 --- a/src/H5Dscatgath.c +++ b/src/H5Dscatgath.c @@ -97,6 +97,7 @@ H5D__scatter_file(const H5D_io_info_t *_io_info, const void *_buf) { H5D_io_info_t tmp_io_info; /* Temporary I/O info object */ + H5D_dset_info_t tmp_dset_info; /* Temporary I/O info object */ hsize_t _off[H5D_IO_VECTOR_SIZE]; /* Array to store sequence offsets */ hsize_t *off = NULL; /* Pointer to sequence offsets */ hsize_t mem_off; /* Offset in memory */ @@ -120,8 +121,10 @@ H5D__scatter_file(const H5D_io_info_t *_io_info, /* Set up temporary I/O info object */ HDmemcpy(&tmp_io_info, _io_info, sizeof(*_io_info)); + HDmemcpy(&tmp_dset_info, &(_io_info->dsets_info[0]), sizeof(tmp_dset_info)); tmp_io_info.op_type = H5D_IO_OP_WRITE; - tmp_io_info.u.wbuf = _buf; + tmp_dset_info.u.wbuf = _buf; + tmp_io_info.dsets_info = &tmp_dset_info; /* Allocate the vector I/O arrays */ if(tmp_io_info.dxpl_cache->vec_size > H5D_IO_VECTOR_SIZE) { @@ -147,12 +150,12 @@ H5D__scatter_file(const H5D_io_info_t *_io_info, mem_off = 0; /* Write sequence list out */ - if((*tmp_io_info.layout_ops.writevv)(&tmp_io_info, nseq, &dset_curr_seq, + if((*tmp_dset_info.layout_ops.writevv)(&tmp_io_info, nseq, &dset_curr_seq, len, off, (size_t)1, &mem_curr_seq, &mem_len, &mem_off) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_WRITEERROR, FAIL, "write error") /* Update buffer */ - tmp_io_info.u.wbuf = (const uint8_t *)tmp_io_info.u.wbuf + orig_mem_len; + tmp_dset_info.u.wbuf = (const uint8_t *)tmp_dset_info.u.wbuf + orig_mem_len; /* Decrement number of elements left to process */ nelmts -= nelem; @@ -196,6 +199,7 @@ H5D__gather_file(const H5D_io_info_t *_io_info, void *_buf/*out*/) { H5D_io_info_t tmp_io_info; /* Temporary I/O info object */ + H5D_dset_info_t 
tmp_dset_info; /* Temporary I/O info object */ hsize_t _off[H5D_IO_VECTOR_SIZE]; /* Array to store sequence offsets */ hsize_t *off = NULL; /* Pointer to sequence offsets */ hsize_t mem_off; /* Offset in memory */ @@ -212,8 +216,8 @@ H5D__gather_file(const H5D_io_info_t *_io_info, /* Check args */ HDassert(_io_info); - HDassert(_io_info->dset); - HDassert(_io_info->store); + HDassert(_io_info->dsets_info[0].dset); + HDassert(_io_info->dsets_info[0].store); HDassert(space); HDassert(iter); HDassert(nelmts > 0); @@ -221,8 +225,10 @@ H5D__gather_file(const H5D_io_info_t *_io_info, /* Set up temporary I/O info object */ HDmemcpy(&tmp_io_info, _io_info, sizeof(*_io_info)); + HDmemcpy(&tmp_dset_info, &(_io_info->dsets_info[0]), sizeof(tmp_dset_info)); tmp_io_info.op_type = H5D_IO_OP_READ; - tmp_io_info.u.rbuf = _buf; + tmp_dset_info.u.rbuf = _buf; + tmp_io_info.dsets_info = &tmp_dset_info; /* Allocate the vector I/O arrays */ if(tmp_io_info.dxpl_cache->vec_size > H5D_IO_VECTOR_SIZE) { @@ -248,12 +254,12 @@ H5D__gather_file(const H5D_io_info_t *_io_info, mem_off = 0; /* Read sequence list in */ - if((*tmp_io_info.layout_ops.readvv)(&tmp_io_info, nseq, &dset_curr_seq, + if((*tmp_dset_info.layout_ops.readvv)(&tmp_io_info, nseq, &dset_curr_seq, len, off, (size_t)1, &mem_curr_seq, &mem_len, &mem_off) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_READERROR, 0, "read error") /* Update buffer */ - tmp_io_info.u.rbuf = (uint8_t *)tmp_io_info.u.rbuf + orig_mem_len; + tmp_dset_info.u.rbuf = (uint8_t *)tmp_dset_info.u.rbuf + orig_mem_len; /* Decrement number of elements left to process */ nelmts -= nelem; @@ -459,7 +465,7 @@ H5D__scatgath_read(const H5D_io_info_t *io_info, const H5D_type_info_t *type_inf hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space) { const H5D_dxpl_cache_t *dxpl_cache = io_info->dxpl_cache; /* Local pointer to dataset transfer info */ - void *buf = io_info->u.rbuf; /* Local pointer to application buffer */ + void *buf = io_info->dsets_info[0].u.rbuf; /* 
Local pointer to application buffer */ H5S_sel_iter_t mem_iter; /*memory selection iteration info*/ hbool_t mem_iter_init = FALSE; /*memory selection iteration info has been initialized */ H5S_sel_iter_t bkg_iter; /*background iteration info*/ @@ -591,7 +597,7 @@ H5D__scatgath_write(const H5D_io_info_t *io_info, const H5D_type_info_t *type_in hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space) { const H5D_dxpl_cache_t *dxpl_cache = io_info->dxpl_cache; /* Local pointer to dataset transfer info */ - const void *buf = io_info->u.wbuf; /* Local pointer to application buffer */ + const void *buf = io_info->dsets_info[0].u.wbuf; /* Local pointer to application buffer */ H5S_sel_iter_t mem_iter; /*memory selection iteration info*/ hbool_t mem_iter_init = FALSE; /*memory selection iteration info has been initialized */ H5S_sel_iter_t bkg_iter; /*background iteration info*/ diff --git a/src/H5Dselect.c b/src/H5Dselect.c index 312beba..f130171 100644 --- a/src/H5Dselect.c +++ b/src/H5Dselect.c @@ -109,10 +109,10 @@ H5D__select_io(const H5D_io_info_t *io_info, size_t elmt_size, /* Check args */ HDassert(io_info); - HDassert(io_info->dset); - HDassert(io_info->store); + HDassert(io_info->dsets_info[0].dset); + HDassert(io_info->dsets_info[0].store); HDassert(TRUE == H5P_isa_class(io_info->raw_dxpl_id, H5P_DATASET_XFER)); - HDassert(io_info->u.rbuf); + HDassert(io_info->dsets_info[0].u.rbuf); /* Allocate the vector I/O arrays */ if(io_info->dxpl_cache->vec_size > H5D_IO_VECTOR_SIZE) { @@ -149,14 +149,14 @@ H5D__select_io(const H5D_io_info_t *io_info, size_t elmt_size, /* Perform I/O on memory and file sequences */ if(io_info->op_type == H5D_IO_OP_READ) { - if((tmp_file_len = (*io_info->layout_ops.readvv)(io_info, + if((tmp_file_len = (*io_info->dsets_info[0].layout_ops.readvv)(io_info, file_nseq, &curr_file_seq, file_len, file_off, mem_nseq, &curr_mem_seq, mem_len, mem_off)) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_READERROR, FAIL, "read error") } /* end if */ else { 
HDassert(io_info->op_type == H5D_IO_OP_WRITE); - if((tmp_file_len = (*io_info->layout_ops.writevv)(io_info, + if((tmp_file_len = (*io_info->dsets_info[0].layout_ops.writevv)(io_info, file_nseq, &curr_file_seq, file_len, file_off, mem_nseq, &curr_mem_seq, mem_len, mem_off)) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_WRITEERROR, FAIL, "write error") @@ -207,14 +207,14 @@ H5D__select_io(const H5D_io_info_t *io_info, size_t elmt_size, /* Perform I/O on memory and file sequences */ if(io_info->op_type == H5D_IO_OP_READ) { - if((tmp_file_len = (*io_info->layout_ops.readvv)(io_info, + if((tmp_file_len = (*io_info->dsets_info[0].layout_ops.readvv)(io_info, file_nseq, &curr_file_seq, file_len, file_off, mem_nseq, &curr_mem_seq, mem_len, mem_off)) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_READERROR, FAIL, "read error") } /* end if */ else { HDassert(io_info->op_type == H5D_IO_OP_WRITE); - if((tmp_file_len = (*io_info->layout_ops.writevv)(io_info, + if((tmp_file_len = (*io_info->dsets_info[0].layout_ops.writevv)(io_info, file_nseq, &curr_file_seq, file_len, file_off, mem_nseq, &curr_mem_seq, mem_len, mem_off)) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_WRITEERROR, FAIL, "write error") diff --git a/src/H5Dvirtual.c b/src/H5Dvirtual.c index c516c58..99ec81d 100644 --- a/src/H5Dvirtual.c +++ b/src/H5Dvirtual.c @@ -78,12 +78,12 @@ /********************/ /* Layout operation callbacks */ -static herr_t H5D__virtual_read(H5D_io_info_t *io_info, const H5D_type_info_t - *type_info, hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space, - H5D_chunk_map_t *fm); -static herr_t H5D__virtual_write(H5D_io_info_t *io_info, - const H5D_type_info_t *type_info, hsize_t nelmts, const H5S_t *file_space, - const H5S_t *mem_space, H5D_chunk_map_t *fm); +static herr_t H5D__virtual_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, + hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space, + H5D_dset_info_t *dinfo); +static herr_t H5D__virtual_write(H5D_io_info_t *io_info, const 
H5D_type_info_t *type_info, + hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space, + H5D_dset_info_t *dinfo); static herr_t H5D__virtual_flush(H5D_t *dset, hid_t dxpl_id); /* Other functions */ @@ -101,16 +101,16 @@ static herr_t H5D__virtual_build_source_name(char *source_name, const H5O_storage_virtual_name_seg_t *parsed_name, size_t static_strlen, size_t nsubs, hsize_t blockno, char **built_name); static herr_t H5D__virtual_init_all(const H5D_t *dset, hid_t dxpl_id); -static herr_t H5D__virtual_pre_io(H5D_io_info_t *io_info, +static herr_t H5D__virtual_pre_io(H5D_dset_info_t *dset_info, H5O_storage_virtual_t *storage, const H5S_t *file_space, - const H5S_t *mem_space, hsize_t *tot_nelmts); + const H5S_t *mem_space, hsize_t *tot_nelmts, H5D_io_info_t *io_info); static herr_t H5D__virtual_post_io(H5O_storage_virtual_t *storage); -static herr_t H5D__virtual_read_one(H5D_io_info_t *io_info, +static herr_t H5D__virtual_read_one(H5D_dset_info_t *dset_info, const H5D_type_info_t *type_info, const H5S_t *file_space, - H5O_storage_virtual_srcdset_t *source_dset); -static herr_t H5D__virtual_write_one(H5D_io_info_t *io_info, + H5O_storage_virtual_srcdset_t *source_dset, hid_t dxpl_id); +static herr_t H5D__virtual_write_one(H5D_dset_info_t *dset_info, const H5D_type_info_t *type_info, const H5S_t *file_space, - H5O_storage_virtual_srcdset_t *source_dset); + H5O_storage_virtual_srcdset_t *source_dset, hid_t dxpl_id); /*********************/ @@ -2077,10 +2077,11 @@ H5D__virtual_is_space_alloc(const H5O_storage_t H5_ATTR_UNUSED *storage) *------------------------------------------------------------------------- */ static herr_t -H5D__virtual_pre_io(H5D_io_info_t *io_info, +H5D__virtual_pre_io(H5D_dset_info_t *dset_info, H5O_storage_virtual_t *storage, const H5S_t *file_space, - const H5S_t *mem_space, hsize_t *tot_nelmts) + const H5S_t *mem_space, hsize_t *tot_nelmts, H5D_io_info_t *io_info) { + const H5D_t *dset = dset_info->dset; /* Local pointer to dataset info 
*/ hssize_t select_nelmts; /* Number of elements in selection */ hsize_t bounds_start[H5S_MAX_RANK]; /* Selection bounds start */ hsize_t bounds_end[H5S_MAX_RANK]; /* Selection bounds end */ @@ -2099,7 +2100,7 @@ H5D__virtual_pre_io(H5D_io_info_t *io_info, /* Initialize layout if necessary */ if(!storage->init) - if(H5D__virtual_init_all(io_info->dset, io_info->md_dxpl_id) < 0) + if(H5D__virtual_init_all(dset, io_info->md_dxpl_id) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize virtual layout") /* Initialize tot_nelmts */ @@ -2119,7 +2120,7 @@ H5D__virtual_pre_io(H5D_io_info_t *io_info, /* Get selection bounds if necessary */ if(!bounds_init) { /* Get rank of VDS */ - if((rank = H5S_GET_EXTENT_NDIMS(io_info->dset->shared->space)) < 0) + if((rank = H5S_GET_EXTENT_NDIMS(dset->shared->space)) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to get number of dimensions") /* Get selection bounds */ @@ -2157,7 +2158,7 @@ H5D__virtual_pre_io(H5D_io_info_t *io_info, * open the source dataset to patch it */ if(storage->list[i].source_space_status != H5O_VIRTUAL_STATUS_CORRECT) { HDassert(!storage->list[i].sub_dset[j].dset); - if(H5D__virtual_open_source_dset(io_info->dset, &storage->list[i], &storage->list[i].sub_dset[j], io_info->md_dxpl_id) < 0) + if(H5D__virtual_open_source_dset(dset, &storage->list[i], &storage->list[i].sub_dset[j], io_info->md_dxpl_id) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "unable to open source dataset") } /* end if */ @@ -2185,7 +2186,7 @@ H5D__virtual_pre_io(H5D_io_info_t *io_info, HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to modify size of data space") /* Get current VDS dimensions */ - if(H5S_get_simple_extent_dims(io_info->dset->shared->space, tmp_dims, NULL) < 0) + if(H5S_get_simple_extent_dims(dset->shared->space, tmp_dims, NULL) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get VDS dimensions") /* Copy virtual selection */ @@ -2229,7 +2230,7 @@ H5D__virtual_pre_io(H5D_io_info_t 
*io_info, /* Open source dataset */ if(!storage->list[i].sub_dset[j].dset) /* Try to open dataset */ - if(H5D__virtual_open_source_dset(io_info->dset, &storage->list[i], &storage->list[i].sub_dset[j], io_info->md_dxpl_id) < 0) + if(H5D__virtual_open_source_dset(dset, &storage->list[i], &storage->list[i].sub_dset[j], io_info->md_dxpl_id) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "unable to open source dataset") /* If the source dataset is not open, mark the selected @@ -2266,7 +2267,7 @@ H5D__virtual_pre_io(H5D_io_info_t *io_info, /* Open source dataset */ if(!storage->list[i].source_dset.dset) /* Try to open dataset */ - if(H5D__virtual_open_source_dset(io_info->dset, &storage->list[i], &storage->list[i].source_dset, io_info->md_dxpl_id) < 0) + if(H5D__virtual_open_source_dset(dset, &storage->list[i], &storage->list[i].source_dset, io_info->md_dxpl_id) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "unable to open source dataset") /* If the source dataset is not open, mark the selected elements @@ -2365,10 +2366,11 @@ H5D__virtual_post_io(H5O_storage_virtual_t *storage) *------------------------------------------------------------------------- */ static herr_t -H5D__virtual_read_one(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, - const H5S_t *file_space, H5O_storage_virtual_srcdset_t *source_dset) +H5D__virtual_read_one(H5D_dset_info_t *dset_info, const H5D_type_info_t *type_info, + const H5S_t *file_space, H5O_storage_virtual_srcdset_t *source_dset, hid_t dxpl_id) { H5S_t *projected_src_space = NULL; /* File space for selection in a single source dataset */ + H5D_dset_info_t *dinfo = NULL; herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_STATIC @@ -2387,9 +2389,26 @@ H5D__virtual_read_one(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, if(H5S_select_project_intersection(source_dset->clipped_virtual_select, source_dset->clipped_source_select, file_space, &projected_src_space) < 0) HGOTO_ERROR(H5E_DATASET, 
H5E_CANTCLIP, FAIL, "can't project virtual intersection onto source space") - /* Perform read on source dataset */ - if(H5D__read(source_dset->dset, type_info->dst_type_id, source_dset->projected_mem_space, projected_src_space, io_info->raw_dxpl_id, io_info->u.rbuf) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read source dataset") + { + hid_t file_id; /* File ID for operation */ + + /* Alloc dset_info */ + if(NULL == (dinfo = (H5D_dset_info_t *)H5MM_calloc(sizeof(H5D_dset_info_t)))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate dset info array buffer") + + dinfo->dset = source_dset->dset; + dinfo->mem_space = source_dset->projected_mem_space; + dinfo->file_space = projected_src_space; + dinfo->u.rbuf = dset_info->u.rbuf; + dinfo->mem_type_id = type_info->dst_type_id; + + /* Retrieve file_id */ + file_id = H5F_FILE_ID(dinfo->dset->oloc.file); + + /* Read in the point (with the custom VL memory allocator) */ + if(H5D__read(file_id, dxpl_id, 1, dinfo) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read source dataset") + } /* Close projected_src_space */ if(H5S_close(projected_src_space) < 0) @@ -2398,6 +2417,8 @@ H5D__virtual_read_one(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, } /* end if */ done: + if(dinfo) + H5MM_xfree(dinfo); /* Release allocated resources on failure */ if(projected_src_space) { HDassert(ret_value < 0); @@ -2424,7 +2445,7 @@ done: static herr_t H5D__virtual_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space, - H5D_chunk_map_t H5_ATTR_UNUSED *fm) + H5D_dset_info_t *dset_info) { H5O_storage_virtual_t *storage; /* Convenient pointer into layout struct */ hsize_t tot_nelmts; /* Total number of elements mapped to mem_space */ @@ -2436,22 +2457,24 @@ H5D__virtual_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, /* Sanity check */ HDassert(io_info); - HDassert(io_info->u.rbuf); + HDassert(dset_info); 
+ HDassert(dset_info->u.rbuf); HDassert(type_info); + HDassert(dset_info == io_info->dsets_info); HDassert(mem_space); HDassert(file_space); - storage = &io_info->dset->shared->layout.storage.u.virt; + storage = &(dset_info->dset->shared->layout.storage.u.virt); HDassert((storage->view == H5D_VDS_FIRST_MISSING) || (storage->view == H5D_VDS_LAST_AVAILABLE)); #ifdef H5_HAVE_PARALLEL /* Parallel reads are not supported (yet) */ - if(H5F_HAS_FEATURE(io_info->dset->oloc.file, H5FD_FEAT_HAS_MPI)) + if(H5F_HAS_FEATURE(dset_info->dset->oloc.file, H5FD_FEAT_HAS_MPI)) HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "parallel reads not supported on virtual datasets") #endif /* H5_HAVE_PARALLEL */ /* Prepare for I/O operation */ - if(H5D__virtual_pre_io(io_info, storage, file_space, mem_space, &tot_nelmts) < 0) + if(H5D__virtual_pre_io(dset_info, storage, file_space, mem_space, &tot_nelmts, io_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTCLIP, FAIL, "unable to prepare for I/O operation") /* Iterate over mappings */ @@ -2464,12 +2487,14 @@ H5D__virtual_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, /* Iterate over sub-source dsets */ for(j = storage->list[i].sub_dset_io_start; j < storage->list[i].sub_dset_io_end; j++) - if(H5D__virtual_read_one(io_info, type_info, file_space, &storage->list[i].sub_dset[j]) < 0) + if(H5D__virtual_read_one(dset_info, type_info, file_space, &storage->list[i].sub_dset[j], + io_info->raw_dxpl_id) < 0) HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "unable to read source dataset") } /* end if */ else /* Read from source dataset */ - if(H5D__virtual_read_one(io_info, type_info, file_space, &storage->list[i].source_dset) < 0) + if(H5D__virtual_read_one(dset_info, type_info, file_space, &storage->list[i].source_dset, + io_info->raw_dxpl_id) < 0) HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "unable to read source dataset") } /* end for */ @@ -2478,7 +2503,7 @@ H5D__virtual_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, 
H5D_fill_value_t fill_status; /* Fill value status */ /* Check the fill value status */ - if(H5P_is_fill_value_defined(&io_info->dset->shared->dcpl_cache.fill, &fill_status) < 0) + if(H5P_is_fill_value_defined(&dset_info->dset->shared->dcpl_cache.fill, &fill_status) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't tell if fill value defined") /* Always write fill value to memory buffer unless it is undefined */ @@ -2505,7 +2530,7 @@ H5D__virtual_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, HGOTO_ERROR(H5E_DATASET, H5E_CANTCLIP, FAIL, "unable to clip fill selection") /* Write fill values to memory buffer */ - if(H5D__fill(io_info->dset->shared->dcpl_cache.fill.buf, io_info->dset->shared->type, io_info->u.rbuf, + if(H5D__fill(dset_info->dset->shared->dcpl_cache.fill.buf, dset_info->dset->shared->type, dset_info->u.rbuf, type_info->mem_type, fill_space, io_info->md_dxpl_id) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "filling buf failed") @@ -2555,10 +2580,11 @@ done: *------------------------------------------------------------------------- */ static herr_t -H5D__virtual_write_one(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, - const H5S_t *file_space, H5O_storage_virtual_srcdset_t *source_dset) +H5D__virtual_write_one(H5D_dset_info_t *dset_info, const H5D_type_info_t *type_info, + const H5S_t *file_space, H5O_storage_virtual_srcdset_t *source_dset, hid_t dxpl_id) { H5S_t *projected_src_space = NULL; /* File space for selection in a single source dataset */ + H5D_dset_info_t *dinfo = NULL; herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_STATIC @@ -2579,9 +2605,26 @@ H5D__virtual_write_one(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, if(H5S_select_project_intersection(source_dset->virtual_select, source_dset->clipped_source_select, file_space, &projected_src_space) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTCLIP, FAIL, "can't project virtual intersection onto source space") - /* Perform write on source 
dataset */ - if(H5D__write(source_dset->dset, type_info->dst_type_id, source_dset->projected_mem_space, projected_src_space, io_info->raw_dxpl_id, io_info->u.wbuf) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't write to source dataset") + { + hid_t file_id; /* File ID for operation */ + + /* Alloc dset_info */ + if(NULL == (dinfo = (H5D_dset_info_t *)H5MM_calloc(sizeof(H5D_dset_info_t)))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate dset info array buffer") + + dinfo->dset = source_dset->dset; + dinfo->mem_space = source_dset->projected_mem_space; + dinfo->file_space = projected_src_space; + dinfo->u.wbuf = dset_info->u.wbuf; + dinfo->mem_type_id = type_info->dst_type_id; + + /* Retrieve file_id */ + file_id = H5F_FILE_ID(dinfo->dset->oloc.file); + + /* Perform the write to the source dataset */ + if(H5D__write(file_id, dxpl_id, 1, dinfo) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read source dataset") + } /* Close projected_src_space */ if(H5S_close(projected_src_space) < 0) @@ -2590,6 +2633,9 @@ H5D__virtual_write_one(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, } /* end if */ done: + if(dinfo) + H5MM_xfree(dinfo); + /* Release allocated resources on failure */ if(projected_src_space) { HDassert(ret_value < 0); @@ -2613,10 +2659,10 @@ done: * *------------------------------------------------------------------------- */ -static herr_t +static herr_t H5D__virtual_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space, - H5D_chunk_map_t H5_ATTR_UNUSED *fm) + H5D_dset_info_t *dset_info) { H5O_storage_virtual_t *storage; /* Convenient pointer into layout struct */ hsize_t tot_nelmts; /* Total number of elements mapped to mem_space */ @@ -2627,22 +2673,23 @@ H5D__virtual_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, /* Sanity check */ HDassert(io_info); - HDassert(io_info->u.wbuf); + 
HDassert(dset_info); + HDassert(dset_info->u.wbuf); HDassert(type_info); HDassert(mem_space); HDassert(file_space); - storage = &io_info->dset->shared->layout.storage.u.virt; + storage = &(dset_info->dset->shared->layout.storage.u.virt); HDassert((storage->view == H5D_VDS_FIRST_MISSING) || (storage->view == H5D_VDS_LAST_AVAILABLE)); #ifdef H5_HAVE_PARALLEL /* Parallel writes are not supported (yet) */ - if(H5F_HAS_FEATURE(io_info->dset->oloc.file, H5FD_FEAT_HAS_MPI)) + if(H5F_HAS_FEATURE(dset_info->dset->oloc.file, H5FD_FEAT_HAS_MPI)) HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "parallel writes not supported on virtual datasets") #endif /* H5_HAVE_PARALLEL */ /* Prepare for I/O operation */ - if(H5D__virtual_pre_io(io_info, storage, file_space, mem_space, &tot_nelmts) < 0) + if(H5D__virtual_pre_io(dset_info, storage, file_space, mem_space, &tot_nelmts, io_info) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTCLIP, FAIL, "unable to prepare for I/O operation") /* Fail if there are unmapped parts of the selection as they would not be @@ -2660,12 +2707,14 @@ H5D__virtual_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, /* Iterate over sub-source dsets */ for(j = storage->list[i].sub_dset_io_start; j < storage->list[i].sub_dset_io_end; j++) - if(H5D__virtual_write_one(io_info, type_info, file_space, &storage->list[i].sub_dset[j]) < 0) + if(H5D__virtual_write_one(dset_info, type_info, file_space, &storage->list[i].sub_dset[j], + io_info->raw_dxpl_id) < 0) HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to write to source dataset") } /* end if */ else /* Write to source dataset */ - if(H5D__virtual_write_one(io_info, type_info, file_space, &storage->list[i].source_dset) < 0) + if(H5D__virtual_write_one(dset_info, type_info, file_space, &storage->list[i].source_dset, + io_info->raw_dxpl_id) < 0) HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to write to source dataset") } /* end for */ diff --git a/src/H5FDmpi.h b/src/H5FDmpi.h index 
784fe70..3081789 100644 --- a/src/H5FDmpi.h +++ b/src/H5FDmpi.h @@ -46,7 +46,6 @@ typedef enum H5FD_mpio_xfer_t { typedef enum H5FD_mpio_chunk_opt_t { H5FD_MPIO_CHUNK_DEFAULT = 0, H5FD_MPIO_CHUNK_ONE_IO, /*zero is the default*/ - H5FD_MPIO_CHUNK_MULTI_IO } H5FD_mpio_chunk_opt_t; /* Type of collective I/O */ diff --git a/src/H5FDmpio.c b/src/H5FDmpio.c index ed70e20..8c225f9 100644 --- a/src/H5FDmpio.c +++ b/src/H5FDmpio.c @@ -600,6 +600,9 @@ done: * *------------------------------------------------------------------------- */ +/* TODO: This can be removed as we decided to remove multi-chunk-opt feature. + * For now, leave it here to make 'enc_dec_plist_with_endianess' test pass. + * This can be removed after HDFFV-8281 done */ herr_t H5Pset_dxpl_mpio_chunk_opt_num(hid_t dxpl_id, unsigned num_chunk_per_proc) { @@ -645,6 +648,9 @@ done: * *------------------------------------------------------------------------- */ +/* TODO: This can be removed as we decided to remove multi-chunk-opt feature. + * For now, leave it here to make 'enc_dec_plist_with_endianess' test pass. 
+ * This can be removed after HDFFV-8281 done */ herr_t H5Pset_dxpl_mpio_chunk_opt_ratio(hid_t dxpl_id, unsigned percent_num_proc_per_chunk) { @@ -995,7 +1001,7 @@ H5FD_mpio_open(const char *name, unsigned flags, hid_t fapl_id, if(MPI_SUCCESS != (mpi_code = MPI_File_open(comm_dup, name, mpi_amode, info_dup, &fh))) HMPI_GOTO_ERROR(NULL, "MPI_File_open failed", mpi_code) - file_opened=1; + file_opened = 1; /* Get the MPI rank of this process and the total number of processes */ if (MPI_SUCCESS != (mpi_code=MPI_Comm_rank (comm_dup, &mpi_rank))) diff --git a/src/H5Fmpi.c b/src/H5Fmpi.c index 5434aa5..9f3f6b6 100644 --- a/src/H5Fmpi.c +++ b/src/H5Fmpi.c @@ -86,20 +86,24 @@ * Return: Success: The size (positive) * Failure: Negative * + * Programmer: Jonathan Kim + * June 5, 2013 + * + * Modifications: *------------------------------------------------------------------------- */ herr_t H5F_get_mpi_handle(const H5F_t *f, MPI_File **f_handle) { herr_t ret_value = SUCCEED; - hid_t fapl = -1; + hid_t fapl=-1; FUNC_ENTER_NOAPI(FAIL) assert(f && f->shared); /* Dispatch to driver */ - if ((ret_value = H5FD_get_vfd_handle(f->shared->lf, fapl, (void **)f_handle)) < 0) + if ((ret_value=H5FD_get_vfd_handle(f->shared->lf, fapl, f_handle)) < 0) HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "can't get mpi file handle") done: diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h index 5fdb8b8..91b5745 100644 --- a/src/H5Ppublic.h +++ b/src/H5Ppublic.h @@ -135,8 +135,7 @@ typedef enum H5D_mpio_actual_chunk_opt_mode_t { * I/O and contiguous collective I/O. */ H5D_MPIO_NO_CHUNK_OPTIMIZATION = 0, - H5D_MPIO_LINK_CHUNK, - H5D_MPIO_MULTI_CHUNK + H5D_MPIO_LINK_CHUNK } H5D_mpio_actual_chunk_opt_mode_t; typedef enum H5D_mpio_actual_io_mode_t { diff --git a/src/H5trace.c b/src/H5trace.c index a2ca7e2..0d64f13 100644 --- a/src/H5trace.c +++ b/src/H5trace.c @@ -446,10 +446,6 @@ H5_trace(const double *returning, const char *func, const char *type, ...) 
fprintf(out, "H5FD_MPIO_CHUNK_ONE_IO"); break; - case H5FD_MPIO_CHUNK_MULTI_IO: - fprintf(out, "H5FD_MPIO_CHUNK_MULTI_IO"); - break; - default: fprintf(out, "%ld", (long)opt); break; @@ -645,10 +641,6 @@ H5_trace(const double *returning, const char *func, const char *type, ...) fprintf(out, "H5D_MPIO_LINK_CHUNK"); break; - case H5D_MPIO_MULTI_CHUNK: - fprintf(out, "H5D_MPIO_MULTI_CHUNK"); - break; - default: fprintf(out, "%ld", (long)chunk_opt_mode); break; |