-rw-r--r--  src/H5Dio.c   | 841
-rw-r--r--  src/H5Dmpio.c |  28
-rw-r--r--  src/H5Dpkg.h  |   8
-rw-r--r--  src/H5Smpio.c |  78
4 files changed, 430 insertions(+), 525 deletions(-)
diff --git a/src/H5Dio.c b/src/H5Dio.c
index fa3f49e..3570cf6 100644
--- a/src/H5Dio.c
+++ b/src/H5Dio.c
@@ -78,7 +78,7 @@ static herr_t H5D_read(H5D_t *dataset, hid_t mem_type_id,
static herr_t H5D_write(H5D_t *dataset, hid_t mem_type_id,
const H5S_t *mem_space, const H5S_t *file_space,
hid_t dset_xfer_plist, const void *buf);
-static herr_t
+static herr_t
H5D_contig_read(H5D_io_info_t *io_info, hsize_t nelmts,
const H5T_t *mem_type, const H5S_t *mem_space,
const H5S_t *file_space, H5T_path_t *tpath,
@@ -89,20 +89,20 @@ H5D_contig_write(H5D_io_info_t *io_info, hsize_t nelmts,
const H5S_t *file_space, H5T_path_t *tpath,
hid_t src_id, hid_t dst_id, const void *buf);
static herr_t
-H5D_chunk_read(H5D_io_info_t *io_info, hsize_t nelmts,
+H5D_chunk_read(H5D_io_info_t *io_info, hsize_t nelmts,
const H5T_t *mem_type, const H5S_t *mem_space,
const H5S_t *file_space, H5T_path_t *tpath,
hid_t src_id, hid_t dst_id, void *buf/*out*/);
static herr_t
-H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
+H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
const H5T_t *mem_type, const H5S_t *mem_space,
const H5S_t *file_space, H5T_path_t *tpath,
hid_t src_id, hid_t dst_id, const void *buf);
#ifdef H5_HAVE_PARALLEL
-static herr_t
-H5D_io_assist_mpio(hid_t dxpl_id, H5D_dxpl_cache_t *dxpl_cache,
+static herr_t
+H5D_io_assist_mpio(hid_t dxpl_id, H5D_dxpl_cache_t *dxpl_cache,
hbool_t *xfer_mode_changed);
-static herr_t
+static herr_t
H5D_io_restore_mpio(hid_t dxpl_id);
static htri_t
H5D_get_collective_io_consensus(const H5F_t *file,
@@ -113,15 +113,15 @@ static herr_t H5D_mpio_get_mini_chunk(const H5D_t *dset,
const H5S_t *mem_space,
const H5S_t *file_space,
int *min_chunkf);
-static herr_t
+static herr_t
H5D_obtain_duplicate_pid(hid_t dxpl_id,
- hid_t* dp_id,
+ hid_t* dp_id,
H5D_dxpl_cache_t **cache);
#endif /* H5_HAVE_PARALLEL */
/* I/O info operations */
-static herr_t
+static herr_t
H5D_ioinfo_init(H5D_t *dset, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
hid_t dp_dxpl_id, H5D_dxpl_cache_t *dp_dxpl_cache,
const H5S_t *mem_space, const H5S_t *file_space,
@@ -371,7 +371,7 @@ H5D_get_dxpl_cache_real(hid_t dxpl_id, H5D_dxpl_cache_t *cache)
/* Get the data transform property */
if(H5P_get(dx_plist, H5D_XFER_XFORM_NAME, &cache->data_xform_prop)<0)
HGOTO_ERROR (H5E_PLIST, H5E_CANTGET, FAIL, "Can't retrieve data transform info")
-
+
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5D_get_dxpl_cache_real() */
@@ -427,7 +427,7 @@ done:
* Purpose: Reads (part of) a DSET from the file into application
* memory BUF. The part of the dataset to read is defined with
* MEM_SPACE_ID and FILE_SPACE_ID. The data points are
- * converted from their file type to the MEM_TYPE_ID specified.
+ * converted from their file type to the MEM_TYPE_ID specified.
* Additional miscellaneous data transfer properties can be
* passed to this function with the PLIST_ID argument.
*
@@ -447,12 +447,12 @@ done:
* Return: Non-negative on success/Negative on failure
*
* Errors:
- * ARGS BADTYPE Not a data space.
- * ARGS BADTYPE Not a data type.
- * ARGS BADTYPE Not a dataset.
- * ARGS BADTYPE Not xfer parms.
- * ARGS BADVALUE No output buffer.
- * DATASET READERROR Can't read data.
+ * ARGS BADTYPE Not a data space.
+ * ARGS BADTYPE Not a data type.
+ * ARGS BADTYPE Not a dataset.
+ * ARGS BADTYPE Not xfer parms.
+ * ARGS BADVALUE No output buffer.
+ * DATASET READERROR Can't read data.
*
* Programmer: Robb Matzke
* Thursday, December 4, 1997
@@ -639,7 +639,7 @@ done:
* Changed the way to retrieve property for generic property list.
*
* Raymond Lu, 2002-2-26
- * For the new fill value design, data space can either be allocated
+ * For the new fill value design, data space can either be allocated
* or not allocated at this stage. Fill value or data from space is
* returned to outgoing buffer.
*
@@ -663,9 +663,8 @@ H5D_read(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
H5D_dxpl_cache_t *dp_dxpl_cache=&_dp_dxpl_cache; /* Data transfer property cache */
hid_t dp_id;
#ifdef H5_HAVE_PARALLEL
- hbool_t xfer_mode_changed=FALSE; /* Whether the transfer mode was changed */
- H5FD_mpio_xfer_t xfer_mode;
-
+ hbool_t xfer_mode_changed=FALSE; /* Whether the transfer mode was changed */
+ H5FD_mpio_xfer_t xfer_mode;
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
int prop_value,new_value;
htri_t check_prop;
@@ -724,12 +723,12 @@ H5D_read(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
/* Retrieve dataset properties */
/* <none needed in the general case> */
- /* If space hasn't been allocated and not using external storage,
+ /* If space hasn't been allocated and not using external storage,
* return fill value to buffer if fill time is upon allocation, or
- * do nothing if fill time is never. If the dataset is compact and
+ * do nothing if fill time is never. If the dataset is compact and
* fill time is NEVER, there is no way to tell whether part of data
- * has been overwritten. So just proceed in reading.
- */
+ * has been overwritten. So just proceed in reading.
+ */
if(nelmts > 0 && dataset->shared->efl.nused==0 &&
((dataset->shared->layout.type==H5D_CONTIGUOUS && !H5F_addr_defined(dataset->shared->layout.u.contig.addr))
|| (dataset->shared->layout.type==H5D_CHUNKED && !H5F_addr_defined(dataset->shared->layout.u.chunk.addr)))) {
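[Editor's note: the hunk above keeps the read-side short-circuit for unallocated storage: with no storage address and no external files, the read returns the fill value (fill time "upon allocation") or does nothing (fill time "never"), except for compact layout, where a normal read proceeds. A minimal sketch of that decision follows; all names are illustrative stand-ins, not the inline checks H5D_read() actually uses.]

    /* Sketch only; names are illustrative stand-ins for H5D_read()'s checks. */
    typedef enum { FILL_TIME_ALLOC, FILL_TIME_NEVER, FILL_TIME_IFSET } fill_time_t;

    static int
    read_should_return_fill(size_t nelmts, size_t n_external_files,
                            int storage_addr_defined, fill_time_t fill_time)
    {
        /* Storage exists, or external files back the data: do a normal read. */
        if (nelmts == 0 || n_external_files > 0 || storage_addr_defined)
            return 0;
        /* No storage yet: hand back the fill value unless fill time is NEVER,
         * in which case the application buffer is left untouched. */
        return fill_time != FILL_TIME_NEVER;
    }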
@@ -785,12 +784,11 @@ H5D_read(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
} /* end switch */
#ifdef H5_HAVE_PARALLEL
- /* Obtain duplicate property list id. This is used to handle
+ /* Obtain duplicate property list id. This is used to handle
collective chunk IO. */
-
- if(H5D_obtain_duplicate_pid(dxpl_id,&dp_id,&dp_dxpl_cache)<0)
+ if(H5D_obtain_duplicate_pid(dxpl_id,&dp_id,&dp_dxpl_cache)<0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't obtain duplicated property id")
-#endif
+#endif
/* Set up I/O operation */
if(H5D_ioinfo_init(dataset,dxpl_cache,dxpl_id,dp_id,dp_dxpl_cache,mem_space,file_space,sconv_flags,&use_par_opt_io,&io_info)<0)
HGOTO_ERROR (H5E_DATASET, H5E_UNSUPPORTED, FAIL, "unable to set up I/O operation")
@@ -798,17 +796,17 @@ H5D_read(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
#ifdef H5_HAVE_PARALLEL
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
/**** Test for collective chunk IO
- notice the following code should be removed after
+ notice the following code should be removed after
a more general collective chunk IO algorithm is applied.
*/
if(dataset->shared->layout.type == H5D_CHUNKED) { /*only check for chunking storage */
check_prop = H5Pexist(dxpl_id,H5D_XFER_COLL_CHUNK_NAME);
- if(check_prop < 0)
+ if(check_prop < 0)
HGOTO_ERROR(H5E_PLIST, H5E_UNSUPPORTED, FAIL, "unable to check property list");
if(check_prop > 0) {
- if(H5Pget(dxpl_id,H5D_XFER_COLL_CHUNK_NAME,&prop_value)<0)
- HGOTO_ERROR(H5E_PLIST, H5E_UNSUPPORTED, FAIL, "unable to get property value");
+ if(H5Pget(dxpl_id,H5D_XFER_COLL_CHUNK_NAME,&prop_value)<0)
+ HGOTO_ERROR(H5E_PLIST, H5E_UNSUPPORTED, FAIL, "unable to get property value");
if(!use_par_opt_io) {
new_value = 0;
if(H5Pset(dxpl_id,H5D_XFER_COLL_CHUNK_NAME,&new_value)<0)
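[Editor's note: the instrumented-library block above exists so the parallel test suite can observe which I/O path ran: when the optimized path is unavailable, the library rewrites the H5D_XFER_COLL_CHUNK_NAME property on the caller's DXPL to 0. A test-side sketch of reading that flag back, assuming the internal property-name macro is visible (instrumented builds only):]

    int    prop_value = -1;
    htri_t check_prop = H5Pexist(dxpl_id, H5D_XFER_COLL_CHUNK_NAME);

    if (check_prop > 0) {
        if (H5Pget(dxpl_id, H5D_XFER_COLL_CHUNK_NAME, &prop_value) < 0)
            return -1;  /* property lookup failed */
        /* prop_value == 0 => the library fell back from collective chunk I/O */
    }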
@@ -873,7 +871,7 @@ done:
* Added the code that when it detects it is not safe to process a
* COLLECTIVE write request without hanging, it changes it to
* INDEPENDENT calls.
- *
+ *
* Albert Cheng, 2000-11-27
* Changed to use the optimized MPIO transfer for Collective calls only.
*
@@ -881,8 +879,8 @@ done:
* Changed the way to retrieve property for generic property list.
*
* Raymond Lu, 2002-2-26
- * For the new fill value design, space may not be allocated until
- * this function is called. Allocate and initialize space if it
+ * For the new fill value design, space may not be allocated until
+ * this function is called. Allocate and initialize space if it
* hasn't been.
*
* QAK - 2002/04/02
@@ -907,8 +905,8 @@ H5D_write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
H5D_dxpl_cache_t _dp_dxpl_cache; /* Data transfer property cache buffer */
H5D_dxpl_cache_t *dp_dxpl_cache=&_dp_dxpl_cache; /* Data transfer property cache */
#ifdef H5_HAVE_PARALLEL
- hbool_t xfer_mode_changed=FALSE; /* Whether the transfer mode was changed */
- H5FD_mpio_xfer_t xfer_mode;
+ hbool_t xfer_mode_changed=FALSE; /* Whether the transfer mode was changed */
+ H5FD_mpio_xfer_t xfer_mode;
int mpi_rank;
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
int prop_value,new_value;
@@ -964,8 +962,8 @@ H5D_write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
if (!file_space)
file_space = dataset->shared->space;
- if (!mem_space)
- mem_space = file_space;
+ if (!mem_space)
+ mem_space = file_space;
if((snelmts = H5S_GET_SELECT_NPOINTS(mem_space))<0)
HGOTO_ERROR (H5E_ARGS, H5E_BADVALUE, FAIL, "src dataspace has invalid selection")
H5_ASSIGN_OVERFLOW(nelmts,snelmts,hssize_t,hsize_t);
@@ -995,7 +993,7 @@ H5D_write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
/* <none needed currently> */
/* Allocate data space and initialize it if it hasn't been. */
- if(nelmts > 0 && dataset->shared->efl.nused==0 &&
+ if(nelmts > 0 && dataset->shared->efl.nused==0 &&
((dataset->shared->layout.type==H5D_CONTIGUOUS && !H5F_addr_defined(dataset->shared->layout.u.contig.addr))
|| (dataset->shared->layout.type==H5D_CHUNKED && !H5F_addr_defined(dataset->shared->layout.u.chunk.addr)))) {
hssize_t file_nelmts; /* Number of elements in file dataset's dataspace */
@@ -1051,22 +1049,22 @@ H5D_write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
/* Set up I/O operation */
if(H5D_ioinfo_init(dataset,dxpl_cache,dxpl_id,dp_id,dp_dxpl_cache,mem_space,file_space,sconv_flags,&use_par_opt_io,&io_info)<0)
HGOTO_ERROR (H5E_DATASET, H5E_UNSUPPORTED, FAIL, "unable to set up I/O operation")
-
+
#ifdef H5_HAVE_PARALLEL
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
/**** Test for collective chunk IO
- notice the following code should be removed after
+ notice the following code should be removed after
a more general collective chunk IO algorithm is applied.
*/
if(dataset->shared->layout.type == H5D_CHUNKED) { /*only check for chunking storage */
-
+
check_prop = H5Pexist(dxpl_id,H5D_XFER_COLL_CHUNK_NAME);
- if(check_prop < 0)
+ if(check_prop < 0)
HGOTO_ERROR(H5E_PLIST, H5E_UNSUPPORTED, FAIL, "unable to check property list");
if(check_prop > 0) {
- if(H5Pget(dxpl_id,H5D_XFER_COLL_CHUNK_NAME,&prop_value)<0)
- HGOTO_ERROR(H5E_PLIST, H5E_UNSUPPORTED, FAIL, "unable to get property value");
+ if(H5Pget(dxpl_id,H5D_XFER_COLL_CHUNK_NAME,&prop_value)<0)
+ HGOTO_ERROR(H5E_PLIST, H5E_UNSUPPORTED, FAIL, "unable to get property value");
if(!use_par_opt_io) {
new_value = 0;
if(H5Pset(dxpl_id,H5D_XFER_COLL_CHUNK_NAME,&new_value)<0)
@@ -1137,7 +1135,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5D_contig_read(H5D_io_info_t *io_info, hsize_t nelmts,
const H5T_t *mem_type, const H5S_t *mem_space,
const H5S_t *file_space, H5T_path_t *tpath,
@@ -1145,7 +1143,7 @@ H5D_contig_read(H5D_io_info_t *io_info, hsize_t nelmts,
{
H5D_t *dataset=io_info->dset; /* Local pointer to dataset info */
const H5D_dxpl_cache_t *dxpl_cache=io_info->dxpl_cache; /* Local pointer to dataset transfer info */
- herr_t status; /*function return status*/
+ herr_t status; /*function return status*/
#ifdef H5S_DEBUG
H5_timer_t timer;
#endif
@@ -1196,7 +1194,7 @@ H5D_contig_read(H5D_io_info_t *io_info, hsize_t nelmts,
|| dataset->shared->layout.type==H5D_COMPACT);
H5_CHECK_OVERFLOW(nelmts,hsize_t,size_t);
status = (io_info->ops.read)(io_info,
- (size_t)nelmts, H5T_get_size(dataset->shared->type),
+ (size_t)nelmts, H5T_get_size(dataset->shared->type),
file_space, mem_space,
buf/*out*/);
#ifdef H5S_DEBUG
@@ -1237,7 +1235,7 @@ H5D_contig_read(H5D_io_info_t *io_info, hsize_t nelmts,
target_size=(size_t)(nelmts*max_type_size);
} /* end if */
request_nelmts = target_size / max_type_size;
-
+
/* Sanity check elements in temporary buffer */
if (request_nelmts==0)
HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "temporary buffer max size is too small")
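[Editor's note: request_nelmts above sizes each strip-mine pass so one temporary buffer's worth of elements is converted at a time. Assuming the default 1 MiB conversion buffer, the arithmetic works out as follows; values are illustrative, not from this diff.]

    /* Strip-mine sizing, worked: 8-byte source, 4-byte destination type. */
    size_t src_type_size  = 8;
    size_t dst_type_size  = 4;
    size_t max_type_size  = (src_type_size > dst_type_size)
                            ? src_type_size : dst_type_size;
    size_t target_size    = 1024 * 1024;                  /* temp buffer size */
    size_t request_nelmts = target_size / max_type_size;  /* 131072 elements  */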
@@ -1296,10 +1294,10 @@ H5D_contig_read(H5D_io_info_t *io_info, hsize_t nelmts,
#ifdef H5S_DEBUG
H5_timer_begin(&timer);
#endif
- /* Sanity check that space is allocated, then read data from it */
+ /* Sanity check that space is allocated, then read data from it */
assert(((dataset->shared->layout.type==H5D_CONTIGUOUS && H5F_addr_defined(dataset->shared->layout.u.contig.addr))
|| (dataset->shared->layout.type==H5D_CHUNKED && H5F_addr_defined(dataset->shared->layout.u.chunk.addr)))
- || dataset->shared->efl.nused>0 ||
+ || dataset->shared->efl.nused>0 ||
dataset->shared->layout.type==H5D_COMPACT);
n = H5D_select_fgath(io_info,
file_space, &file_iter, smine_nelmts,
@@ -1376,7 +1374,7 @@ done:
H5FL_BLK_FREE(type_conv,tconv_buf);
if (bkg_buf && NULL==dxpl_cache->bkgr_buf)
H5FL_BLK_FREE(type_conv,bkg_buf);
-
+
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_contig_read() */
@@ -1407,7 +1405,7 @@ H5D_contig_write(H5D_io_info_t *io_info, hsize_t nelmts,
{
H5D_t *dataset=io_info->dset; /* Local pointer to dataset info */
const H5D_dxpl_cache_t *dxpl_cache=io_info->dxpl_cache; /* Local pointer to dataset transfer info */
- herr_t status; /*function return status*/
+ herr_t status; /*function return status*/
#ifdef H5S_DEBUG
H5_timer_t timer;
#endif
@@ -1469,7 +1467,7 @@ H5D_contig_write(H5D_io_info_t *io_info, hsize_t nelmts,
/* direct xfer accomplished successfully */
HGOTO_DONE(SUCCEED)
} /* end if */
-
+
/*
* This is the general case.
*/
@@ -1509,7 +1507,7 @@ H5D_contig_write(H5D_io_info_t *io_info, hsize_t nelmts,
if (H5S_select_iter_init(&bkg_iter, file_space, dst_type_size)<0)
HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize background selection information")
bkg_iter_init=1; /*file selection iteration info has been initialized */
-
+
/*
* Get a temporary buffer for type conversion unless the app has already
* supplied one through the xfer properties. Instead of allocating a
@@ -1518,7 +1516,7 @@ H5D_contig_write(H5D_io_info_t *io_info, hsize_t nelmts,
* same size over and over.
*/
if(H5T_detect_class(dataset->shared->type, H5T_VLEN)) {
- /* Old data is retrieved into background buffer for VL datatype. The
+ /* Old data is retrieved into background buffer for VL datatype. The
* data is used later for freeing heap objects. */
need_bkg = H5T_BKG_YES;
} else if (H5T_path_bkg(tpath)) {
@@ -1573,7 +1571,7 @@ H5D_contig_write(H5D_io_info_t *io_info, hsize_t nelmts,
#endif
n = H5D_select_fgath(io_info,
file_space, &bkg_iter, smine_nelmts,
- bkg_buf/*out*/);
+ bkg_buf/*out*/);
#ifdef H5S_DEBUG
H5_timer_end(&(io_info->stats->stats[0].bkg_timer), &timer);
@@ -1590,7 +1588,7 @@ H5D_contig_write(H5D_io_info_t *io_info, hsize_t nelmts,
if (H5T_convert(tpath, src_id, dst_id, smine_nelmts, 0, 0, tconv_buf, bkg_buf, io_info->dxpl_id)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "data type conversion failed")
- /* Do the data transform after the type conversion (since we're using dataset->shared->type). */
+ /* Do the data transform after the type conversion (since we're using dataset->shared->type). */
if(!H5Z_xform_noop(dxpl_cache->data_xform_prop))
if( H5Z_xform_eval(dxpl_cache->data_xform_prop, tconv_buf, smine_nelmts, dataset->shared->type) < 0)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Error performing data transform")
@@ -1665,7 +1663,7 @@ H5D_chunk_read(H5D_io_info_t *io_info, hsize_t nelmts,
const H5D_dxpl_cache_t *dxpl_cache=io_info->dxpl_cache; /* Local pointer to dataset transfer info */
fm_map fm; /* File<->memory mapping */
H5SL_node_t *chunk_node; /* Current node in chunk skip list */
- herr_t status; /*function return status*/
+ herr_t status; /*function return status*/
#ifdef H5S_DEBUG
H5_timer_t timer;
#endif
@@ -1675,7 +1673,7 @@ H5D_chunk_read(H5D_io_info_t *io_info, hsize_t nelmts,
size_t target_size; /*desired buffer size */
size_t request_nelmts; /*requested strip mine */
hsize_t smine_start; /*strip mine start loc */
- size_t n, smine_nelmts; /*elements per strip */
+ size_t n, smine_nelmts; /*elements per strip */
H5S_sel_iter_t mem_iter; /*memory selection iteration info*/
hbool_t mem_iter_init=0; /*memory selection iteration info has been initialized */
H5S_sel_iter_t bkg_iter; /*background iteration info*/
@@ -1690,12 +1688,12 @@ H5D_chunk_read(H5D_io_info_t *io_info, hsize_t nelmts,
#ifdef H5_HAVE_PARALLEL
int count_chunk,mpi_rank, mpi_code,min_num_chunk,is_regular,all_regular;
- hid_t temp_id;
+ hid_t temp_id;
MPI_Comm comm;
#endif
FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_read)
-
+
/* Map elements between file and memory for each chunk*/
if(H5D_create_chunk_map(dataset, mem_type, file_space, mem_space, &fm)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't build chunk mapping")
@@ -1719,13 +1717,15 @@ H5D_chunk_read(H5D_io_info_t *io_info, hsize_t nelmts,
/* Get first node in chunk skip list */
chunk_node=H5SL_first(fm.fsel);
+
#ifdef H5_HAVE_PARALLEL
if(io_info->dxpl_cache->xfer_mode == H5FD_MPIO_COLLECTIVE) {
- if(H5D_mpio_get_mini_chunk(dataset,mem_space,file_space,&min_num_chunk)<0)
+ if(H5D_mpio_get_mini_chunk(dataset,mem_space,file_space,&min_num_chunk)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get minimum number of chunk")
- }
+ }
count_chunk = 0;
#endif
+
/* Iterate through chunks to be operated on */
while(chunk_node) {
H5D_chunk_info_t *chunk_info; /* chunk information */
@@ -1738,152 +1738,110 @@ H5D_chunk_read(H5D_io_info_t *io_info, hsize_t nelmts,
store.chunk.index = chunk_info->index;
#ifdef H5_HAVE_PARALLEL
-
- count_chunk++;
- if(io_info->dxpl_cache->xfer_mode == H5FD_MPIO_COLLECTIVE) {
+ count_chunk++;
+ if(io_info->dxpl_cache->xfer_mode == H5FD_MPIO_COLLECTIVE) {
/* If the number of chunk is greater than minimum number of chunk,
Do independent read */
- if(count_chunk <= min_num_chunk) {
+ if(count_chunk <= min_num_chunk) {
#ifndef H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS
- if(H5S_SELECT_IS_REGULAR(chunk_info->fspace) == TRUE &&
- H5S_SELECT_IS_REGULAR(chunk_info->mspace) == TRUE)
- is_regular = 1;
- else is_regular = 0;
-
- /* Getting MPI communicator and rank */
- if((comm = H5F_mpi_get_comm(dataset->ent.file))==MPI_COMM_NULL)
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't retrieve MPI communicator")
- if((mpi_rank = H5F_mpi_get_rank(dataset->ent.file))<0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't retrieve MPI rank")
-
- if (MPI_SUCCESS != (mpi_code= MPI_Reduce(&all_regular,&is_regular,1,MPI_INT,MPI_MIN,0,comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Reduce failed", mpi_code)
- if (MPI_SUCCESS != (mpi_code= MPI_Bcast(&all_regular,1,MPI_INT,0,comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_code)
-#endif
- }
-
- if(count_chunk > min_num_chunk) {
- temp_id = io_info->dxpl_id;
- io_info->dxpl_id = io_info->dp_dxpl_id;
- status = (io_info->ops_sca.read)(io_info,
- chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
- chunk_info->fspace, chunk_info->mspace,
- buf);
- /* Check return value from optimized read */
- if (status<0)
- HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "optimized read failed")
+ if(H5S_SELECT_IS_REGULAR(chunk_info->fspace) == TRUE &&
+ H5S_SELECT_IS_REGULAR(chunk_info->mspace) == TRUE)
+ is_regular = 1;
+ else
+ is_regular = 0;
+
+ /* Getting MPI communicator and rank */
+ if((comm = H5F_mpi_get_comm(dataset->ent.file))==MPI_COMM_NULL)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't retrieve MPI communicator")
+ if((mpi_rank = H5F_mpi_get_rank(dataset->ent.file))<0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't retrieve MPI rank")
+
+ if (MPI_SUCCESS != (mpi_code= MPI_Reduce(&all_regular,&is_regular,1,MPI_INT,MPI_MIN,0,comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Reduce failed", mpi_code)
+ if (MPI_SUCCESS != (mpi_code= MPI_Bcast(&all_regular,1,MPI_INT,0,comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_code)
+#endif /* H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS */
+ }
+
+ if(count_chunk > min_num_chunk) {
+ temp_id = io_info->dxpl_id;
+ io_info->dxpl_id = io_info->dp_dxpl_id;
+ status = (io_info->ops_sca.read)(io_info,
+ chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
+ chunk_info->fspace, chunk_info->mspace, buf);
io_info->dxpl_id = temp_id;
- }
-
-
- else if((H5S_SELECT_IS_REGULAR(chunk_info->fspace) == FALSE)||
- (H5S_SELECT_IS_REGULAR(chunk_info->mspace) == FALSE)){
-
+ }
+ else if((H5S_SELECT_IS_REGULAR(chunk_info->fspace) == FALSE) ||
+ (H5S_SELECT_IS_REGULAR(chunk_info->mspace) == FALSE)) {
#ifndef H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS
- /* Perform the independent read operation */
- temp_id = io_info->dxpl_id;
- io_info->dxpl_id = io_info->dp_dxpl_id;
- status = (io_info->ops_sca.read)(io_info,
- chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
- chunk_info->fspace, chunk_info->mspace,
- buf);
- /* Check return value from optimized read */
- if (status<0)
- HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "optimized read failed")
- io_info->dxpl_id = temp_id;
-#else
-
- /* Perform the actual collective read operation */
- status = (io_info->ops.read)(io_info,
- chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
- chunk_info->fspace, chunk_info->mspace,
- buf);
- /* Check return value from optimized read */
- if (status<0)
- HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "optimized read failed")
-#endif
- }
-
- else {
- /* For regular selection,
+ /* Perform the independent read operation */
+ temp_id = io_info->dxpl_id;
+ io_info->dxpl_id = io_info->dp_dxpl_id;
+ status = (io_info->ops_sca.read)(io_info,
+ chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
+ chunk_info->fspace, chunk_info->mspace, buf);
+ io_info->dxpl_id = temp_id;
+#else /* H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS */
+ /* Perform the actual collective read operation */
+ status = (io_info->ops.read)(io_info,
+ chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
+ chunk_info->fspace, chunk_info->mspace, buf);
+#endif /* H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS */
+ }
+ else {
+ /* For regular selection,
if MPI_COMPLEX_DERIVED_DATATYPE is not defined,
unless spaces for all processors are regular, independent read operation should be performed.*/
#ifndef H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS
- if(!all_regular) {
-
- /* Perform the independent read operation */
- temp_id = io_info->dxpl_id;
- io_info->dxpl_id = io_info->dp_dxpl_id;
- status = (io_info->ops_sca.read)(io_info,
- chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
- chunk_info->fspace, chunk_info->mspace,
- buf);
- /* Check return value from optimized read */
- if (status<0)
- HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "optimized read failed")
- io_info->dxpl_id = temp_id;
- }
-
- else {
- /* For regular collective read in parallel*/
- /* Perform the read operation */
- status = (io_info->ops.read)(io_info,
- chunk_info->chunk_points,
- H5T_get_size(dataset->shared->type),
- chunk_info->fspace, chunk_info->mspace,
- buf);
- /* Check return value from optimized read */
- if (status<0)
- HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "optimized read failed")
- }
-#else
-
- /* For regular collective read in parallel*/
- /* Perform the read operation */
- status = (io_info->ops.read)(io_info,
- chunk_info->chunk_points,
- H5T_get_size(dataset->shared->type),
- chunk_info->fspace, chunk_info->mspace,
- buf);
- /* Check return value from optimized read */
- if (status<0)
- HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "optimized read failed")
-#endif
+ if(!all_regular) {
+ /* Perform the independent read operation */
+ temp_id = io_info->dxpl_id;
+ io_info->dxpl_id = io_info->dp_dxpl_id;
+ status = (io_info->ops_sca.read)(io_info,
+ chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
+ chunk_info->fspace, chunk_info->mspace, buf);
+ io_info->dxpl_id = temp_id;
+ }
+ else {
+ /* For regular collective read in parallel*/
+ /* Perform the read operation */
+ status = (io_info->ops.read)(io_info,
+ chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
+ chunk_info->fspace, chunk_info->mspace, buf);
}
-
- }
- else {
- /* For regular independent read in parallel*/
- /* Perform the read operation */
- status = (io_info->ops.read)(io_info,
- chunk_info->chunk_points,
- H5T_get_size(dataset->shared->type),
- chunk_info->fspace, chunk_info->mspace,
- buf);
- /* Check return value from optimized read */
- if (status<0)
- HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "optimized read failed")
- }
-
-#else
- /* Perform the actual read operation for sequential*/
- status = (io_info->ops.read)(io_info,
- chunk_info->chunk_points,
- H5T_get_size(dataset->shared->type),
- chunk_info->fspace, chunk_info->mspace,
- buf);
- /* Check return value from optimized read */
- if (status<0)
- HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "optimized read failed")
-#endif
+#else /* H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS */
+ /* For regular collective read in parallel*/
+ /* Perform the read operation */
+ status = (io_info->ops.read)(io_info,
+ chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
+ chunk_info->fspace, chunk_info->mspace, buf);
+#endif /* H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS */
+ }
+ }
+ else {
+ /* For regular independent read in parallel*/
+ /* Perform the read operation */
+ status = (io_info->ops.read)(io_info,
+ chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
+ chunk_info->fspace, chunk_info->mspace, buf);
+ }
+#else /* H5_HAVE_PARALLEL */
+ /* Perform the actual read operation for sequential*/
+ status = (io_info->ops.read)(io_info,
+ chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
+ chunk_info->fspace, chunk_info->mspace, buf);
+#endif /* H5_HAVE_PARALLEL */
+
+ /* Check return value from optimized read */
+ if (status<0)
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "optimized read failed")
/* Get the next chunk node in the skip list */
chunk_node=H5SL_next(chunk_node);
} /* end while */
-
+
#ifdef H5S_DEBUG
H5_timer_end(&(io_info->stats->stats[1].read_timer), &timer);
io_info->stats->stats[1].read_nbytes += nelmts * H5T_get_size(dataset->shared->type);
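[Editor's note: the restructured loop above makes a per-chunk choice among three paths: independent I/O once count_chunk exceeds the all-rank minimum, independent or collective for irregular selections depending on H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS, and collective for regular selections, gated on all_regular when complex derived datatypes are unavailable. A condensed sketch of that decision, folding the preprocessor branches into flags; names are illustrative, not library code.]

    static int
    chunk_read_is_collective(int count_chunk, int min_num_chunk,
                             int fspace_regular, int mspace_regular,
                             int all_regular, int complex_dtype_works)
    {
        if (count_chunk > min_num_chunk)
            return 0;                   /* ranks no longer in lockstep       */
        if (!fspace_regular || !mspace_regular)
            return complex_dtype_works; /* irregular needs derived datatypes */
        /* Regular selection: without derived-datatype support, collective
         * is only safe when every rank's selection is regular. */
        return complex_dtype_works ? 1 : all_regular;
    }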
@@ -1918,7 +1876,7 @@ H5D_chunk_read(H5D_io_info_t *io_info, hsize_t nelmts,
target_size=(size_t)(nelmts*max_type_size);
} /* end if */
request_nelmts = target_size / max_type_size;
-
+
/* Sanity check elements in temporary buffer */
if (request_nelmts==0)
HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "temporary buffer max size is too small")
@@ -1951,7 +1909,7 @@ H5D_chunk_read(H5D_io_info_t *io_info, hsize_t nelmts,
if((bkg_buf=H5FL_BLK_CALLOC(type_conv,(request_nelmts*dst_type_size)))==NULL)
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for background conversion")
} /* end if */
-
+
/* Loop over all the chunks, performing I/O on each */
/* Get first node in chunk skip list */
@@ -1974,16 +1932,16 @@ H5D_chunk_read(H5D_io_info_t *io_info, hsize_t nelmts,
if (H5S_select_iter_init(&bkg_iter, chunk_info->mspace, dst_type_size)<0)
HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize background selection information")
bkg_iter_init=1; /*file selection iteration info has been initialized */
-
+
/* Pass in chunk's coordinates in a union*/
store.chunk.offset = chunk_info->coords;
store.chunk.index = chunk_info->index;
-
+
for (smine_start=0; smine_start<chunk_info->chunk_points; smine_start+=smine_nelmts) {
/* Go figure out how many elements to read from the file */
assert(H5S_SELECT_ITER_NELMTS(&file_iter)==(chunk_info->chunk_points-smine_start));
smine_nelmts = (size_t)MIN(request_nelmts, (chunk_info->chunk_points-smine_start));
-
+
/*
* Gather the data from disk into the data type conversion
* buffer. Also gather data from application to background buffer
@@ -1992,11 +1950,11 @@ H5D_chunk_read(H5D_io_info_t *io_info, hsize_t nelmts,
#ifdef H5S_DEBUG
H5_timer_begin(&timer);
#endif
- /* Sanity check that space is allocated, then read data from it */
+ /* Sanity check that space is allocated, then read data from it */
assert(((dataset->shared->layout.type==H5D_CONTIGUOUS && H5F_addr_defined(dataset->shared->layout.u.contig.addr))
|| (dataset->shared->layout.type==H5D_CHUNKED && H5F_addr_defined(dataset->shared->layout.u.chunk.addr)))
|| dataset->shared->efl.nused>0 || dataset->shared->layout.type==H5D_COMPACT);
- n = H5D_select_fgath(io_info,
+ n = H5D_select_fgath(io_info,
chunk_info->fspace, &file_iter, smine_nelmts,
tconv_buf/*out*/);
@@ -2022,11 +1980,11 @@ H5D_chunk_read(H5D_io_info_t *io_info, hsize_t nelmts,
if (n!=smine_nelmts)
HGOTO_ERROR (H5E_IO, H5E_READERROR, FAIL, "mem gather failed")
} /* end if */
-
+
/*
* Perform data type conversion.
*/
- if (H5T_convert(tpath, src_id, dst_id, smine_nelmts, 0, 0,
+ if (H5T_convert(tpath, src_id, dst_id, smine_nelmts, 0, 0,
tconv_buf, bkg_buf, io_info->dxpl_id)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "data type conversion failed")
@@ -2072,7 +2030,7 @@ H5D_chunk_read(H5D_io_info_t *io_info, hsize_t nelmts,
/* Get the next chunk node in the skip list */
chunk_node=H5SL_next(chunk_node);
} /* end while */
-
+
done:
/* Release selection iterators, if necessary */
if(file_iter_init) {
@@ -2092,11 +2050,11 @@ done:
H5FL_BLK_FREE(type_conv,tconv_buf);
if (bkg_buf && NULL==dxpl_cache->bkgr_buf)
H5FL_BLK_FREE(type_conv,bkg_buf);
-
+
/* Release chunk mapping information */
if(H5D_destroy_chunk_map(&fm) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "can't release chunk mapping")
-
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5D_chunk_read() */
@@ -2131,7 +2089,7 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
const H5D_dxpl_cache_t *dxpl_cache=io_info->dxpl_cache; /* Local pointer to dataset transfer info */
fm_map fm; /* File<->memory mapping */
H5SL_node_t *chunk_node; /* Current node in chunk skip list */
- herr_t status; /*function return status*/
+ herr_t status; /*function return status*/
#ifdef H5S_DEBUG
H5_timer_t timer;
#endif
@@ -2141,7 +2099,7 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
size_t target_size; /*desired buffer size */
size_t request_nelmts; /*requested strip mine */
hsize_t smine_start; /*strip mine start loc */
- size_t n, smine_nelmts; /*elements per strip */
+ size_t n, smine_nelmts; /*elements per strip */
H5S_sel_iter_t mem_iter; /*memory selection iteration info*/
hbool_t mem_iter_init=0; /*memory selection iteration info has been initialized */
H5S_sel_iter_t bkg_iter; /*background iteration info*/
@@ -2158,15 +2116,15 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
hid_t temp_id;
int count_chunk,mpi_rank,mpi_code,min_num_chunk,is_regular,all_regular = 0;
MPI_Comm comm;
-
+
#endif
FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_write)
-
+
/* Map elements between file and memory for each chunk*/
if(H5D_create_chunk_map(dataset, mem_type, file_space, mem_space, &fm)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't build chunk mapping")
-
+
/* Set dataset storage for I/O info */
io_info->store=&store;
@@ -2181,11 +2139,12 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
#ifdef H5_HAVE_PARALLEL
if(io_info->dxpl_cache->xfer_mode == H5FD_MPIO_COLLECTIVE) {
- if(H5D_mpio_get_mini_chunk(dataset,mem_space,file_space,&min_num_chunk)<0)
+ if(H5D_mpio_get_mini_chunk(dataset,mem_space,file_space,&min_num_chunk)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get minimum number of chunk")
- }
+ }
count_chunk = 0;
#endif
+
/* Get first node in chunk skip list */
chunk_node=H5SL_first(fm.fsel);
@@ -2201,140 +2160,109 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
store.chunk.index = chunk_info->index;
#ifdef H5_HAVE_PARALLEL
-
- count_chunk++;
- if(io_info->dxpl_cache->xfer_mode == H5FD_MPIO_COLLECTIVE) {
+ count_chunk++;
+ if(io_info->dxpl_cache->xfer_mode == H5FD_MPIO_COLLECTIVE) {
/* If the number of chunk is greater than minimum number of chunk,
Do independent write */
- if(count_chunk <= min_num_chunk) {
+ if(count_chunk <= min_num_chunk) {
#ifndef H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS
- if(H5S_SELECT_IS_REGULAR(chunk_info->fspace) == TRUE &&
- H5S_SELECT_IS_REGULAR(chunk_info->mspace) == TRUE)
- is_regular = 1;
- else is_regular = 0;
- /* Getting MPI communicator and rank */
- if((comm = H5F_mpi_get_comm(dataset->ent.file))==MPI_COMM_NULL)
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't retrieve MPI communicator")
- if((mpi_rank = H5F_mpi_get_rank(dataset->ent.file))<0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't retrieve MPI rank")
- if (MPI_SUCCESS != (mpi_code= MPI_Reduce(&all_regular,&is_regular,1,MPI_INT,MPI_MIN,0,comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Reduce failed", mpi_code)
- if (MPI_SUCCESS != (mpi_code= MPI_Bcast(&all_regular,1,MPI_INT,0,comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_code)
-#endif
- }
- if(count_chunk > min_num_chunk) {
- temp_id = io_info->dxpl_id;
- io_info->dxpl_id = io_info->dp_dxpl_id;
- fflush(stdout);
- status = (io_info->ops_sca.write)(io_info,
- chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
- chunk_info->fspace, chunk_info->mspace,
- buf);
- /* Check return value from optimized write */
- if (status<0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "optimized write failed")
+ if(H5S_SELECT_IS_REGULAR(chunk_info->fspace) == TRUE &&
+ H5S_SELECT_IS_REGULAR(chunk_info->mspace) == TRUE)
+ is_regular = 1;
+ else
+ is_regular = 0;
+
+ /* Getting MPI communicator and rank */
+ if((comm = H5F_mpi_get_comm(dataset->ent.file))==MPI_COMM_NULL)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't retrieve MPI communicator")
+ if((mpi_rank = H5F_mpi_get_rank(dataset->ent.file))<0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't retrieve MPI rank")
+
+ if (MPI_SUCCESS != (mpi_code= MPI_Reduce(&all_regular,&is_regular,1,MPI_INT,MPI_MIN,0,comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Reduce failed", mpi_code)
+ if (MPI_SUCCESS != (mpi_code= MPI_Bcast(&all_regular,1,MPI_INT,0,comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_code)
+#endif /* H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS */
+ }
+
+ if(count_chunk > min_num_chunk) {
+ temp_id = io_info->dxpl_id;
+ io_info->dxpl_id = io_info->dp_dxpl_id;
+ status = (io_info->ops_sca.write)(io_info,
+ chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
+ chunk_info->fspace, chunk_info->mspace, buf);
+
+ /* Check return value from optimized write */
+ if (status<0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "optimized write failed")
io_info->dxpl_id = temp_id;
- }
-
-
- else if((H5S_SELECT_IS_REGULAR(chunk_info->fspace) == FALSE)||
- (H5S_SELECT_IS_REGULAR(chunk_info->mspace) == FALSE)){
+ }
+ else if((H5S_SELECT_IS_REGULAR(chunk_info->fspace) == FALSE) ||
+ (H5S_SELECT_IS_REGULAR(chunk_info->mspace) == FALSE)) {
#ifndef H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS
- /* Perform the independent write operation */
-
- temp_id = io_info->dxpl_id;
- io_info->dxpl_id = io_info->dp_dxpl_id;
- status = (io_info->ops_sca.write)(io_info,
- chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
- chunk_info->fspace, chunk_info->mspace,
- buf);
-
- /* Check return value from optimized write */
- if (status<0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "optimized write failed")
- io_info->dxpl_id = temp_id;
-#else
-
- /* Perform the actual collective write operation */
- status = (io_info->ops.write)(io_info,
- chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
- chunk_info->fspace, chunk_info->mspace,
- buf);
- /* Check return value from optimized write */
- if (status<0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "optimized write failed")
-#endif
- }
-
- else {
+ /* Perform the independent write operation */
+ temp_id = io_info->dxpl_id;
+ io_info->dxpl_id = io_info->dp_dxpl_id;
+ status = (io_info->ops_sca.write)(io_info,
+ chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
+ chunk_info->fspace, chunk_info->mspace, buf);
+ io_info->dxpl_id = temp_id;
+#else /* H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS */
+ /* Perform the actual collective write operation */
+ status = (io_info->ops.write)(io_info,
+ chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
+ chunk_info->fspace, chunk_info->mspace, buf);
+#endif /* H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS */
+ }
+ else {
#ifndef H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS
- if(!all_regular) {
-
- /* Perform the independent write operation */
- temp_id = io_info->dxpl_id;
- io_info->dxpl_id = io_info->dp_dxpl_id;
- status = (io_info->ops_sca.write)(io_info,
- chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
- chunk_info->fspace, chunk_info->mspace,
- buf);
- /* Check return value from optimized read */
- if (status<0)
- HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "optimized read failed")
- io_info->dxpl_id = temp_id;
- }
- else {
- /* For regular selection, perform the collective write operation */
- status = (io_info->ops.write)(io_info,
+ if(!all_regular) {
+ /* Perform the independent write operation */
+ temp_id = io_info->dxpl_id;
+ io_info->dxpl_id = io_info->dp_dxpl_id;
+ status = (io_info->ops_sca.write)(io_info,
+ chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
+ chunk_info->fspace, chunk_info->mspace, buf);
+ io_info->dxpl_id = temp_id;
+ }
+ else {
+ /* For regular selection, perform the collective write operation */
+ status = (io_info->ops.write)(io_info,
+ chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
+ chunk_info->fspace, chunk_info->mspace, buf);
+ }
+#else /* H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS */
+ status = (io_info->ops.write)(io_info,
+ chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
+ chunk_info->fspace, chunk_info->mspace, buf);
+#endif /* H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS */
+ }
+ }
+ else {
+ /* For independent parallel write*/
+ /* Perform the write operation */
+ status = (io_info->ops.write)(io_info,
chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
- chunk_info->fspace, chunk_info->mspace,
- buf);
- /* Check return value from optimized write */
- if (status<0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "optimized write failed")
- }
-#else
+ chunk_info->fspace, chunk_info->mspace, buf);
+ }
- status = (io_info->ops.write)(io_info,
- chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
- chunk_info->fspace, chunk_info->mspace,
- buf);
- /* Check return value from optimized write */
- if (status<0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "optimized write failed")
-
-#endif
- }
- }
- else {
- /* For independent parallel write*/
- /* Perform the write operation */
- status = (io_info->ops.write)(io_info,
- chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
- chunk_info->fspace, chunk_info->mspace,
- buf);
- /* Check return value from optimized write */
- if (status<0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "optimized write failed")
- }
+#else /* H5_HAVE_PARALLEL */
+ /* Perform the actual write operation for sequential*/
+ status = (io_info->ops.write)(io_info,
+ chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
+ chunk_info->fspace, chunk_info->mspace, buf);
+#endif /* H5_HAVE_PARALLEL */
-#else
- /* Perform the actual write operation for sequential*/
- status = (io_info->ops.write)(io_info,
- chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
- chunk_info->fspace, chunk_info->mspace,
- buf);
- /* Check return value from optimized write */
- if (status<0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "optimized write failed")
-#endif
+ /* Check return value from optimized write */
+ if (status<0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "optimized write failed")
/* Get the next chunk node in the skip list */
chunk_node=H5SL_next(chunk_node);
} /* end while */
-
+
#ifdef H5S_DEBUG
H5_timer_end(&(io_info->stats->stats[0].write_timer), &timer);
io_info->stats->stats[0].write_nbytes += nelmts * H5T_get_size(mem_type);
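[Editor's note: each independent fallback in the loops above uses the same swap pattern: stash the collective DXPL id, substitute the duplicated independent one (dp_dxpl_id), invoke the scalar callback, and restore the id before the next chunk. Isolated here as a sketch whose signature mirrors the write call sites above:]

    static herr_t
    independent_chunk_write(H5D_io_info_t *io_info, const H5D_chunk_info_t *ci,
                            size_t elmt_size, const void *buf)
    {
        hid_t  temp_id = io_info->dxpl_id;      /* remember collective DXPL */
        herr_t status;

        io_info->dxpl_id = io_info->dp_dxpl_id; /* independent duplicate    */
        status = (io_info->ops_sca.write)(io_info, ci->chunk_points, elmt_size,
                                          ci->fspace, ci->mspace, buf);
        io_info->dxpl_id = temp_id;             /* restore for next chunk   */
        return status;
    }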
@@ -2344,13 +2272,13 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
/* direct xfer accomplished successfully */
HGOTO_DONE(SUCCEED)
} /* end if */
-
+
/*
* This is the general case (type conversion, usually).
*/
if(nelmts==0)
HGOTO_DONE(SUCCEED)
-
+
/* Compute element sizes and other parameters */
src_type_size = H5T_get_size(mem_type);
dst_type_size = H5T_get_size(dataset->shared->type);
@@ -2382,7 +2310,7 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
* same size over and over.
*/
if(H5T_detect_class(dataset->shared->type, H5T_VLEN)) {
- /* Old data is retrieved into background buffer for VL datatype. The
+ /* Old data is retrieved into background buffer for VL datatype. The
* data is used later for freeing heap objects. */
need_bkg = H5T_BKG_YES;
} else if (H5T_path_bkg(tpath)) {
@@ -2429,16 +2357,16 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
if (H5S_select_iter_init(&bkg_iter, chunk_info->fspace, dst_type_size)<0)
HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize background selection information")
bkg_iter_init=1; /*file selection iteration info has been initialized */
-
+
/*pass in chunk's coordinates in a union*/
store.chunk.offset = chunk_info->coords;
store.chunk.index = chunk_info->index;
-
+
for (smine_start=0; smine_start<chunk_info->chunk_points; smine_start+=smine_nelmts) {
/* Go figure out how many elements to read from the file */
assert(H5S_SELECT_ITER_NELMTS(&file_iter)==(chunk_info->chunk_points-smine_start));
smine_nelmts = (size_t)MIN(request_nelmts, (chunk_info->chunk_points-smine_start));
-
+
/*
* Gather the data from disk into the data type conversion
* buffer. Also gather data from application to background buffer
@@ -2449,7 +2377,7 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
#endif
n = H5D_select_mgath(buf, chunk_info->mspace, &mem_iter,
smine_nelmts, dxpl_cache, tconv_buf/*out*/);
-
+
#ifdef H5S_DEBUG
H5_timer_end(&(io_info->stats->stats[1].gath_timer), &timer);
io_info->stats->stats[1].gath_nbytes += n * src_type_size;
@@ -2464,7 +2392,7 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
#endif
n = H5D_select_fgath(io_info,
chunk_info->fspace, &bkg_iter, smine_nelmts,
- bkg_buf/*out*/);
+ bkg_buf/*out*/);
#ifdef H5S_DEBUG
H5_timer_end(&(io_info->stats->stats[0].bkg_timer), &timer);
@@ -2478,7 +2406,7 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
/*
* Perform data type conversion.
*/
- if (H5T_convert(tpath, src_id, dst_id, smine_nelmts, 0, 0,
+ if (H5T_convert(tpath, src_id, dst_id, smine_nelmts, 0, 0,
tconv_buf, bkg_buf, io_info->dxpl_id)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "data type conversion failed")
@@ -2541,12 +2469,12 @@ done:
if(H5S_SELECT_ITER_RELEASE(&bkg_iter)<0)
HDONE_ERROR (H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator")
} /* end if */
-
+
if (tconv_buf && NULL==dxpl_cache->tconv_buf)
H5FL_BLK_FREE(type_conv,tconv_buf);
if (bkg_buf && NULL==dxpl_cache->bkgr_buf)
H5FL_BLK_FREE(type_conv,bkg_buf);
-
+
/* Release chunk mapping information */
if(H5D_destroy_chunk_map(&fm) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "can't release chunk mapping")
@@ -2573,14 +2501,14 @@ done:
*
*-------------------------------------------------------------------------
*/
-static herr_t
-H5D_io_assist_mpio(hid_t dxpl_id, H5D_dxpl_cache_t *dxpl_cache,
+static herr_t
+H5D_io_assist_mpio(hid_t dxpl_id, H5D_dxpl_cache_t *dxpl_cache,
hbool_t *xfer_mode_changed)
-{
+{
herr_t ret_value = SUCCEED; /*return value */
FUNC_ENTER_NOAPI_NOINIT(H5D_io_assist_mpio)
-
+
/* The following may not handle a collective call correctly
* since it does not ensure all processes can handle the write
* request according to the MPI collective specification.
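[Editor's note: H5D_io_assist_mpio demotes a collective transfer to independent when honoring the collective request could hang (the comment above flags exactly this risk) and reports the change through xfer_mode_changed so the caller can undo it with H5D_io_restore_mpio once the transfer finishes. The contract at the call site, sketched from the prototypes earlier in this diff; the error strings are illustrative:]

    hbool_t xfer_mode_changed = FALSE;

    if (H5D_io_assist_mpio(dxpl_id, dxpl_cache, &xfer_mode_changed) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't assist MPI I/O")
    /* ... perform the I/O with the possibly-downgraded transfer mode ... */
    if (xfer_mode_changed && H5D_io_restore_mpio(dxpl_id) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't restore MPI I/O")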
@@ -2612,7 +2540,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5D_io_restore_mpio
*
- * Purpose: Common logic for restoring MPI transfer mode
+ * Purpose: Common logic for restoring MPI transfer mode
*
* Return: Non-negative on success/Negative on failure
*
@@ -2623,15 +2551,15 @@ done:
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5D_io_restore_mpio(hid_t dxpl_id)
-{
+{
H5P_genplist_t *dx_plist; /* Data transer property list */
H5FD_mpio_xfer_t xfer_mode; /*xfer_mode for this request */
herr_t ret_value = SUCCEED; /*return value */
FUNC_ENTER_NOAPI_NOINIT(H5D_io_restore_mpio)
-
+
/* Get the dataset transfer property list */
if (NULL == (dx_plist = H5I_object(dxpl_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset creation property list")
@@ -2669,7 +2597,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5D_create_chunk_map(const H5D_t *dataset, const H5T_t *mem_type, const H5S_t *file_space,
+H5D_create_chunk_map(const H5D_t *dataset, const H5T_t *mem_type, const H5S_t *file_space,
const H5S_t *mem_space, fm_map *fm)
{
H5S_t *tmp_mspace=NULL; /* Temporary memory dataspace */
@@ -2686,12 +2614,12 @@ H5D_create_chunk_map(const H5D_t *dataset, const H5T_t *mem_type, const H5S_t *f
char bogus; /* "bogus" buffer to pass to selection iterator */
unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
-
+
FUNC_ENTER_NOAPI_NOINIT(H5D_create_chunk_map)
/* Get layout for dataset */
fm->layout = &(dataset->shared->layout);
-
+
/* Check if the memory space is scalar & make equivalent memory space */
if((sm_ndims = H5S_GET_EXTENT_NDIMS(mem_space))<0)
HGOTO_ERROR (H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimension number")
@@ -2716,7 +2644,7 @@ H5D_create_chunk_map(const H5D_t *dataset, const H5T_t *mem_type, const H5S_t *f
/* Set the number of dimensions for the memory dataspace */
H5_ASSIGN_OVERFLOW(fm->m_ndims,sm_ndims,int,unsigned);
} /* end else */
-
+
/* Get dim number and dimensionality for each dataspace */
fm->f_ndims=f_ndims=dataset->shared->layout.u.chunk.ndims-1;
@@ -2741,7 +2669,7 @@ H5D_create_chunk_map(const H5D_t *dataset, const H5T_t *mem_type, const H5S_t *f
/* Round up to the next integer # of chunks, to accomodate partial chunks */
fm->chunks[u] = ((fm->f_dims[u]+dataset->shared->layout.u.chunk.dim[u])-1) / dataset->shared->layout.u.chunk.dim[u];
} /* end for */
-
+
/* Compute the "down" size of 'chunks' information */
if(H5V_array_down(f_ndims,fm->chunks,fm->down_chunks)<0)
HGOTO_ERROR (H5E_INTERNAL, H5E_BADVALUE, FAIL, "can't compute 'down' sizes")
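[Editor's note: the fm->chunks[u] expression above is a ceiling division written as ((a + b) - 1) / b, so a trailing partial chunk still counts as a whole chunk. Worked with illustrative numbers:]

    /* A 100-element dimension with 30-element chunks spans 4 chunks:
     * ((100 + 30) - 1) / 30 == 129 / 30 == 4  (chunks 0..3; chunk 3 is partial). */
    hsize_t f_dim     = 100;
    hsize_t chunk_dim = 30;
    hsize_t n_chunks  = ((f_dim + chunk_dim) - 1) / chunk_dim;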
@@ -2770,7 +2698,7 @@ H5D_create_chunk_map(const H5D_t *dataset, const H5T_t *mem_type, const H5S_t *f
/* Create temporary datatypes for selection iteration */
if((f_tid = H5I_register(H5I_DATATYPE, H5T_copy(dataset->shared->type, H5T_COPY_ALL)))<0)
HGOTO_ERROR (H5E_DATATYPE, H5E_CANTREGISTER, FAIL, "unable to register file datatype")
-
+
/* Spaces aren't the same shape, iterate over the memory selection directly */
if(H5S_select_iterate(&bogus, f_tid, file_space, H5D_chunk_file_cb, fm)<0)
HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create file chunk selections")
@@ -2819,7 +2747,7 @@ H5D_create_chunk_map(const H5D_t *dataset, const H5T_t *mem_type, const H5S_t *f
/* Make a copy of equivalent memory space */
if((tmp_mspace = H5S_copy(equiv_mspace,TRUE))==NULL)
HGOTO_ERROR (H5E_DATASPACE, H5E_CANTCOPY, FAIL, "unable to copy memory space")
-
+
/* De-select the mem space copy */
if(H5S_select_none(tmp_mspace)<0)
HGOTO_ERROR (H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to de-select memory space")
@@ -2832,7 +2760,7 @@ H5D_create_chunk_map(const H5D_t *dataset, const H5T_t *mem_type, const H5S_t *f
if((f_tid = H5I_register(H5I_DATATYPE, H5T_copy(dataset->shared->type, H5T_COPY_ALL)))<0)
HGOTO_ERROR (H5E_DATATYPE, H5E_CANTREGISTER, FAIL, "unable to register file datatype")
} /* end if */
-
+
/* Create selection iterator for memory selection */
if((elmt_size=H5T_get_size(mem_type))==0)
HGOTO_ERROR(H5E_DATATYPE, H5E_BADSIZE, FAIL, "datatype size invalid")
@@ -2897,7 +2825,7 @@ done:
if(H5S_hyper_denormalize_offset(file_space, old_offset)<0)
HGOTO_ERROR (H5E_DATASET, H5E_BADSELECT, FAIL, "unable to normalize dataspace by offset")
} /* end if */
-
+
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_create_chunk_map() */
@@ -2960,7 +2888,7 @@ static herr_t
H5D_destroy_chunk_map(const fm_map *fm)
{
herr_t ret_value = SUCCEED; /*return value */
-
+
FUNC_ENTER_NOAPI_NOINIT(H5D_destroy_chunk_map)
/* Free the chunk info skip list */
@@ -2996,7 +2924,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5D_create_chunk_file_map_hyper(const fm_map *fm)
{
hssize_t ssel_points; /* Number of elements in file selection */
@@ -3010,7 +2938,7 @@ H5D_create_chunk_file_map_hyper(const fm_map *fm)
int curr_dim; /* Current dimension to increment */
unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
-
+
FUNC_ENTER_NOAPI_NOINIT(H5D_create_chunk_file_map_hyper)
/* Sanity check */
@@ -3060,7 +2988,7 @@ H5D_create_chunk_file_map_hyper(const fm_map *fm)
(void)H5S_close(tmp_fchunk);
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSELECT, FAIL, "can't create chunk selection")
} /* end if */
-
+
/* Resize chunk's dataspace dimensions to size of chunk */
if(H5S_set_extent_real(tmp_fchunk,fm->chunk_dim)<0) {
(void)H5S_close(tmp_fchunk);
@@ -3173,7 +3101,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5D_create_chunk_mem_map_hyper(const fm_map *fm)
{
H5SL_node_t *curr_node; /* Current node in skip list */
@@ -3185,7 +3113,7 @@ H5D_create_chunk_mem_map_hyper(const fm_map *fm)
hssize_t chunk_adjust[H5O_LAYOUT_NDIMS]; /* Adjustment to make to a particular chunk */
unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
-
+
FUNC_ENTER_NOAPI_NOINIT(H5D_create_chunk_mem_map_hyper)
/* Sanity check */
@@ -3292,7 +3220,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5D_chunk_file_cb(void UNUSED *elem, hid_t UNUSED type_id, unsigned ndims, const hsize_t *coords, void *_fm)
{
fm_map *fm = (fm_map*)_fm; /* File<->memory chunk mapping info */
@@ -3301,7 +3229,7 @@ H5D_chunk_file_cb(void UNUSED *elem, hid_t UNUSED type_id, unsigned ndims, const
hsize_t chunk_index; /* Chunk index */
unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
-
+
FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_file_cb)
/* Calculate the index of this chunk */
@@ -3408,7 +3336,7 @@ done:
*-------------------------------------------------------------------------
*/
/* ARGSUSED */
-static herr_t
+static herr_t
H5D_chunk_mem_cb(void UNUSED *elem, hid_t UNUSED type_id, unsigned ndims, const hsize_t *coords, void *_fm)
{
fm_map *fm = (fm_map*)_fm; /* File<->memory chunk mapping info */
@@ -3416,7 +3344,7 @@ H5D_chunk_mem_cb(void UNUSED *elem, hid_t UNUSED type_id, unsigned ndims, const
hsize_t coords_in_mem[H5O_LAYOUT_NDIMS]; /* Coordinates of element in memory */
hsize_t chunk_index; /* Chunk index */
herr_t ret_value = SUCCEED; /* Return value */
-
+
FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_mem_cb)
/* Calculate the index of this chunk */
@@ -3585,22 +3513,21 @@ done:
*
*-------------------------------------------------------------------------
*/
-static herr_t
+static herr_t
H5D_ioinfo_init(H5D_t *dset, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
hid_t
#if !(defined H5_HAVE_PARALLEL || defined H5S_DEBUG)
UNUSED
#endif /* H5_HAVE_PARALLEL */
- dp_dxpl_id, H5D_dxpl_cache_t
-
+ dp_dxpl_id, H5D_dxpl_cache_t
#if !(defined H5_HAVE_PARALLEL || defined H5S_DEBUG)
UNUSED
#endif /* H5_HAVE_PARALLEL */
-*dp_dxpl_cache,const H5S_t
+ *dp_dxpl_cache, const H5S_t
#if !(defined H5_HAVE_PARALLEL || defined H5S_DEBUG)
UNUSED
#endif /* H5_HAVE_PARALLEL */
- *mem_space, const H5S_t
+ *mem_space, const H5S_t
#if !(defined H5_HAVE_PARALLEL || defined H5S_DEBUG)
UNUSED
#endif /* H5_HAVE_PARALLEL */
@@ -3608,7 +3535,7 @@ H5D_ioinfo_init(H5D_t *dset, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
#ifndef H5_HAVE_PARALLEL
UNUSED
#endif /* H5_HAVE_PARALLEL */
- flags, hbool_t
+ flags, hbool_t
#ifndef H5_HAVE_PARALLEL
UNUSED
#endif /* H5_HAVE_PARALLEL */
@@ -3662,14 +3589,14 @@ H5D_ioinfo_init(H5D_t *dset, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
/* Check if we can use the optimized parallel I/O routines */
if(opt==TRUE) {
/* Set the pointers to the MPI-specific routines */
- io_info->ops.read = H5D_mpio_select_read;
+ io_info->ops.read = H5D_mpio_select_read;
io_info->ops.write = H5D_mpio_select_write;
io_info->ops_sca.read = H5D_select_read;
io_info->ops_sca.write = H5D_select_write;
- *use_par_opt_io=TRUE;
+
/* Indicate that the I/O will use collective */
- }
- /* end if */
+ *use_par_opt_io=TRUE;
+ } /* end if */
else {
/* Indicate that the I/O will _NOT_ be parallel, use independent IO */
*use_par_opt_io=FALSE;
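[Editor's note: the effect of the hunk above is to keep two op tables side by side when the optimized parallel path is available: ops carries the collective MPI-IO routines, ops_sca the serial routines used for per-chunk independent fallback. Sketched below; the else-branch wiring is inferred from the surrounding code, not shown in this hunk.]

    if (opt == TRUE) {
        io_info->ops.read      = H5D_mpio_select_read;  /* collective path */
        io_info->ops.write     = H5D_mpio_select_write;
        io_info->ops_sca.read  = H5D_select_read;       /* fallback path   */
        io_info->ops_sca.write = H5D_select_write;
        *use_par_opt_io = TRUE;   /* the I/O will use collective transfers */
    } else {
        io_info->ops.read  = H5D_select_read;   /* independent I/O only (inferred) */
        io_info->ops.write = H5D_select_write;
        *use_par_opt_io = FALSE;
    }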
@@ -3694,135 +3621,121 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_ioinfo_init() */
-
#ifdef H5_HAVE_PARALLEL
-
/*-------------------------------------------------------------------------
* Function: H5D_mpio_get_mini_chunk
*
* Purpose: Routine for obtaining minimum number of chunks to cover
- hyperslab selection selected by all processors.
- *
+ * hyperslab selection selected by all processors.
*
* Return: Non-negative on success/Negative on failure
*
* Programmer:
- *
*
* Modifications:
*
*-------------------------------------------------------------------------
*/
-
static herr_t H5D_mpio_get_mini_chunk(const H5D_t *dset,
- const H5S_t *mem_space,
+ const H5S_t *mem_space,
const H5S_t *file_space,
- int *min_chunkf) {
-
-
- hsize_t chunk_dim[H5O_LAYOUT_NDIMS]; /* Chunk dimensions */
- hsize_t startf[H5S_MAX_RANK], /* Selection start bounds */
- endf[H5S_MAX_RANK]; /* Selection end bounds */
- unsigned dim_rankf; /* Number of dimensions of file dataspace */
- int pcheck_hyper,check_hyper, /* Flags for checking if selection is in one chunk */
- tnum_chunkf, /* Number of chunks selection overlaps */
- max_chunkf, /* Maximum number of chunks selection overlaps */
- num_chunks_same; /* Flag indicating whether all processes have the same # of chunks to operate on */
- unsigned dim_chunks; /* Temporary number of chunks in a dimension */
- MPI_Comm comm; /* MPI communicator for file */
- int mpi_rank; /* Rank in MPI communicator */
- int mpi_code; /* MPI return code */
- unsigned u; /* Local index variable */
- herr_t ret_value;
-
- ret_value = SUCCEED;
- FUNC_ENTER_NOAPI_NOINIT(H5D_mpio_get_mini_chunk);
- /* Getting MPI communicator and rank */
- if((comm = H5F_mpi_get_comm(dset->ent.file))==MPI_COMM_NULL)
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't retrieve MPI communicator")
- if((mpi_rank = H5F_mpi_get_rank(dset->ent.file))<0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't retrieve MPI rank")
-
-
- dim_rankf = H5S_GET_EXTENT_NDIMS(file_space);
-
- if(H5S_SELECT_BOUNDS(file_space,startf,endf)==FAIL)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE,FAIL, "invalid check for single selection blocks");
-
- for(u=0; u < dset->shared->layout.u.chunk.ndims; u++)
- chunk_dim[u] = dset->shared->layout.u.chunk.dim[u];
-
-
- /* Compute the number of chunks covered by the selection on this process */
- tnum_chunkf = 1;
- for (u=0; u<dim_rankf; u++) {
- dim_chunks = (endf[u]/chunk_dim[u]-startf[u]/chunk_dim[u])+1;
- tnum_chunkf = dim_chunks*tnum_chunkf;
- }
+ int *min_chunkf)
+{
+ hsize_t chunk_dim[H5O_LAYOUT_NDIMS]; /* Chunk dimensions */
+ hsize_t startf[H5S_MAX_RANK], /* Selection start bounds */
+ endf[H5S_MAX_RANK]; /* Selection end bounds */
+ unsigned dim_rankf; /* Number of dimensions of file dataspace */
+ int pcheck_hyper,check_hyper, /* Flags for checking if selection is in one chunk */
+ tnum_chunkf, /* Number of chunks selection overlaps */
+ max_chunkf, /* Maximum number of chunks selection overlaps */
+ num_chunks_same; /* Flag indicating whether all processes have the same # of chunks to operate on */
+ unsigned dim_chunks; /* Temporary number of chunks in a dimension */
+ MPI_Comm comm; /* MPI communicator for file */
+ int mpi_rank; /* Rank in MPI communicator */
+ int mpi_code; /* MPI return code */
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED;
- /* Determine the minimum and maximum # of chunks for all processes */
+ FUNC_ENTER_NOAPI_NOINIT(H5D_mpio_get_mini_chunk);
- if (MPI_SUCCESS != (mpi_code= MPI_Reduce(&tnum_chunkf,min_chunkf,1,MPI_INT,MPI_MIN,0,comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Reduce failed", mpi_code)
-
-
- /* Broadcast the flag indicating the number of chunks are the same */
- if (MPI_SUCCESS != (mpi_code= MPI_Bcast(min_chunkf,1,MPI_INT,0,comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_code)
+ /* Getting MPI communicator and rank */
+ if((comm = H5F_mpi_get_comm(dset->ent.file))==MPI_COMM_NULL)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't retrieve MPI communicator")
+ if((mpi_rank = H5F_mpi_get_rank(dset->ent.file))<0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't retrieve MPI rank")
- done:
- FUNC_LEAVE_NOAPI(ret_value);
-
-}
+ dim_rankf = H5S_GET_EXTENT_NDIMS(file_space);
+
+ /* Get selection bounds */
+ if(H5S_SELECT_BOUNDS(file_space,startf,endf)==FAIL)
+        HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get selection bounds");
+
+ /* Get chunk dimensions */
+ for(u=0; u < dset->shared->layout.u.chunk.ndims; u++)
+ chunk_dim[u] = dset->shared->layout.u.chunk.dim[u];
+
+ /* Compute the number of chunks covered by the selection on this process */
+ tnum_chunkf = 1;
+ for (u=0; u<dim_rankf; u++) {
+ dim_chunks = (endf[u]/chunk_dim[u]-startf[u]/chunk_dim[u])+1;
+ tnum_chunkf = dim_chunks*tnum_chunkf;
+ }
+
+    /* Determine the minimum # of chunks over all processes */
+ if (MPI_SUCCESS != (mpi_code= MPI_Reduce(&tnum_chunkf,min_chunkf,1,MPI_INT,MPI_MIN,0,comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Reduce failed", mpi_code)
+
+    /* Broadcast the minimum # of chunks to all processes */
+ if (MPI_SUCCESS != (mpi_code= MPI_Bcast(min_chunkf,1,MPI_INT,0,comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_code)
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value);
+} /* end H5D_mpio_get_mini_chunk() */
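The chunk count computed above comes straight from integer division of the inclusive selection bounds by the chunk extents. A minimal standalone sketch of that formula (the bounds and chunk sizes are made-up values, not HDF5 code):

    #include <stdio.h>

    /* Sketch: number of chunks a rectangular selection [start, end]
     * (inclusive element coordinates) touches:
     * (end/chunk - start/chunk) + 1, multiplied over all dimensions. */
    int main(void)
    {
        unsigned long start[2] = {5, 0};    /* hypothetical selection start */
        unsigned long end[2]   = {14, 19};  /* hypothetical selection end */
        unsigned long chunk[2] = {10, 10};  /* hypothetical chunk dimensions */
        unsigned long nchunks  = 1;
        int u;

        for (u = 0; u < 2; u++)
            nchunks *= (end[u] / chunk[u] - start[u] / chunk[u]) + 1;

        printf("selection spans %lu chunks\n", nchunks); /* prints 4 */
        return 0;
    }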
/*-------------------------------------------------------------------------
* Function: H5D_obtain_duplicate_pid
*
- * Purpose: Routine for obtaining a copy property list ID of
- data transfer property.
-
- *
+ * Purpose:     Routine for obtaining a copy of the data transfer
+ *              property list ID.
*
* Return: Non-negative on success/Negative on failure
*
* Programmer:
- *
*
* Modifications:
*
*-------------------------------------------------------------------------
*/
-
static herr_t H5D_obtain_duplicate_pid(hid_t dxpl_id,
- hid_t* dp_id,
+ hid_t* dp_id,
H5D_dxpl_cache_t **cache)
{
+ H5FD_mpio_xfer_t xfer_mode;
+    H5P_genplist_t *dp_dx_plist;            /* Data transfer property list */
+ herr_t ret_value=SUCCEED;
+
+ FUNC_ENTER_NOAPI_NOINIT(H5D_obtain_duplicate_pid)
- H5FD_mpio_xfer_t xfer_mode;
- H5P_genplist_t *dp_dx_plist; /* Data transer property list */
- herr_t ret_value=SUCCEED;
-
- FUNC_ENTER_NOAPI_NOINIT(H5D_obtain_duplicate_pid)
+ *dp_id = H5Pcopy(dxpl_id);
- *dp_id = H5Pcopy(dxpl_id);
-
- /* printf("inside function dp id %d\n",*dp_id);*/
/* Get the dataset transfer property list */
if (NULL == (dp_dx_plist = H5I_object(*dp_id)))
        HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset transfer property list")
+ /* Switch to independent I/O */
xfer_mode = H5FD_MPIO_INDEPENDENT;
if(H5P_set (dp_dx_plist, H5D_XFER_IO_XFER_MODE_NAME, &xfer_mode) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set transfer mode")
-
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set transfer mode")
+
/* Fill the DXPL cache values for later use */
if (H5D_get_dxpl_cache(*dp_id,cache)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
- done:
+done:
FUNC_LEAVE_NOAPI(ret_value)
-
-}
+} /* end H5D_obtain_duplicate_pid() */
#endif /*H5_HAVE_PARALLEL*/
+
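At the public API level, the effect of H5D_obtain_duplicate_pid can be approximated with H5Pcopy plus H5Pset_dxpl_mpio. A sketch, assuming an MPI-enabled HDF5 build (the helper name is hypothetical and error handling is minimal):

    #include "hdf5.h"

    /* Sketch: duplicate a transfer property list and force independent
     * I/O on the copy, analogous to the internal H5P_set call above. */
    static hid_t dup_dxpl_as_independent(hid_t dxpl_id)
    {
        hid_t dp_id = H5Pcopy(dxpl_id);         /* duplicate the DXPL */

        if (dp_id < 0)
            return -1;
        if (H5Pset_dxpl_mpio(dp_id, H5FD_MPIO_INDEPENDENT) < 0) {
            H5Pclose(dp_id);                    /* clean up on failure */
            return -1;
        }
        return dp_id;                           /* caller must H5Pclose() */
    }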
diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c
index f85c551..572aaca 100644
--- a/src/H5Dmpio.c
+++ b/src/H5Dmpio.c
@@ -40,7 +40,7 @@
static herr_t
H5D_mpio_spaces_xfer(H5D_io_info_t *io_info, size_t elmt_size,
const H5S_t *file_space, const H5S_t *mem_space,
- void *buf/*out*/,
+ void *buf/*out*/,
hbool_t do_write);
@@ -60,7 +60,6 @@ H5D_mpio_spaces_xfer(H5D_io_info_t *io_info, size_t elmt_size,
*
*-------------------------------------------------------------------------
*/
-
htri_t
H5D_mpio_opt_possible( const H5D_t *dset, const H5S_t *mem_space, const H5S_t *file_space, const unsigned flags)
{
@@ -88,16 +87,16 @@ H5D_mpio_opt_possible( const H5D_t *dset, const H5S_t *mem_space, const H5S_t *f
HGOTO_DONE(FALSE);
/* Dataset storage must be contiguous or chunked */
- if ((flags&H5S_CONV_STORAGE_MASK)!=H5S_CONV_STORAGE_CONTIGUOUS &&
+ if ((flags&H5S_CONV_STORAGE_MASK)!=H5S_CONV_STORAGE_CONTIGUOUS &&
(flags&H5S_CONV_STORAGE_MASK)!=H5S_CONV_STORAGE_CHUNKED)
HGOTO_DONE(FALSE);
- /*The handling of memory space is different for chunking
+    /* The handling of memory space is different for chunked
       and contiguous storage.
       For contiguous storage, mem_space and file_space won't
       change when it is doing disk IO.
For chunking storage, mem_space will change for different
- chunks. So for chunking storage, whether we can use
+ chunks. So for chunking storage, whether we can use
       collective IO is deferred until each chunk's IO is reached.
For contiguous storage, if we find the MPI-IO cannot
       support complicated MPI derived data types, we will
@@ -111,16 +110,13 @@ H5D_mpio_opt_possible( const H5D_t *dset, const H5S_t *mem_space, const H5S_t *f
HGOTO_DONE(FALSE);
#endif
}
-
- if(dset->shared->layout.type == H5D_CHUNKED)
- if(dset->shared->dcpl_cache.pline.nused>0)
- HGOTO_DONE(FALSE); /* Perform the independent write operation */
-
+ if(dset->shared->layout.type == H5D_CHUNKED)
+ if(dset->shared->dcpl_cache.pline.nused>0)
+ HGOTO_DONE(FALSE); /* Perform the independent write operation */
done:
FUNC_LEAVE_NOAPI(ret_value);
-
} /* H5D_mpio_opt_possible() */
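The relocated filter check above means a chunked dataset with any I/O filter in its pipeline is never a candidate for the collective optimization. Condensed into a hypothetical stand-alone predicate (not an HDF5 API):

    /* Sketch of the decision above: collective I/O is ruled out for
     * chunked storage whose filter pipeline is non-empty. */
    static int collective_possible_sketch(int layout_is_chunked,
                                          unsigned pline_nused)
    {
        if (layout_is_chunked && pline_nused > 0)
            return 0;   /* fall back to independent I/O */
        return 1;       /* collective I/O remains a candidate */
    }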
@@ -219,7 +215,7 @@ H5D_mpio_spaces_xfer(H5D_io_info_t *io_info, size_t elmt_size,
else {
haddr_t chunk_addr; /* for collective chunk IO */
- assert(io_info->dset->shared->layout.type == H5D_CHUNKED);
+ assert(io_info->dset->shared->layout.type == H5D_CHUNKED);
chunk_addr=H5D_istore_get_addr(io_info,NULL);
addr = H5F_BASE_ADDR(io_info->dset->ent.file) + chunk_addr + mpi_file_offset;
}
@@ -288,8 +284,7 @@ H5D_mpio_select_read(H5D_io_info_t *io_info,
FUNC_ENTER_NOAPI_NOFUNC(H5D_mpio_select_read);
-
- ret_value = H5D_mpio_spaces_xfer(io_info, elmt_size, file_space,
+ ret_value = H5D_mpio_spaces_xfer(io_info, elmt_size, file_space,
mem_space, buf, 0/*read*/);
FUNC_LEAVE_NOAPI(ret_value);
@@ -321,11 +316,10 @@ H5D_mpio_select_write(H5D_io_info_t *io_info,
FUNC_ENTER_NOAPI_NOFUNC(H5D_mpio_select_write);
/*OKAY: CAST DISCARDS CONST QUALIFIER*/
- ret_value = H5D_mpio_spaces_xfer(io_info, elmt_size, file_space,
+ ret_value = H5D_mpio_spaces_xfer(io_info, elmt_size, file_space,
mem_space, (void*)buf, 1/*write*/);
FUNC_LEAVE_NOAPI(ret_value);
} /* end H5D_mpio_spaces_write() */
-
-
#endif /* H5_HAVE_PARALLEL */
+
diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h
index 7b2c9e3..3137f53 100644
--- a/src/H5Dpkg.h
+++ b/src/H5Dpkg.h
@@ -233,12 +233,12 @@ H5_DLL ssize_t H5D_contig_writevv(const H5D_io_info_t *io_info,
/* Functions that operate on compact dataset storage */
H5_DLL ssize_t H5D_compact_readvv(const H5D_io_info_t *io_info,
- size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_size_arr[], hsize_t dset_offset_arr[],
- size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_size_arr[], hsize_t mem_offset_arr[],
+ size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_size_arr[], hsize_t dset_offset_arr[],
+ size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_size_arr[], hsize_t mem_offset_arr[],
void *buf);
H5_DLL ssize_t H5D_compact_writevv(const H5D_io_info_t *io_info,
- size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_size_arr[], hsize_t dset_offset_arr[],
- size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_size_arr[], hsize_t mem_offset_arr[],
+ size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_size_arr[], hsize_t dset_offset_arr[],
+ size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_size_arr[], hsize_t mem_offset_arr[],
const void *buf);
/* Functions that operate on indexed storage */
diff --git a/src/H5Smpio.c b/src/H5Smpio.c
index a9b90ee..48f2358 100644
--- a/src/H5Smpio.c
+++ b/src/H5Smpio.c
@@ -66,7 +66,7 @@ H5S_mpio_span_hyper_type( const H5S_t *space, size_t elmt_size,
hsize_t *extra_offset,
hbool_t *is_derived_type );
-static herr_t H5S_obtain_datatype(const hsize_t size[],
+static herr_t H5S_obtain_datatype(const hsize_t size[],
H5S_hyper_span_t* span,MPI_Datatype *span_type,
size_t elmt_size,int dimindex);
@@ -314,10 +314,10 @@ H5S_mpio_hyper_type( const H5S_t *space, size_t elmt_size,
goto empty;
}
} /* end else */
-
+
/**********************************************************************
Compute array "offset[rank]" which gives the offsets for a multi-
- dimensional array with dimensions "d[i].xtent" (i=0,1,...,rank-1).
+ dimensional array with dimensions "d[i].xtent" (i=0,1,...,rank-1).
**********************************************************************/
offset[rank-1] = 1;
max_xtent[rank-1] = d[rank-1].xtent;
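The offset[] values initialized above follow the usual row-major stride recurrence; the loop for the remaining dimensions is inferred from the initialization shown here. A standalone sketch with made-up extents:

    #include <stdio.h>

    /* Sketch: row-major strides for dimension extents xtent[], matching
     * offset[rank-1] = 1 and offset[i] = offset[i+1] * xtent[i+1]. */
    int main(void)
    {
        unsigned long xtent[3] = {4, 5, 6};  /* hypothetical extents */
        unsigned long offset[3];
        int i, rank = 3;

        offset[rank - 1] = 1;
        for (i = rank - 2; i >= 0; i--)
            offset[i] = offset[i + 1] * xtent[i + 1];

        for (i = 0; i < rank; i++)
            printf("offset[%d] = %lu\n", i, offset[i]); /* 30, 6, 1 */
        return 0;
    }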
@@ -397,9 +397,9 @@ H5S_mpio_hyper_type( const H5S_t *space, size_t elmt_size,
block_length[0] = 1;
block_length[1] = 1;
block_length[2] = 1;
-
+
displacement[0] = 0;
-
+
old_types[0] = MPI_LB;
old_types[1] = outer_type;
old_types[2] = MPI_UB;
@@ -414,7 +414,7 @@ H5S_mpio_hyper_type( const H5S_t *space, size_t elmt_size,
displacement, /* displacements */
old_types, /* old types */
&inner_type); /* new type */
-
+
MPI_Type_free (&outer_type);
if (mpi_code!=MPI_SUCCESS)
HMPI_GOTO_ERROR(FAIL, "couldn't resize MPI vector type", mpi_code);
@@ -424,7 +424,7 @@ H5S_mpio_hyper_type( const H5S_t *space, size_t elmt_size,
}
} /* end for */
/***************************
-* End of loop, walking
+* End of loop, walking
* thru dimensions.
***************************/
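The MPI_LB/MPI_UB struct built in the hunk above is the classic MPI-1 idiom for overriding a datatype's extent (modern MPI would use MPI_Type_create_resized). A minimal sketch of the idiom with all three displacements shown explicitly; the hunk only shows displacement[0], so the other two are assumed set elsewhere:

    #include <mpi.h>

    /* Sketch: wrap old_type between MPI_LB and MPI_UB markers so that
     * consecutive repetitions of the new type are new_extent bytes apart. */
    static int resize_mpi1_style(MPI_Datatype old_type, MPI_Aint new_extent,
                                 MPI_Datatype *new_type)
    {
        int          block_length[3] = {1, 1, 1};
        MPI_Aint     displacement[3];
        MPI_Datatype old_types[3];

        displacement[0] = 0;           /* lower bound marker */
        displacement[1] = 0;           /* the payload type itself */
        displacement[2] = new_extent;  /* upper bound sets the extent */
        old_types[0] = MPI_LB;
        old_types[1] = old_type;
        old_types[2] = MPI_UB;
        return MPI_Type_struct(3, block_length, displacement, old_types, new_type);
    }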
@@ -469,7 +469,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5S_mpio_span_hyper_type
*
- * Purpose: Translate an HDF5 irregular hyperslab selection into an
+ * Purpose: Translate an HDF5 irregular hyperslab selection into an
MPI type.
*
* Return: non-negative on success, negative on failure.
@@ -482,9 +482,9 @@ done:
*
* Programmer: kyang
*
- */
+ */
static herr_t
-H5S_mpio_span_hyper_type( const H5S_t *space,
+H5S_mpio_span_hyper_type( const H5S_t *space,
size_t elmt_size,
MPI_Datatype *new_type,/* out: */
size_t *count,
@@ -499,26 +499,26 @@ H5S_mpio_span_hyper_type( const H5S_t *space,
herr_t ret_value = SUCCEED;
MPI_Aint extent,lb;
-
+
FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5S_mpio_span_hyper_type);
/* Check args */
assert (space);
-
+
/* assert(sizeof(MPI_Aint) >= sizeof(elmt_size)); not sure the reason*/
rank = space->extent.rank;
- /* size = HDcalloc((size_t)rank,sizeof(hsize_t)); */
+ /* size = HDcalloc((size_t)rank,sizeof(hsize_t)); */
if (0==elmt_size)
goto empty;
size = space->extent.size;
- if(size == 0)
+ if(size == 0)
goto empty;
-
+
odown = space->select.sel_info.hslab->span_lst;
- if(odown == NULL)
+ if(odown == NULL)
goto empty;
ospan = odown->head;
if(ospan == NULL)
@@ -527,13 +527,13 @@ H5S_mpio_span_hyper_type( const H5S_t *space,
/* obtain derived data type */
H5S_obtain_datatype(space->extent.size,ospan,&span_type,elmt_size,rank);
MPI_Type_commit(&span_type);
-
+
*new_type = span_type;
/* fill in the remaining return values */
*count = 1;
*extra_offset = 0;
*is_derived_type = 1;
-
+
HGOTO_DONE(SUCCEED);
empty:
@@ -551,8 +551,8 @@ done:
/*-------------------------------------------------------------------------
 * Function:	H5S_obtain_datatype
*
- * Purpose: Obtain an MPI derived datatype based on span-tree
- implementation
+ * Purpose:	Obtain an MPI derived datatype based on the span-tree
+ *              implementation of the hyperslab selection.
*
* Return: non-negative on success, negative on failure.
*
@@ -560,13 +560,13 @@ done:
*
* Programmer: kyang
*
- */
-static herr_t H5S_obtain_datatype(const hsize_t size[],
+ */
+static herr_t H5S_obtain_datatype(const hsize_t size[],
H5S_hyper_span_t* span,
MPI_Datatype *span_type,
size_t elmt_size,
- int dimindex) {
-
+ int dimindex)
+{
int innercount,outercount;
MPI_Datatype bas_type;
MPI_Datatype temp_type;
@@ -579,7 +579,7 @@ static herr_t H5S_obtain_datatype(const hsize_t size[],
H5S_hyper_span_info_t *down;
H5S_hyper_span_t *tspan;
int mpi_code;
- herr_t ret_value;
+ herr_t ret_value = SUCCEED;
#ifdef H5_HAVE_MPI2
MPI_Aint sizeaint,sizedtype;
@@ -587,10 +587,10 @@ static herr_t H5S_obtain_datatype(const hsize_t size[],
hsize_t total_lowd,total_lowd1;
int i;
int ret;
-
+
FUNC_ENTER_NOAPI_NOINIT(H5S_obtain_datatype);
- ret_value = SUCCEED;
assert(span);
+
inner_type = NULL;
down = NULL;
tspan = NULL;
@@ -625,8 +625,8 @@ static herr_t H5S_obtain_datatype(const hsize_t size[],
inner_type = (MPI_Datatype *)HDcalloc((size_t)outercount,sizeof(int));
#endif
-
-
+
+
tspan = span;
outercount = 0;
@@ -650,7 +650,7 @@ static herr_t H5S_obtain_datatype(const hsize_t size[],
if(MPI_SUCCESS != (mpi_code = MPI_Type_hindexed(outercount,blocklen,
disp,bas_type,span_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_hindexed failed", mpi_code);
-
+
}
else {/* dimindex is the rank of the dimension */
@@ -664,9 +664,9 @@ static herr_t H5S_obtain_datatype(const hsize_t size[],
for ( i = dimindex-1; i > 1; i--)
total_lowd1 = total_lowd1 * size[i];
-
+
while(tspan){
-
+
       /* Displacements should be in bytes and should carry dimension information */
/* First using MPI Type vector to build derived data type for this span only */
/* Need to calculate the disp in byte for this dimension. */
@@ -676,7 +676,7 @@ static herr_t H5S_obtain_datatype(const hsize_t size[],
blocklen[outercount] = 1;
/* generating inner derived datatype by using MPI_Type_hvector */
- if(FAIL == H5S_obtain_datatype(size,tspan->down->head,&temp_type,elmt_size,dimindex-1))
+ if(FAIL == H5S_obtain_datatype(size,tspan->down->head,&temp_type,elmt_size,dimindex-1))
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't obtain MPI derived data type");
if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&temp_type)))
@@ -698,20 +698,18 @@ static herr_t H5S_obtain_datatype(const hsize_t size[],
tspan = tspan->next;
}
-
+
/* building the whole vector datatype */
-
- if(MPI_SUCCESS != (mpi_code =
+ if(MPI_SUCCESS != (mpi_code =
MPI_Type_struct(outercount,blocklen,disp,inner_type,span_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_struct failed", mpi_code);
}
-
+
if(inner_type != NULL){
if(down != NULL) {
- for(i=0;i<outercount;i++)
+ for(i=0;i<outercount;i++)
MPI_Type_free(&inner_type[i]);
-
}
}
@@ -721,7 +719,7 @@ static herr_t H5S_obtain_datatype(const hsize_t size[],
HDfree(blocklen);
if(disp != NULL)
HDfree(disp);
- done:
+done:
FUNC_LEAVE_NOAPI(ret_value);
}
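At the bottom of the span-tree recursion, H5S_obtain_datatype describes a 1-D run of spans with MPI_Type_hindexed, as in the hunk above. A self-contained sketch of that leaf-level construction over MPI_BYTE (the two spans and their byte offsets are made-up values):

    #include <mpi.h>

    int main(int argc, char **argv)
    {
        MPI_Datatype span_type;
        int          blocklen[2] = {4, 4};   /* elements per span */
        MPI_Aint     disp[2]     = {0, 16};  /* byte offset of each span */

        MPI_Init(&argc, &argv);
        /* One hindexed type gathers both spans into a single datatype,
         * mirroring the outercount/blocklen/disp arrays built above. */
        MPI_Type_hindexed(2, blocklen, disp, MPI_BYTE, &span_type);
        MPI_Type_commit(&span_type);
        /* ... span_type could now feed an MPI_File_set_view() ... */
        MPI_Type_free(&span_type);
        MPI_Finalize();
        return 0;
    }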