Diffstat (limited to 'src/H5Smpio.c')
-rw-r--r--  src/H5Smpio.c  481
1 file changed, 286 insertions(+), 195 deletions(-)
diff --git a/src/H5Smpio.c b/src/H5Smpio.c
index 28bd253..147fd25 100644
--- a/src/H5Smpio.c
+++ b/src/H5Smpio.c
@@ -31,6 +31,7 @@
#include "H5private.h" /* Generic Functions */
#include "H5Dprivate.h" /* Datasets */
#include "H5Eprivate.h" /* Error handling */
+#include "H5FLprivate.h" /* Free Lists */
#include "H5MMprivate.h" /* Memory management */
#include "H5Spkg.h" /* Dataspaces */
#include "H5VMprivate.h" /* Vector and array functions */
@@ -50,6 +51,19 @@
/* Local Typedefs */
/******************/
+/* Node in linked list of MPI data types created during traversal of irregular hyperslab selection */
+typedef struct H5S_mpio_mpitype_node_t {
+ MPI_Datatype type; /* MPI Datatype */
+ struct H5S_mpio_mpitype_node_t *next; /* Pointer to next node in list */
+} H5S_mpio_mpitype_node_t;
+
+/* List to track MPI data types generated during traversal of irregular hyperslab selection */
+typedef struct H5S_mpio_mpitype_list_t {
+ H5S_mpio_mpitype_node_t *head; /* Pointer to head of list */
+ H5S_mpio_mpitype_node_t *tail; /* Pointer to tail of list */
+} H5S_mpio_mpitype_list_t;
+
+
/********************/
/* Local Prototypes */
/********************/
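The two structs above form a simple singly linked list that records every MPI datatype built while walking an irregular hyperslab selection, so that all of them can be freed after the final type is committed. A minimal standalone sketch of the same idea, using plain malloc/free instead of HDF5's H5FL free lists (the helper names track_mpi_type and release_mpi_types are illustrative, not part of the library):

    #include <stdlib.h>
    #include <mpi.h>

    /* Simplified node and list types mirroring the structs above */
    typedef struct mpitype_node_t {
        MPI_Datatype            type;   /* MPI datatype to release later */
        struct mpitype_node_t  *next;   /* next node in the list */
    } mpitype_node_t;

    typedef struct mpitype_list_t {
        mpitype_node_t *head;           /* first node in the list */
        mpitype_node_t *tail;           /* last node in the list */
    } mpitype_list_t;

    /* Hypothetical helper: append a datatype to the tracking list */
    static int
    track_mpi_type(mpitype_list_t *list, MPI_Datatype type)
    {
        mpitype_node_t *node = malloc(sizeof(*node));

        if (NULL == node)
            return -1;
        node->type = type;
        node->next = NULL;
        if (NULL == list->head)
            list->head = list->tail = node;
        else {
            list->tail->next = node;
            list->tail = node;
        }
        return 0;
    }

    /* Hypothetical helper: free every tracked datatype and the list nodes */
    static void
    release_mpi_types(mpitype_list_t *list)
    {
        mpitype_node_t *curr = list->head;

        while (curr) {
            mpitype_node_t *next = curr->next;

            MPI_Type_free(&curr->type); /* release the MPI handle */
            free(curr);                 /* release the node itself */
            curr = next;
        }
        list->head = list->tail = NULL;
    }

In the patch itself the nodes come from an H5FL free list and the release pass is H5S__release_datatype(), shown further down.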
@@ -65,12 +79,14 @@ static herr_t H5S__mpio_point_type(const H5S_t *space, size_t elmt_size,
static herr_t H5S__mpio_permute_type(const H5S_t *space, size_t elmt_size,
hsize_t **permute_map, MPI_Datatype *new_type, int *count,
hbool_t *is_derived_type);
-static herr_t H5S__mpio_hyper_type(const H5S_t *space, size_t elmt_size,
+static herr_t H5S__mpio_reg_hyper_type(const H5S_t *space, size_t elmt_size,
MPI_Datatype *new_type, int *count, hbool_t *is_derived_type);
static herr_t H5S__mpio_span_hyper_type(const H5S_t *space, size_t elmt_size,
MPI_Datatype *new_type, int *count, hbool_t *is_derived_type);
-static herr_t H5S__obtain_datatype(const hsize_t down[], H5S_hyper_span_t* span,
- const MPI_Datatype *elmt_type, MPI_Datatype *span_type, size_t elmt_size);
+static herr_t H5S__release_datatype(H5S_mpio_mpitype_list_t *type_list);
+static herr_t H5S__obtain_datatype(H5S_hyper_span_info_t *spans, const hsize_t *down,
+ size_t elmt_size, const MPI_Datatype *elmt_type, MPI_Datatype *span_type,
+ H5S_mpio_mpitype_list_t *type_list, unsigned op_info_i, uint64_t op_gen);
/*****************************/
@@ -83,6 +99,8 @@ static herr_t H5S__obtain_datatype(const hsize_t down[], H5S_hyper_span_t* span,
/*********************/
+/* Declare a free list to manage the H5S_mpio_mpitype_node_t struct */
+H5FL_DEFINE_STATIC(H5S_mpio_mpitype_node_t);
/*-------------------------------------------------------------------------
@@ -385,9 +403,12 @@ H5S__mpio_point_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_typ
/* Iterate through list of elements */
curr = space->select.sel_info.pnt_lst->head;
for(u = 0 ; u < num_points ; u++) {
- /* calculate the displacement of the current point */
- disp[u] = H5VM_array_offset(space->extent.rank, space->extent.size, curr->pnt);
- disp[u] *= elmt_size;
+ /* Calculate the displacement of the current point */
+ hsize_t disp_tmp = H5VM_array_offset(space->extent.rank, space->extent.size, curr->pnt);
+ if(disp_tmp > LONG_MAX) /* Maximum value of type long */
+ HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "disp overflow")
+ disp[u] = (MPI_Aint)disp_tmp;
+ disp[u] *= (MPI_Aint)elmt_size;
/* This is a File Space used to set the file view, so adjust the displacements
* to have them monotonically non-decreasing.
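The new LONG_MAX guard exists because MPI_Aint is typically a signed long while hsize_t is an unsigned 64-bit value, so the displacement must be range-checked before the cast. A small sketch of that checked conversion in isolation (checked_to_aint is a hypothetical helper, not an HDF5 or MPI routine):

    #include <limits.h>
    #include <stdint.h>
    #include <mpi.h>

    /* Hypothetical helper: convert an unsigned 64-bit offset to MPI_Aint,
     * failing instead of silently truncating when the value does not fit. */
    static int
    checked_to_aint(uint64_t value, MPI_Aint *out)
    {
        /* MPI_Aint is commonly a signed long, so LONG_MAX bounds the usable range */
        if (value > (uint64_t)LONG_MAX)
            return -1;                 /* caller reports "disp overflow" */
        *out = (MPI_Aint)value;
        return 0;
    }

A call site would test the helper's return value and raise the same "disp overflow" error the patch uses before multiplying by the element size.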
@@ -514,7 +535,7 @@ H5S__mpio_permute_type(const H5S_t *space, size_t elmt_size, hsize_t **permute,
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of displacements")
/* Initialize selection iterator */
- if(H5S_select_iter_init(&sel_iter, space, elmt_size) < 0)
+ if(H5S_select_iter_init(&sel_iter, space, elmt_size, 0) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to initialize selection iterator")
sel_iter_init = TRUE; /* Selection iteration info has been initialized */
@@ -531,7 +552,7 @@ H5S__mpio_permute_type(const H5S_t *space, size_t elmt_size, hsize_t **permute,
size_t curr_seq; /* Current sequence being worked on */
/* Get the sequences of bytes */
- if(H5S_SELECT_GET_SEQ_LIST(space, 0, &sel_iter, (size_t)H5D_IO_VECTOR_SIZE, max_elem, &nseq, &nelem, off, len) < 0)
+ if(H5S_SELECT_ITER_GET_SEQ_LIST(&sel_iter, (size_t)H5D_IO_VECTOR_SIZE, max_elem, &nseq, &nelem, off, len) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_UNSUPPORTED, FAIL, "sequence length generation failed")
/* Loop, while sequences left to process */
@@ -548,7 +569,9 @@ H5S__mpio_permute_type(const H5S_t *space, size_t elmt_size, hsize_t **permute,
/* Loop, while bytes left in sequence */
while(curr_len > 0) {
/* Set the displacement of the current point */
- disp[u] = curr_off;
+ if(curr_off > LONG_MAX)
+ HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "curr_off overflow")
+ disp[u] = (MPI_Aint)curr_off;
/* This is a memory displacement, so for each point selected,
* apply the map that was generated by the file selection */
@@ -602,9 +625,9 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5S__mpio_hyper_type
+ * Function: H5S__mpio_reg_hyper_type
*
- * Purpose: Translate an HDF5 hyperslab selection into an MPI type.
+ * Purpose: Translate a regular HDF5 hyperslab selection into an MPI type.
*
* Return: Non-negative on success, negative on failure.
*
@@ -618,7 +641,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5S__mpio_hyper_type(const H5S_t *space, size_t elmt_size,
+H5S__mpio_reg_hyper_type(const H5S_t *space, size_t elmt_size,
MPI_Datatype *new_type, int *count, hbool_t *is_derived_type)
{
H5S_sel_iter_t sel_iter; /* Selection iteration info */
@@ -653,7 +676,7 @@ H5S__mpio_hyper_type(const H5S_t *space, size_t elmt_size,
bigio_count = H5_mpi_get_bigio_count();
/* Initialize selection iterator */
- if(H5S_select_iter_init(&sel_iter, space, elmt_size) < 0)
+ if(H5S_select_iter_init(&sel_iter, space, elmt_size, 0) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to initialize selection iterator")
sel_iter_init = TRUE; /* Selection iteration info has been initialized */
@@ -667,7 +690,6 @@ H5S__mpio_hyper_type(const H5S_t *space, size_t elmt_size,
if(sel_iter.u.hyp.iter_rank != 0 && sel_iter.u.hyp.iter_rank < space->extent.rank) {
/* Flattened selection */
rank = sel_iter.u.hyp.iter_rank;
- HDassert(rank <= H5S_MAX_RANK); /* within array bounds */
#ifdef H5S_DEBUG
if(H5DEBUG(S))
HDfprintf(H5DEBUG(S), "%s: Flattened selection\n",FUNC);
@@ -700,9 +722,6 @@ if(H5DEBUG(S)) {
else {
/* Non-flattened selection */
rank = space->extent.rank;
- HDassert(rank <= H5S_MAX_RANK); /* within array bounds */
- if(0 == rank)
- goto empty;
#ifdef H5S_DEBUG
if(H5DEBUG(S))
HDfprintf(H5DEBUG(S),"%s: Non-flattened selection\n",FUNC);
@@ -880,8 +899,14 @@ if(H5DEBUG(S))
****************************************/
/* Calculate start and extent values of this dimension */
- start_disp = d[i].start * offset[i] * elmt_size;
- new_extent = (MPI_Aint)elmt_size * max_xtent[i];
+ /* Check whether the values overflow when cast to MPI_Aint */
+ if(d[i].start > LONG_MAX || offset[i] > LONG_MAX || elmt_size > LONG_MAX)
+ HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "result overflow")
+ start_disp = (MPI_Aint)d[i].start * (MPI_Aint)offset[i] * (MPI_Aint)elmt_size;
+
+ if(max_xtent[i] > LONG_MAX)
+ HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "max_xtent overflow")
+ new_extent = (MPI_Aint)elmt_size * (MPI_Aint)max_xtent[i];
if(MPI_SUCCESS != (mpi_code = MPI_Type_get_extent(outer_type, &lb, &extent_len)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_get_extent failed", mpi_code)
@@ -921,13 +946,6 @@ if(H5DEBUG(S))
/* fill in the remaining return values */
*count = 1; /* only have to move one of these suckers! */
*is_derived_type = TRUE;
- HGOTO_DONE(SUCCEED);
-
-empty:
- /* special case: empty hyperslab */
- *new_type = MPI_BYTE;
- *count = 0;
- *is_derived_type = FALSE;
done:
/* Release selection iterator */
@@ -940,7 +958,7 @@ if(H5DEBUG(S))
HDfprintf(H5DEBUG(S), "Leave %s, count=%ld is_derived_type=%t\n", FUNC, *count, *is_derived_type);
#endif
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5S__mpio_hyper_type() */
+} /* end H5S__mpio_reg_hyper_type() */
/*-------------------------------------------------------------------------
@@ -964,11 +982,13 @@ static herr_t
H5S__mpio_span_hyper_type(const H5S_t *space, size_t elmt_size,
MPI_Datatype *new_type, int *count, hbool_t *is_derived_type)
{
+ H5S_mpio_mpitype_list_t type_list; /* List to track MPI data types created */
MPI_Datatype elmt_type; /* MPI datatype for an element */
hbool_t elmt_type_is_derived = FALSE; /* Whether the element type has been created */
MPI_Datatype span_type; /* MPI datatype for overall span tree */
hsize_t bigio_count; /* Transition point to create derived type */
hsize_t down[H5S_MAX_RANK]; /* 'down' sizes for each dimension */
+ uint64_t op_gen; /* Operation generation value */
int mpi_code; /* MPI return code */
herr_t ret_value = SUCCEED; /* Return value */
@@ -995,12 +1015,23 @@ H5S__mpio_span_hyper_type(const H5S_t *space, size_t elmt_size,
if(H5VM_array_down(space->extent.rank, space->extent.size, down) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGETSIZE, FAIL, "couldn't compute 'down' dimension sizes")
- /* Obtain derived data type */
- if(H5S__obtain_datatype(down, space->select.sel_info.hslab->span_lst->head, &elmt_type, &span_type, elmt_size) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't obtain MPI derived data type")
- if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&span_type)))
+ /* Acquire an operation generation value for creating MPI datatypes */
+ op_gen = H5S__hyper_get_op_gen();
+
+ /* Obtain derived MPI data type */
+ /* Always use op_info[0] since we own this op_info, so there can be no
+ * simultaneous operations */
+ type_list.head = type_list.tail = NULL;
+ if(H5S__obtain_datatype(space->select.sel_info.hslab->span_lst, down, elmt_size, &elmt_type, &span_type, &type_list, 0, op_gen) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't obtain MPI derived data type")
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_dup(span_type, new_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
- *new_type = span_type;
+
+ /* Release MPI data types generated during span tree traversal */
+ if(H5S__release_datatype(&type_list) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "couldn't release MPI derived data type")
/* fill in the remaining return values */
*count = 1;
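Unlike the old code, which handed the raw span_type straight back to the caller, the traversal result is now cached inside the span tree and freed through the type list, so the caller receives an MPI_Type_dup'ed copy it owns independently. A minimal sketch of that ownership split (names illustrative):

    #include <mpi.h>

    /* Sketch: hand a caller an independently owned copy of a cached datatype */
    static int
    export_cached_type(MPI_Datatype cached_type, MPI_Datatype *user_type)
    {
        /* Duplicate so the caller's handle survives MPI_Type_free() on the cache */
        if (MPI_SUCCESS != MPI_Type_dup(cached_type, user_type))
            return -1;
        /* Commit only the caller's copy; the cached handle is freed separately */
        if (MPI_SUCCESS != MPI_Type_commit(user_type))
            return -1;
        return 0;
    }

Committing only the duplicate lets H5S__release_datatype() free every cached handle without invalidating the type returned to the caller.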
@@ -1017,6 +1048,53 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5S__release_datatype
+ *
+ * Purpose: Release the MPI derived datatypes for span-tree hyperslab selection
+ *
+ * Return: Non-negative on success, negative on failure.
+ *
+ * Programmer: Quincey Koziol, February 2, 2019
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5S__release_datatype(H5S_mpio_mpitype_list_t *type_list)
+{
+ H5S_mpio_mpitype_node_t *curr; /* Pointer to head of list */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity check */
+ HDassert(type_list);
+
+ /* Iterate over the list, freeing the MPI data types */
+ curr = type_list->head;
+ while(curr) {
+ H5S_mpio_mpitype_node_t *next; /* Pointer to next node in list */
+ int mpi_code; /* MPI return status code */
+
+ /* Release the MPI data type for this span tree */
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&curr->type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+
+ /* Get pointer to next node in list */
+ next = curr->next;
+
+ /* Free the current node */
+ curr = H5FL_FREE(H5S_mpio_mpitype_node_t, curr);
+
+ /* Advance to next node */
+ curr = next;
+ } /* end while */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5S__release_datatype() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5S__obtain_datatype
*
* Purpose: Obtain an MPI derived datatype for span-tree hyperslab selection
@@ -1030,8 +1108,9 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5S__obtain_datatype(const hsize_t *down, H5S_hyper_span_t *spans,
- const MPI_Datatype *elmt_type, MPI_Datatype *span_type, size_t elmt_size)
+H5S__obtain_datatype(H5S_hyper_span_info_t *spans, const hsize_t *down,
+ size_t elmt_size, const MPI_Datatype *elmt_type, MPI_Datatype *span_type,
+ H5S_mpio_mpitype_list_t *type_list, unsigned op_info_i, uint64_t op_gen)
{
H5S_hyper_span_t *span; /* Hyperslab span to iterate with */
hsize_t bigio_count; /* Transition point to create derived type */
@@ -1039,8 +1118,6 @@ H5S__obtain_datatype(const hsize_t *down, H5S_hyper_span_t *spans,
size_t outercount = 0; /* Number of span tree nodes at this level */
MPI_Datatype *inner_type = NULL;
hbool_t inner_types_freed = FALSE; /* Whether the inner_type MPI datatypes have been freed */
- hbool_t span_type_valid = FALSE; /* Whether the span_type MPI datatypes is valid */
- hbool_t large_block = FALSE; /* Wether the block length is larger than 32 bit integer */
int *blocklen = NULL;
MPI_Aint *disp = NULL;
size_t u; /* Local index variable */
@@ -1051,173 +1128,194 @@ H5S__obtain_datatype(const hsize_t *down, H5S_hyper_span_t *spans,
/* Sanity check */
HDassert(spans);
+ HDassert(type_list);
bigio_count = H5_mpi_get_bigio_count();
- /* Allocate the initial displacement & block length buffers */
- alloc_count = H5S_MPIO_INITIAL_ALLOC_COUNT;
- if(NULL == (disp = (MPI_Aint *)H5MM_malloc(alloc_count * sizeof(MPI_Aint))))
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of displacements")
- if(NULL == (blocklen = (int *)H5MM_malloc(alloc_count * sizeof(int))))
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of block lengths")
-
- /* if this is the fastest changing dimension, it is the base case for derived datatype. */
- if(NULL == spans->down) {
- span = spans;
- outercount = 0;
- while(span) {
- /* Check if we need to increase the size of the buffers */
- if(outercount >= alloc_count) {
- MPI_Aint *tmp_disp; /* Temporary pointer to new displacement buffer */
- int *tmp_blocklen; /* Temporary pointer to new block length buffer */
-
- /* Double the allocation count */
- alloc_count *= 2;
-
- /* Re-allocate the buffers */
- if(NULL == (tmp_disp = (MPI_Aint *)H5MM_realloc(disp, alloc_count * sizeof(MPI_Aint))))
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of displacements")
- disp = tmp_disp;
- if(NULL == (tmp_blocklen = (int *)H5MM_realloc(blocklen, alloc_count * sizeof(int))))
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of block lengths")
- blocklen = tmp_blocklen;
+ /* Check if we've visited this span tree before */
+ if(spans->op_info[op_info_i].op_gen != op_gen) {
+ H5S_mpio_mpitype_node_t *type_node; /* Pointer to new node in MPI data type list */
+
+ /* Allocate the initial displacement & block length buffers */
+ alloc_count = H5S_MPIO_INITIAL_ALLOC_COUNT;
+ if(NULL == (disp = (MPI_Aint *)H5MM_malloc(alloc_count * sizeof(MPI_Aint))))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of displacements")
+ if(NULL == (blocklen = (int *)H5MM_malloc(alloc_count * sizeof(int))))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of block lengths")
+
+ /* If this is the fastest changing dimension, it is the base case for derived datatype. */
+ span = spans->head;
+ if(NULL == span->down) {
+ hbool_t large_block = FALSE; /* Whether the block length is larger than a 32 bit integer */
+
+ outercount = 0;
+ while(span) {
+ hsize_t nelmts; /* # of elements covered by current span */
+
+ /* Check if we need to increase the size of the buffers */
+ if(outercount >= alloc_count) {
+ MPI_Aint *tmp_disp; /* Temporary pointer to new displacement buffer */
+ int *tmp_blocklen; /* Temporary pointer to new block length buffer */
+
+ /* Double the allocation count */
+ alloc_count *= 2;
+
+ /* Re-allocate the buffers */
+ if(NULL == (tmp_disp = (MPI_Aint *)H5MM_realloc(disp, alloc_count * sizeof(MPI_Aint))))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of displacements")
+ disp = tmp_disp;
+ if(NULL == (tmp_blocklen = (int *)H5MM_realloc(blocklen, alloc_count * sizeof(int))))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of block lengths")
+ blocklen = tmp_blocklen;
+ } /* end if */
+
+ /* Compute the number of elements to attempt in this span */
+ nelmts = (span->high - span->low) + 1;
+
+ /* Store displacement & block length */
+ disp[outercount] = (MPI_Aint)elmt_size * (MPI_Aint)span->low;
+ H5_CHECK_OVERFLOW(nelmts, hsize_t, int)
+ blocklen[outercount] = (int)nelmts;
+
+ if(bigio_count < (hsize_t)blocklen[outercount])
+ large_block = TRUE; /* at least one block type is large, so set this flag to true */
+
+ span = span->next;
+ outercount++;
+ } /* end while */
+
+ /* Everything fits into integers, so cast them and use hindexed */
+ if(bigio_count >= outercount && large_block == FALSE) {
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed((int)outercount, blocklen, disp, *elmt_type, &spans->op_info[op_info_i].u.down_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
} /* end if */
+ else { /* LARGE_DATATYPE:: Something doesn't fit into a 32 bit integer */
+ for(u = 0 ; u < outercount; u++) {
+ MPI_Datatype temp_type = MPI_DATATYPE_NULL;
+
+ /* create the block type from elmt_type while checking the 32 bit int limit */
+ if((hsize_t)(blocklen[u]) > bigio_count) {
+ if(H5_mpio_create_large_type((hsize_t)blocklen[u], 0, *elmt_type, &temp_type) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create a large element datatype in span_hyper selection")
+ } /* end if */
+ else
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)blocklen[u], *elmt_type, &temp_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
+
+ /* Combine the current datatype that is created with this current block type */
+ if(0 == u) /* first iteration, there is no combined datatype yet */
+ spans->op_info[op_info_i].u.down_type = temp_type;
+ else {
+ int bl[2] = {1, 1};
+ MPI_Aint ds[2] = {disp[u - 1], disp[u]};
+ MPI_Datatype dt[2] = {spans->op_info[op_info_i].u.down_type, temp_type};
+
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_create_struct(2, /* count */
+ bl, /* blocklength */
+ ds, /* stride in bytes*/
+ dt, /* old type */
+ &spans->op_info[op_info_i].u.down_type))) /* new type */
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code)
+
+ /* Release previous temporary datatype */
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&temp_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+ } /* end else */
+ } /* end for */
+ } /* end else (LARGE_DATATYPE::) */
+ } /* end if */
+ else {
+ MPI_Aint stride; /* Distance between inner MPI datatypes */
- /* Store displacement & block length */
- disp[outercount] = (MPI_Aint)elmt_size * span->low;
- H5_CHECK_OVERFLOW(span->nelem, hsize_t, int)
- blocklen[outercount] = (int)span->nelem;
- span = span->next;
+ if(NULL == (inner_type = (MPI_Datatype *)H5MM_malloc(alloc_count * sizeof(MPI_Datatype))))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of inner MPI datatypes")
- if (bigio_count < blocklen[outercount]) {
- large_block = TRUE; /* at least one block type is large, so set this flag to true */
- }
+ /* Calculate the total bytes of the lower dimension */
+ stride = (MPI_Aint)(*down) * (MPI_Aint)elmt_size;
+
+ /* Loop over span nodes */
+ outercount = 0;
+ while(span) {
+ MPI_Datatype down_type; /* Temporary MPI datatype for a span tree node's children */
+ hsize_t nelmts; /* # of elements covered by current span */
+
+ /* Check if we need to increase the size of the buffers */
+ if(outercount >= alloc_count) {
+ MPI_Aint *tmp_disp; /* Temporary pointer to new displacement buffer */
+ int *tmp_blocklen; /* Temporary pointer to new block length buffer */
+ MPI_Datatype *tmp_inner_type; /* Temporary pointer to inner MPI datatype buffer */
+
+ /* Double the allocation count */
+ alloc_count *= 2;
+
+ /* Re-allocate the buffers */
+ if(NULL == (tmp_disp = (MPI_Aint *)H5MM_realloc(disp, alloc_count * sizeof(MPI_Aint))))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of displacements")
+ disp = tmp_disp;
+ if(NULL == (tmp_blocklen = (int *)H5MM_realloc(blocklen, alloc_count * sizeof(int))))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of block lengths")
+ blocklen = tmp_blocklen;
+ if(NULL == (tmp_inner_type = (MPI_Datatype *)H5MM_realloc(inner_type, alloc_count * sizeof(MPI_Datatype))))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of inner MPI datatypes")
+ inner_type = tmp_inner_type;
+ } /* end if */
- outercount++;
- } /* end while */
+ /* Displacements should be in bytes and should carry the dimension information */
+ /* First use an MPI vector type to build a derived datatype for this span only */
+ /* Need to calculate the displacement in bytes for this dimension */
+ disp[outercount] = (MPI_Aint)span->low * stride;
+ blocklen[outercount] = 1;
- /* Everything fits into integers, so cast them and use hindexed */
- if(bigio_count >= outercount && large_block == FALSE) {
+ /* Generate MPI datatype for next dimension down */
+ if(H5S__obtain_datatype(span->down, down + 1, elmt_size, elmt_type, &down_type, type_list, op_info_i, op_gen) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't obtain MPI derived data type")
- if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed((int)outercount, blocklen, disp, *elmt_type, span_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
- span_type_valid = TRUE;
- }
- else { /* LARGE_DATATYPE:: Something doesn't fit into a 32 bit integer */
- size_t i;
-
- for(i=0 ; i<outercount ; i++) {
- MPI_Datatype temp_type = MPI_DATATYPE_NULL, outer_type = MPI_DATATYPE_NULL;
- /* create the block type from elmt_type while checking the 32 bit int limit */
- if(blocklen[i] > bigio_count) {
- if(H5_mpio_create_large_type (blocklen[i], 0, *elmt_type, &temp_type) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create a large element datatype in span_hyper selection")
- }
- else
- if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)blocklen[i], *elmt_type, &temp_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
-
- /* combine the current datatype that is created with this current block type */
- if(0 == i) { /* first iteration, there is no combined datatype yet */
- *span_type = temp_type;
- }
- else {
- int bl[2] = {1,1};
- MPI_Aint ds[2] = {disp[i-1],disp[i]};
- MPI_Datatype dt[2] = {*span_type, temp_type};
-
- if(MPI_SUCCESS != (mpi_code = MPI_Type_create_struct (2, /* count */
- bl, /* blocklength */
- ds, /* stride in bytes*/
- dt, /* old type */
- &outer_type))){ /* new type */
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code)
- }
- *span_type = outer_type;
- }
-
- if(outer_type != MPI_DATATYPE_NULL)
- MPI_Type_free(&outer_type);
- /* temp_type shouldn't be freed here...
- * Note that we have simply copied it above (not MPI_Type_dup)
- * into the 'span_type' argument of the caller.
- * The caller needs to deal with it there!
- */
- }
- } /* end (LARGE_DATATYPE::) */
+ /* Compute the number of elements to attempt in this span */
+ nelmts = (span->high - span->low) + 1;
- } /* end if */
- else {
+ /* Build the MPI datatype for this node */
+ H5_CHECK_OVERFLOW(nelmts, hsize_t, int)
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hvector((int)nelmts, 1, stride, down_type, &inner_type[outercount])))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hvector failed", mpi_code)
- if(NULL == (inner_type = (MPI_Datatype *)H5MM_malloc(alloc_count * sizeof(MPI_Datatype))))
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of inner MPI datatypes")
+ span = span->next;
+ outercount++;
+ } /* end while */
- span = spans;
- outercount = 0;
- while(span) {
- MPI_Datatype down_type; /* Temporary MPI datatype for a span tree node's children */
- MPI_Aint stride; /* Distance between inner MPI datatypes */
+ /* Building the whole vector datatype */
+ H5_CHECK_OVERFLOW(outercount, size_t, int)
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_create_struct((int)outercount, blocklen, disp, inner_type, &spans->op_info[op_info_i].u.down_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code)
- /* Check if we need to increase the size of the buffers */
- if(outercount >= alloc_count) {
- MPI_Aint *tmp_disp; /* Temporary pointer to new displacement buffer */
- int *tmp_blocklen; /* Temporary pointer to new block length buffer */
- MPI_Datatype *tmp_inner_type; /* Temporary pointer to inner MPI datatype buffer */
-
- /* Double the allocation count */
- alloc_count *= 2;
-
- /* Re-allocate the buffers */
- if(NULL == (tmp_disp = (MPI_Aint *)H5MM_realloc(disp, alloc_count * sizeof(MPI_Aint))))
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of displacements")
- disp = tmp_disp;
- if(NULL == (tmp_blocklen = (int *)H5MM_realloc(blocklen, alloc_count * sizeof(int))))
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of block lengths")
- blocklen = tmp_blocklen;
- if(NULL == (tmp_inner_type = (MPI_Datatype *)H5MM_realloc(inner_type, alloc_count * sizeof(MPI_Datatype))))
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of inner MPI datatypes")
- inner_type = tmp_inner_type;
- } /* end if */
+ /* Release inner node types */
+ for(u = 0; u < outercount; u++)
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&inner_type[u])))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+ inner_types_freed = TRUE;
+ } /* end else */
- /* Displacement should be in byte and should have dimension information */
- /* First using MPI Type vector to build derived data type for this span only */
- /* Need to calculate the disp in byte for this dimension. */
- /* Calculate the total bytes of the lower dimension */
- disp[outercount] = span->low * (*down) * elmt_size;
- blocklen[outercount] = 1;
-
- /* Generate MPI datatype for next dimension down */
- if(H5S__obtain_datatype(down + 1, span->down->head, elmt_type, &down_type, elmt_size) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't obtain MPI derived data type")
-
- /* Build the MPI datatype for this node */
- stride = (*down) * elmt_size;
- H5_CHECK_OVERFLOW(span->nelem, hsize_t, int)
- if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hvector((int)span->nelem, 1, stride, down_type, &inner_type[outercount]))) {
- MPI_Type_free(&down_type);
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hvector failed", mpi_code)
- } /* end if */
+ /* Allocate space for the MPI data type list node */
+ if(NULL == (type_node = H5FL_MALLOC(H5S_mpio_mpitype_node_t)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate MPI data type list node")
+
+ /* Set up MPI type node */
+ type_node->type = spans->op_info[op_info_i].u.down_type;
+ type_node->next = NULL;
+
+ /* Add MPI type node to list */
+ if(type_list->head == NULL)
+ type_list->head = type_list->tail = type_node;
+ else {
+ type_list->tail->next = type_node;
+ type_list->tail = type_node;
+ } /* end else */
- /* Release MPI datatype for next dimension down */
- if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&down_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
-
- span = span->next;
- outercount++;
- } /* end while */
-
- /* building the whole vector datatype */
- H5_CHECK_OVERFLOW(outercount, size_t, int)
- if(MPI_SUCCESS != (mpi_code = MPI_Type_create_struct((int)outercount, blocklen, disp, inner_type, span_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code)
- span_type_valid = TRUE;
-
- /* Release inner node types */
- for(u = 0; u < outercount; u++)
- if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&inner_type[u])))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
- inner_types_freed = TRUE;
+ /* Remember that we've visited this span tree */
+ spans->op_info[op_info_i].op_gen = op_gen;
} /* end else */
+ /* Return MPI data type for span tree */
+ *span_type = spans->op_info[op_info_i].u.down_type;
+
done:
/* General cleanup */
if(inner_type != NULL) {
@@ -1232,13 +1330,6 @@ done:
if(disp != NULL)
H5MM_free(disp);
- /* Error cleanup */
- if(ret_value < 0) {
- if(span_type_valid)
- if(MPI_SUCCESS != (mpi_code = MPI_Type_free(span_type)))
- HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
- } /* end if */
-
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5S__obtain_datatype() */
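The op_info[op_info_i].op_gen check at the top of H5S__obtain_datatype() is a memoization scheme: span sub-trees can be shared, so each one is converted to an MPI datatype at most once per operation generation and later visits simply reuse the cached handle. A simplified, self-contained sketch of the same generation-counter idea (the tree_node_t type and build_type helper are illustrative, not HDF5's):

    #include <stdint.h>
    #include <mpi.h>

    /* Illustrative tree node whose expensive result is cached per operation */
    typedef struct tree_node_t {
        uint64_t             visited_gen;  /* generation of the last visit (0 = never) */
        MPI_Datatype         cached_type;  /* datatype built on that visit */
        struct tree_node_t  *child;        /* shared sub-tree, may be reached more than once */
    } tree_node_t;

    /* Build (or reuse) the MPI datatype for a node; op_gen starts at 1 and is
     * incremented once per whole operation by the caller. */
    static MPI_Datatype
    build_type(tree_node_t *node, MPI_Datatype elmt_type, uint64_t op_gen)
    {
        if (node->visited_gen != op_gen) {
            /* First visit in this operation: do the real work */
            MPI_Datatype base = node->child
                ? build_type(node->child, elmt_type, op_gen)
                : elmt_type;

            /* Placeholder for the real hindexed/hvector construction */
            MPI_Type_contiguous(2, base, &node->cached_type);

            node->visited_gen = op_gen;   /* remember this sub-tree is done */
        }

        /* Repeat visits within the same operation reuse the cached handle */
        return node->cached_type;
    }

Each top-level operation bumps the generation counter before traversing, so caches left over from a previous operation are never mistaken for current ones; the cached handles are then released in one sweep, which is what the type list and H5S__release_datatype() provide in the patch.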
@@ -1326,7 +1417,7 @@ H5S_mpio_space_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type
case H5S_SEL_HYPERSLABS:
if((H5S_SELECT_IS_REGULAR(space) == TRUE)) {
- if(H5S__mpio_hyper_type(space, elmt_size, new_type, count, is_derived_type) < 0)
+ if(H5S__mpio_reg_hyper_type(space, elmt_size, new_type, count, is_derived_type) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert regular 'hyperslab' selection to MPI type")
} /* end if */
else