path: root/src/H5Smpio.c
author    Allen Byrne <byrn@hdfgroup.org>  2020-09-30 14:27:10 (GMT)
committer Allen Byrne <byrn@hdfgroup.org>  2020-09-30 14:27:10 (GMT)
commit    b2d661b508a7fc7a2592c13bc6bdc175551f075d (patch)
tree      13baeb0d83a7c2a4c6299993c182b1227c2f6114 /src/H5Smpio.c
parent    29ab58b58dce556639ea3154e262895773a8a8df (diff)
Clang-format of source files
Diffstat (limited to 'src/H5Smpio.c')
-rw-r--r--  src/H5Smpio.c  925
1 file changed, 472 insertions(+), 453 deletions(-)
diff --git a/src/H5Smpio.c b/src/H5Smpio.c
index 2f8c43a..f737944 100644
--- a/src/H5Smpio.c
+++ b/src/H5Smpio.c
@@ -22,26 +22,25 @@
/* Module Setup */
/****************/
-#include "H5Smodule.h" /* This source code file is part of the H5S module */
-
+#include "H5Smodule.h" /* This source code file is part of the H5S module */
/***********/
/* Headers */
/***********/
-#include "H5private.h" /* Generic Functions */
-#include "H5Dprivate.h" /* Datasets */
-#include "H5Eprivate.h" /* Error handling */
-#include "H5FLprivate.h" /* Free Lists */
-#include "H5MMprivate.h" /* Memory management */
-#include "H5Spkg.h" /* Dataspaces */
-#include "H5VMprivate.h" /* Vector and array functions */
+#include "H5private.h" /* Generic Functions */
+#include "H5Dprivate.h" /* Datasets */
+#include "H5Eprivate.h" /* Error handling */
+#include "H5FLprivate.h" /* Free Lists */
+#include "H5MMprivate.h" /* Memory management */
+#include "H5Spkg.h" /* Dataspaces */
+#include "H5VMprivate.h" /* Vector and array functions */
#ifdef H5_HAVE_PARALLEL
/****************/
/* Local Macros */
/****************/
-#define H5S_MPIO_INITIAL_ALLOC_COUNT 256
+#define H5S_MPIO_INITIAL_ALLOC_COUNT 256
/*******************/
/* Local Variables */
@@ -53,56 +52,49 @@
/* Node in linked list of MPI data types created during traversal of irregular hyperslab selection */
typedef struct H5S_mpio_mpitype_node_t {
- MPI_Datatype type; /* MPI Datatype */
- struct H5S_mpio_mpitype_node_t *next; /* Pointer to next node in list */
+ MPI_Datatype type; /* MPI Datatype */
+ struct H5S_mpio_mpitype_node_t *next; /* Pointer to next node in list */
} H5S_mpio_mpitype_node_t;
/* List to track MPI data types generated during traversal of irregular hyperslab selection */
typedef struct H5S_mpio_mpitype_list_t {
- H5S_mpio_mpitype_node_t *head; /* Pointer to head of list */
- H5S_mpio_mpitype_node_t *tail; /* Pointer to tail of list */
+ H5S_mpio_mpitype_node_t *head; /* Pointer to head of list */
+ H5S_mpio_mpitype_node_t *tail; /* Pointer to tail of list */
} H5S_mpio_mpitype_list_t;
-
/********************/
/* Local Prototypes */
/********************/
-static herr_t H5S__mpio_all_type(const H5S_t *space, size_t elmt_size,
- MPI_Datatype *new_type, int *count, hbool_t *is_derived_type);
-static herr_t H5S__mpio_none_type(MPI_Datatype *new_type, int *count,
- hbool_t *is_derived_type);
-static herr_t H5S__mpio_create_point_datatype(size_t elmt_size, hsize_t num_points,
- MPI_Aint *disp, MPI_Datatype *new_type);
-static herr_t H5S__mpio_point_type(const H5S_t *space, size_t elmt_size,
- MPI_Datatype *new_type, int *count, hbool_t *is_derived_type,
- hbool_t do_permute, hsize_t **permute_map, hbool_t *is_permuted);
-static herr_t H5S__mpio_permute_type(const H5S_t *space, size_t elmt_size,
- hsize_t **permute_map, MPI_Datatype *new_type, int *count,
- hbool_t *is_derived_type);
-static herr_t H5S__mpio_reg_hyper_type(const H5S_t *space, size_t elmt_size,
- MPI_Datatype *new_type, int *count, hbool_t *is_derived_type);
-static herr_t H5S__mpio_span_hyper_type(const H5S_t *space, size_t elmt_size,
- MPI_Datatype *new_type, int *count, hbool_t *is_derived_type);
+static herr_t H5S__mpio_all_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type, int *count,
+ hbool_t *is_derived_type);
+static herr_t H5S__mpio_none_type(MPI_Datatype *new_type, int *count, hbool_t *is_derived_type);
+static herr_t H5S__mpio_create_point_datatype(size_t elmt_size, hsize_t num_points, MPI_Aint *disp,
+ MPI_Datatype *new_type);
+static herr_t H5S__mpio_point_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type, int *count,
+ hbool_t *is_derived_type, hbool_t do_permute, hsize_t **permute_map,
+ hbool_t *is_permuted);
+static herr_t H5S__mpio_permute_type(const H5S_t *space, size_t elmt_size, hsize_t **permute_map,
+ MPI_Datatype *new_type, int *count, hbool_t *is_derived_type);
+static herr_t H5S__mpio_reg_hyper_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type,
+ int *count, hbool_t *is_derived_type);
+static herr_t H5S__mpio_span_hyper_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type,
+ int *count, hbool_t *is_derived_type);
static herr_t H5S__release_datatype(H5S_mpio_mpitype_list_t *type_list);
-static herr_t H5S__obtain_datatype(H5S_hyper_span_info_t *spans, const hsize_t *down,
- size_t elmt_size, const MPI_Datatype *elmt_type, MPI_Datatype *span_type,
- H5S_mpio_mpitype_list_t *type_list, unsigned op_info_i, uint64_t op_gen);
-
+static herr_t H5S__obtain_datatype(H5S_hyper_span_info_t *spans, const hsize_t *down, size_t elmt_size,
+ const MPI_Datatype *elmt_type, MPI_Datatype *span_type,
+ H5S_mpio_mpitype_list_t *type_list, unsigned op_info_i, uint64_t op_gen);
/*****************************/
/* Library Private Variables */
/*****************************/
-
/*********************/
/* Package Variables */
/*********************/
-
/* Declare a free list to manage the H5S_mpio_mpitype_node_t struct */
H5FL_DEFINE_STATIC(H5S_mpio_mpitype_node_t);
-
/*-------------------------------------------------------------------------
* Function: H5S__mpio_all_type
*
@@ -120,14 +112,14 @@ H5FL_DEFINE_STATIC(H5S_mpio_mpitype_node_t);
*-------------------------------------------------------------------------
*/
static herr_t
-H5S__mpio_all_type(const H5S_t *space, size_t elmt_size,
- MPI_Datatype *new_type, int *count, hbool_t *is_derived_type)
+H5S__mpio_all_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type, int *count,
+ hbool_t *is_derived_type)
{
- hsize_t total_bytes;
- hssize_t snelmts; /* Total number of elmts (signed) */
- hsize_t nelmts; /* Total number of elmts */
- hsize_t bigio_count; /* Transition point to create derived type */
- herr_t ret_value = SUCCEED; /* Return value */
+ hsize_t total_bytes;
+ hssize_t snelmts; /* Total number of elmts (signed) */
+ hsize_t nelmts; /* Total number of elmts */
+ hsize_t bigio_count; /* Transition point to create derived type */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -135,15 +127,15 @@ H5S__mpio_all_type(const H5S_t *space, size_t elmt_size,
HDassert(space);
/* Just treat the entire extent as a block of bytes */
- if((snelmts = (hssize_t)H5S_GET_EXTENT_NPOINTS(space)) < 0)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "src dataspace has invalid selection")
+ if ((snelmts = (hssize_t)H5S_GET_EXTENT_NPOINTS(space)) < 0)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "src dataspace has invalid selection")
H5_CHECKED_ASSIGN(nelmts, hsize_t, snelmts, hssize_t);
total_bytes = (hsize_t)elmt_size * nelmts;
bigio_count = H5_mpi_get_bigio_count();
/* Verify that the size can be expressed as a 32 bit integer */
- if(bigio_count >= total_bytes) {
+ if (bigio_count >= total_bytes) {
/* fill in the return values */
*new_type = MPI_BYTE;
H5_CHECKED_ASSIGN(*count, int, total_bytes, hsize_t);
@@ -151,9 +143,10 @@ H5S__mpio_all_type(const H5S_t *space, size_t elmt_size,
}
else {
/* Create a LARGE derived datatype for this transfer */
- if(H5_mpio_create_large_type(total_bytes, 0, MPI_BYTE, new_type) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create a large datatype from the all selection")
- *count = 1;
+ if (H5_mpio_create_large_type(total_bytes, 0, MPI_BYTE, new_type) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,
+ "couldn't create a large datatype from the all selection")
+ *count = 1;
*is_derived_type = TRUE;
}
@@ -161,7 +154,6 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5S__mpio_all_type() */
-
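
The pattern above recurs throughout this file: when the transfer size fits MPI's
32-bit count argument, MPI_BYTE plus a plain count is enough; past the bigio
transition point, a single derived datatype has to carry the size instead. A
minimal standalone sketch of such a large-type construction, assuming a fixed
chunk size and total_bytes > chunk; the real code delegates to the internal
H5_mpio_create_large_type():

#include <mpi.h>
#include <stdint.h>

/* Sketch: cover total_bytes (> INT_MAX) with one derived type built from
 * full-sized chunks plus a remainder chunk.  Illustrative only. */
static int make_large_byte_type(uint64_t total_bytes, uint64_t chunk, MPI_Datatype *out)
{
    MPI_Datatype chunk_type, body_type, types[2];
    int          blocklens[2] = {1, 1};
    MPI_Aint     disps[2];
    uint64_t     nchunks = total_bytes / chunk;

    MPI_Type_contiguous((int)chunk, MPI_BYTE, &chunk_type);
    MPI_Type_vector((int)nchunks, 1, 1, chunk_type, &body_type); /* nchunks contiguous chunks */

    disps[0] = 0;
    disps[1] = (MPI_Aint)(nchunks * chunk);                      /* remainder starts after the body */
    types[0] = body_type;
    MPI_Type_contiguous((int)(total_bytes % chunk), MPI_BYTE, &types[1]);

    MPI_Type_create_struct(2, blocklens, disps, types, out);
    MPI_Type_commit(out);

    MPI_Type_free(&chunk_type);  /* *out keeps its own reference */
    MPI_Type_free(&body_type);
    MPI_Type_free(&types[1]);
    return 0;
}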
/*-------------------------------------------------------------------------
* Function: H5S__mpio_none_type
*
@@ -184,14 +176,13 @@ H5S__mpio_none_type(MPI_Datatype *new_type, int *count, hbool_t *is_derived_type
FUNC_ENTER_STATIC_NOERR
/* fill in the return values */
- *new_type = MPI_BYTE;
- *count = 0;
+ *new_type = MPI_BYTE;
+ *count = 0;
*is_derived_type = FALSE;
FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5S__mpio_none_type() */
-
/*-------------------------------------------------------------------------
* Function: H5S__mpio_create_point_datatype
*
@@ -206,52 +197,53 @@ H5S__mpio_none_type(MPI_Datatype *new_type, int *count, hbool_t *is_derived_type
*-------------------------------------------------------------------------
*/
static herr_t
-H5S__mpio_create_point_datatype(size_t elmt_size, hsize_t num_points,
- MPI_Aint *disp, MPI_Datatype *new_type)
+H5S__mpio_create_point_datatype(size_t elmt_size, hsize_t num_points, MPI_Aint *disp, MPI_Datatype *new_type)
{
- MPI_Datatype elmt_type; /* MPI datatype for individual element */
- hbool_t elmt_type_created = FALSE; /* Whether the element MPI datatype was created */
- int *inner_blocks = NULL; /* Arrays for MPI datatypes when "large" datatype needed */
- MPI_Aint *inner_disps = NULL;
- MPI_Datatype *inner_types = NULL;
+ MPI_Datatype elmt_type; /* MPI datatype for individual element */
+ hbool_t elmt_type_created = FALSE; /* Whether the element MPI datatype was created */
+ int * inner_blocks = NULL; /* Arrays for MPI datatypes when "large" datatype needed */
+ MPI_Aint * inner_disps = NULL;
+ MPI_Datatype *inner_types = NULL;
#if MPI_VERSION < 3
- int *blocks = NULL; /* Array of block sizes for MPI hindexed create call */
- hsize_t u; /* Local index variable */
+ int * blocks = NULL; /* Array of block sizes for MPI hindexed create call */
+ hsize_t u; /* Local index variable */
#endif
- hsize_t bigio_count; /* Transition point to create derived type */
- int mpi_code; /* MPI error code */
- herr_t ret_value = SUCCEED; /* Return value */
+ hsize_t bigio_count; /* Transition point to create derived type */
+ int mpi_code; /* MPI error code */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
/* Create an MPI datatype for an element */
- if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)elmt_size, MPI_BYTE, &elmt_type)))
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)elmt_size, MPI_BYTE, &elmt_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
elmt_type_created = TRUE;
bigio_count = H5_mpi_get_bigio_count();
/* Check whether standard or BIGIO processing will be employed */
- if(bigio_count >= num_points) {
+ if (bigio_count >= num_points) {
#if MPI_VERSION >= 3
/* Create an MPI datatype for the whole point selection */
- if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed_block((int)num_points, 1, disp, elmt_type, new_type)))
+ if (MPI_SUCCESS !=
+ (mpi_code = MPI_Type_create_hindexed_block((int)num_points, 1, disp, elmt_type, new_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_indexed_block failed", mpi_code)
#else
/* Allocate block sizes for MPI datatype call */
- if(NULL == (blocks = (int *)H5MM_malloc(sizeof(int) * num_points)))
+ if (NULL == (blocks = (int *)H5MM_malloc(sizeof(int) * num_points)))
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of blocks")
- for(u = 0; u < num_points; u++)
+ for (u = 0; u < num_points; u++)
blocks[u] = 1;
/* Create an MPI datatype for the whole point selection */
- if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed((int)num_points, blocks, disp, elmt_type, new_type)))
+ if (MPI_SUCCESS !=
+ (mpi_code = MPI_Type_create_hindexed((int)num_points, blocks, disp, elmt_type, new_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
#endif
/* Commit MPI datatype for later use */
- if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_type)))
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
}
else {
@@ -259,9 +251,9 @@ H5S__mpio_create_point_datatype(size_t elmt_size, hsize_t num_points,
* We'll create an hindexed_block type for every 2G point count and then combine
* those and any remaining points into a single large datatype.
*/
- int total_types, i;
- int remaining_points;
- int num_big_types;
+ int total_types, i;
+ int remaining_points;
+ int num_big_types;
hsize_t leftover;
/* Calculate how many Big MPI datatypes are needed to represent the buffer */
@@ -273,82 +265,85 @@ H5S__mpio_create_point_datatype(size_t elmt_size, hsize_t num_points,
total_types = (int)(remaining_points) ? (num_big_types + 1) : num_big_types;
/* Allocate arrays for the MPI derived types needed */
- if(NULL == (inner_types = (MPI_Datatype *)H5MM_malloc((sizeof(MPI_Datatype) * (size_t)total_types))))
+ if (NULL == (inner_types = (MPI_Datatype *)H5MM_malloc((sizeof(MPI_Datatype) * (size_t)total_types))))
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of blocks")
- if(NULL == (inner_blocks = (int *)H5MM_malloc(sizeof(int) * (size_t)total_types)))
+ if (NULL == (inner_blocks = (int *)H5MM_malloc(sizeof(int) * (size_t)total_types)))
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of blocks")
- if(NULL == (inner_disps = (MPI_Aint *)H5MM_malloc(sizeof(MPI_Aint) * (size_t)total_types)))
+ if (NULL == (inner_disps = (MPI_Aint *)H5MM_malloc(sizeof(MPI_Aint) * (size_t)total_types)))
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of blocks")
#if MPI_VERSION < 3
/* Allocate block sizes for MPI datatype call */
- if(NULL == (blocks = (int *)H5MM_malloc(sizeof(int) * bigio_count)))
+ if (NULL == (blocks = (int *)H5MM_malloc(sizeof(int) * bigio_count)))
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of blocks")
- for(u = 0; u < bigio_count; u++)
+ for (u = 0; u < bigio_count; u++)
blocks[u] = 1;
#endif
- for(i = 0; i < num_big_types; i++) {
+ for (i = 0; i < num_big_types; i++) {
#if MPI_VERSION >= 3
- if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed_block((int)bigio_count,
- 1, &disp[(hsize_t)i*bigio_count], elmt_type, &inner_types[i])))
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed_block((int)bigio_count, 1,
+ &disp[(hsize_t)i * bigio_count],
+ elmt_type, &inner_types[i])))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed_block failed", mpi_code);
#else
- if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed((int)bigio_count,
- blocks, &disp[i*bigio_count], elmt_type, &inner_types[i])))
+ if (MPI_SUCCESS !=
+ (mpi_code = MPI_Type_create_hindexed((int)bigio_count, blocks, &disp[i * bigio_count],
+ elmt_type, &inner_types[i])))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
#endif
inner_blocks[i] = 1;
inner_disps[i] = 0;
} /* end for*/
- if(remaining_points) {
+ if (remaining_points) {
#if MPI_VERSION >= 3
- if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed_block(remaining_points,
- 1, &disp[(hsize_t)num_big_types*bigio_count], elmt_type, &inner_types[num_big_types])))
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed_block(
+ remaining_points, 1, &disp[(hsize_t)num_big_types * bigio_count],
+ elmt_type, &inner_types[num_big_types])))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed_block failed", mpi_code);
#else
- if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed((int)remaining_points,
- blocks, &disp[num_big_types*bigio_count], elmt_type, &inner_types[num_big_types])))
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed((int)remaining_points, blocks,
+ &disp[num_big_types * bigio_count],
+ elmt_type, &inner_types[num_big_types])))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
#endif
inner_blocks[num_big_types] = 1;
- inner_disps[num_big_types] = 0;
+ inner_disps[num_big_types] = 0;
}
- if(MPI_SUCCESS != (mpi_code = MPI_Type_create_struct(total_types,
- inner_blocks, inner_disps, inner_types, new_type)))
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_create_struct(total_types, inner_blocks, inner_disps,
+ inner_types, new_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct", mpi_code);
- for(i = 0; i < total_types; i++)
+ for (i = 0; i < total_types; i++)
MPI_Type_free(&inner_types[i]);
/* Commit MPI datatype for later use */
- if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_type)))
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
} /* end else */
done:
- if(elmt_type_created)
+ if (elmt_type_created)
MPI_Type_free(&elmt_type);
#if MPI_VERSION < 3
- if(blocks)
+ if (blocks)
H5MM_free(blocks);
#endif
- if(inner_types)
+ if (inner_types)
H5MM_free(inner_types);
- if(inner_blocks)
+ if (inner_blocks)
H5MM_free(inner_blocks);
- if(inner_disps)
+ if (inner_disps)
H5MM_free(inner_disps);
FUNC_LEAVE_NOAPI(ret_value)
} /* H5S__mpio_create_point_datatype() */
-
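
In the common case above (point count within the 32-bit limit), the whole point
selection becomes a single MPI-3 hindexed_block type. A self-contained sketch of
that path with illustrative element size and byte offsets, not values taken from
a real dataspace:

#include <mpi.h>

/* Sketch: three 8-byte elements at scattered byte offsets, described by one
 * hindexed_block type (offsets are already scaled by the element size). */
static int point_type_demo(MPI_Datatype *out)
{
    MPI_Aint     disp[3] = {0, 64, 264};
    MPI_Datatype elmt_type;

    MPI_Type_contiguous(8, MPI_BYTE, &elmt_type);              /* one element */
    MPI_Type_create_hindexed_block(3, 1, disp, elmt_type, out);
    MPI_Type_commit(out);                                      /* ready for use in I/O */
    MPI_Type_free(&elmt_type);                                 /* *out keeps a reference */
    return 0;
}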
/*-------------------------------------------------------------------------
* Function: H5S__mpio_point_type
*
@@ -370,16 +365,15 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5S__mpio_point_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type,
- int *count, hbool_t *is_derived_type, hbool_t do_permute, hsize_t **permute,
- hbool_t *is_permuted)
+H5S__mpio_point_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type, int *count,
+ hbool_t *is_derived_type, hbool_t do_permute, hsize_t **permute, hbool_t *is_permuted)
{
- MPI_Aint *disp = NULL; /* Datatype displacement for each point*/
- H5S_pnt_node_t *curr = NULL; /* Current point being operated on in from the selection */
- hssize_t snum_points; /* Signed number of elements in selection */
- hsize_t num_points; /* Sumber of points in the selection */
- hsize_t u; /* Local index variable */
- herr_t ret_value = SUCCEED; /* Return value */
+ MPI_Aint * disp = NULL; /* Datatype displacement for each point*/
+ H5S_pnt_node_t *curr = NULL; /* Current point being operated on from the selection */
+ hssize_t snum_points; /* Signed number of elements in selection */
+ hsize_t num_points; /* Number of points in the selection */
+ hsize_t u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -387,25 +381,25 @@ H5S__mpio_point_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_typ
HDassert(space);
/* Get the total number of points selected */
- if((snum_points = (hssize_t)H5S_GET_SELECT_NPOINTS(space)) < 0)
+ if ((snum_points = (hssize_t)H5S_GET_SELECT_NPOINTS(space)) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCOUNT, FAIL, "can't get number of elements selected")
num_points = (hsize_t)snum_points;
/* Allocate array for element displacements */
- if(NULL == (disp = (MPI_Aint *)H5MM_malloc(sizeof(MPI_Aint) * num_points)))
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of displacements")
+ if (NULL == (disp = (MPI_Aint *)H5MM_malloc(sizeof(MPI_Aint) * num_points)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of displacements")
/* Allocate array for element permutation - returned to caller */
- if(do_permute)
- if(NULL == (*permute = (hsize_t *)H5MM_malloc(sizeof(hsize_t) * num_points)))
+ if (do_permute)
+ if (NULL == (*permute = (hsize_t *)H5MM_malloc(sizeof(hsize_t) * num_points)))
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate permutation array")
/* Iterate through list of elements */
curr = space->select.sel_info.pnt_lst->head;
- for(u = 0 ; u < num_points ; u++) {
+ for (u = 0; u < num_points; u++) {
/* Calculate the displacement of the current point */
hsize_t disp_tmp = H5VM_array_offset(space->extent.rank, space->extent.size, curr->pnt);
- if(disp_tmp > LONG_MAX) /* Maximum value of type long */
+ if (disp_tmp > LONG_MAX) /* Maximum value of type long */
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "disp overflow")
disp[u] = (MPI_Aint)disp_tmp;
disp[u] *= (MPI_Aint)elmt_size;
@@ -425,22 +419,22 @@ H5S__mpio_point_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_typ
* point 4: map[3] = 2, move the 2nd position since point 1 has a higher disp,
* but points 2 and 3 have lower displacements.
*/
- if(do_permute) {
- if(u > 0 && disp[u] < disp[u - 1]) {
+ if (do_permute) {
+ if (u > 0 && disp[u] < disp[u - 1]) {
hsize_t s = 0, l = u, m = u / 2;
*is_permuted = TRUE;
do {
- if(disp[u] > disp[m])
+ if (disp[u] > disp[m])
s = m + 1;
- else if(disp[u] < disp[m])
+ else if (disp[u] < disp[m])
l = m;
else
break;
m = s + ((l - s) / 2);
- } while(s < l);
+ } while (s < l);
- if(m < u) {
+ if (m < u) {
MPI_Aint temp;
temp = disp[u];
@@ -455,27 +449,27 @@ H5S__mpio_point_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_typ
/* this is a memory space, and no permutation is necessary to create
the derived datatype */
else {
- ; /* do nothing */
- } /* end else */
+ ; /* do nothing */
+ } /* end else */
/* get the next point */
curr = curr->next;
} /* end for */
/* Create the MPI datatype for the set of element displacements */
- if(H5S__mpio_create_point_datatype(elmt_size, num_points, disp, new_type) < 0)
+ if (H5S__mpio_create_point_datatype(elmt_size, num_points, disp, new_type) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create an MPI Datatype from point selection")
/* Set values about MPI datatype created */
- *count = 1;
+ *count = 1;
*is_derived_type = TRUE;
done:
- if(NULL != disp)
+ if (NULL != disp)
H5MM_free(disp);
/* Release the permutation buffer, if it wasn't used */
- if(!(*is_permuted) && (*permute)) {
+ if (!(*is_permuted) && (*permute)) {
H5MM_free(*permute);
*permute = NULL;
} /* end if */
@@ -483,7 +477,6 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5S__mpio_point_type() */
-
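
The displacement sort above is insertion into an already-sorted prefix: a binary
search finds the slot for disp[u], the tail shifts right, and the permutation map
records where element u landed so the memory-side datatype can later be reordered
to match the file order. Just that step in isolation, with plain C types standing
in for MPI_Aint and hsize_t:

#include <stddef.h>
#include <string.h>

/* Sketch: keep disp[0..u] sorted as displacement u arrives; map[u] records
 * the position it was inserted at.  Illustrative, not the HDF5 API. */
static void insert_sorted(long *disp, size_t *map, size_t u)
{
    map[u] = u;                          /* stays put unless out of order */
    if (u > 0 && disp[u] < disp[u - 1]) {
        size_t s = 0, l = u, m = u / 2;

        do {                             /* binary search for the slot */
            if (disp[u] > disp[m])
                s = m + 1;
            else if (disp[u] < disp[m])
                l = m;
            else
                break;
            m = s + (l - s) / 2;
        } while (s < l);

        if (m < u) {                     /* shift the tail, drop the value in */
            long temp = disp[u];

            memmove(disp + m + 1, disp + m, (u - m) * sizeof(long));
            disp[m] = temp;
            map[u]  = m;
        }
    }
}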
/*-------------------------------------------------------------------------
* Function: H5S__mpio_permute_type
*
@@ -508,17 +501,17 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5S__mpio_permute_type(const H5S_t *space, size_t elmt_size, hsize_t **permute,
- MPI_Datatype *new_type, int *count, hbool_t *is_derived_type)
+H5S__mpio_permute_type(const H5S_t *space, size_t elmt_size, hsize_t **permute, MPI_Datatype *new_type,
+ int *count, hbool_t *is_derived_type)
{
- MPI_Aint *disp = NULL; /* Datatype displacement for each point*/
- H5S_sel_iter_t sel_iter; /* Selection iteration info */
- hbool_t sel_iter_init = FALSE; /* Selection iteration info has been initialized */
- hssize_t snum_points; /* Signed number of elements in selection */
- hsize_t num_points; /* Number of points in the selection */
- size_t max_elem; /* Maximum number of elements allowed in sequences */
- hsize_t u; /* Local index variable */
- herr_t ret_value = SUCCEED; /* Return value */
+ MPI_Aint * disp = NULL; /* Datatype displacement for each point*/
+ H5S_sel_iter_t sel_iter; /* Selection iteration info */
+ hbool_t sel_iter_init = FALSE; /* Selection iteration info has been initialized */
+ hssize_t snum_points; /* Signed number of elements in selection */
+ hsize_t num_points; /* Number of points in the selection */
+ size_t max_elem; /* Maximum number of elements allowed in sequences */
+ hsize_t u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -526,39 +519,40 @@ H5S__mpio_permute_type(const H5S_t *space, size_t elmt_size, hsize_t **permute,
HDassert(space);
/* Get the total number of points selected */
- if((snum_points = (hssize_t)H5S_GET_SELECT_NPOINTS(space)) < 0)
+ if ((snum_points = (hssize_t)H5S_GET_SELECT_NPOINTS(space)) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCOUNT, FAIL, "can't get number of elements selected")
num_points = (hsize_t)snum_points;
/* Allocate array to store point displacements */
- if(NULL == (disp = (MPI_Aint *)H5MM_malloc(sizeof(MPI_Aint) * num_points)))
+ if (NULL == (disp = (MPI_Aint *)H5MM_malloc(sizeof(MPI_Aint) * num_points)))
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of displacements")
/* Initialize selection iterator */
- if(H5S_select_iter_init(&sel_iter, space, elmt_size, 0) < 0)
+ if (H5S_select_iter_init(&sel_iter, space, elmt_size, 0) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to initialize selection iterator")
- sel_iter_init = TRUE; /* Selection iteration info has been initialized */
+ sel_iter_init = TRUE; /* Selection iteration info has been initialized */
/* Set the number of elements to iterate over */
H5_CHECKED_ASSIGN(max_elem, size_t, num_points, hsize_t);
/* Loop, while elements left in selection */
u = 0;
- while(max_elem > 0) {
- hsize_t off[H5D_IO_VECTOR_SIZE]; /* Array to store sequence offsets */
- size_t len[H5D_IO_VECTOR_SIZE]; /* Array to store sequence lengths */
- size_t nelem; /* Number of elements used in sequences */
- size_t nseq; /* Number of sequences generated */
- size_t curr_seq; /* Current sequence being worked on */
+ while (max_elem > 0) {
+ hsize_t off[H5D_IO_VECTOR_SIZE]; /* Array to store sequence offsets */
+ size_t len[H5D_IO_VECTOR_SIZE]; /* Array to store sequence lengths */
+ size_t nelem; /* Number of elements used in sequences */
+ size_t nseq; /* Number of sequences generated */
+ size_t curr_seq; /* Current sequence being worked on */
/* Get the sequences of bytes */
- if(H5S_SELECT_ITER_GET_SEQ_LIST(&sel_iter, (size_t)H5D_IO_VECTOR_SIZE, max_elem, &nseq, &nelem, off, len) < 0)
+ if (H5S_SELECT_ITER_GET_SEQ_LIST(&sel_iter, (size_t)H5D_IO_VECTOR_SIZE, max_elem, &nseq, &nelem, off,
+ len) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_UNSUPPORTED, FAIL, "sequence length generation failed")
/* Loop, while sequences left to process */
- for(curr_seq = 0; curr_seq < nseq; curr_seq++) {
- hsize_t curr_off; /* Current offset within sequence */
- size_t curr_len; /* Length of bytes left to process in sequence */
+ for (curr_seq = 0; curr_seq < nseq; curr_seq++) {
+ hsize_t curr_off; /* Current offset within sequence */
+ size_t curr_len; /* Length of bytes left to process in sequence */
/* Get the current offset */
curr_off = off[curr_seq];
@@ -567,19 +561,19 @@ H5S__mpio_permute_type(const H5S_t *space, size_t elmt_size, hsize_t **permute,
curr_len = len[curr_seq];
/* Loop, while bytes left in sequence */
- while(curr_len > 0) {
+ while (curr_len > 0) {
/* Set the displacement of the current point */
- if(curr_off > LONG_MAX)
+ if (curr_off > LONG_MAX)
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "curr_off overflow")
disp[u] = (MPI_Aint)curr_off;
/* This is a memory displacement, so for each point selected,
* apply the map that was generated by the file selection */
- if((*permute)[u] != num_points) {
+ if ((*permute)[u] != num_points) {
MPI_Aint temp = disp[u];
HDmemmove(disp + (*permute)[u] + 1, disp + (*permute)[u],
- (u - (*permute)[u]) * sizeof(MPI_Aint));
+ (u - (*permute)[u]) * sizeof(MPI_Aint));
disp[(*permute)[u]] = temp;
} /* end if */
@@ -592,30 +586,30 @@ H5S__mpio_permute_type(const H5S_t *space, size_t elmt_size, hsize_t **permute,
/* Decrement number of bytes left in sequence */
curr_len -= elmt_size;
} /* end while */
- } /* end for */
+ } /* end for */
/* Decrement number of elements left to process */
max_elem -= nelem;
} /* end while */
/* Create the MPI datatype for the set of element displacements */
- if(H5S__mpio_create_point_datatype(elmt_size, num_points, disp, new_type) < 0)
+ if (H5S__mpio_create_point_datatype(elmt_size, num_points, disp, new_type) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create an MPI Datatype from point selection")
/* Set values about MPI datatype created */
- *count = 1;
+ *count = 1;
*is_derived_type = TRUE;
done:
/* Release selection iterator */
- if(sel_iter_init)
- if(H5S_SELECT_ITER_RELEASE(&sel_iter) < 0)
+ if (sel_iter_init)
+ if (H5S_SELECT_ITER_RELEASE(&sel_iter) < 0)
HDONE_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release selection iterator")
/* Free memory */
- if(disp)
+ if (disp)
H5MM_free(disp);
- if(*permute) {
+ if (*permute) {
H5MM_free(*permute);
*permute = NULL;
} /* end if */
@@ -623,7 +617,6 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5S__mpio_permute_type() */
-
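
The inner loops above flatten the iterator's (offset, length) byte sequences into
one displacement per element before the stored permutation map is applied. A
stripped-down sketch of that flattening, assuming each len[] value is an exact
multiple of elmt_size, as it is for element-wise selections:

#include <stddef.h>

/* Sketch: expand nseq (offset, length) byte sequences into per-element
 * byte displacements; returns how many displacements were written. */
static size_t expand_seqs(const unsigned long *off, const size_t *len, size_t nseq,
                          size_t elmt_size, long *disp)
{
    size_t u = 0;

    for (size_t s = 0; s < nseq; s++) {
        unsigned long curr_off = off[s];

        for (size_t left = len[s]; left > 0; left -= elmt_size) {
            disp[u++] = (long)curr_off;  /* one displacement per element */
            curr_off += elmt_size;
        }
    }
    return u;
}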
/*-------------------------------------------------------------------------
* Function: H5S__mpio_reg_hyper_type
*
@@ -641,32 +634,32 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5S__mpio_reg_hyper_type(const H5S_t *space, size_t elmt_size,
- MPI_Datatype *new_type, int *count, hbool_t *is_derived_type)
+H5S__mpio_reg_hyper_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type, int *count,
+ hbool_t *is_derived_type)
{
- H5S_sel_iter_t sel_iter; /* Selection iteration info */
- hbool_t sel_iter_init = FALSE; /* Selection iteration info has been initialized */
+ H5S_sel_iter_t sel_iter; /* Selection iteration info */
+ hbool_t sel_iter_init = FALSE; /* Selection iteration info has been initialized */
- struct dim { /* less hassle than malloc/free & ilk */
+ struct dim { /* less hassle than malloc/free & ilk */
hssize_t start;
- hsize_t strid;
- hsize_t block;
- hsize_t xtent;
- hsize_t count;
+ hsize_t strid;
+ hsize_t block;
+ hsize_t xtent;
+ hsize_t count;
} d[H5S_MAX_RANK];
- hsize_t bigio_count; /* Transition point to create derived type */
- hsize_t offset[H5S_MAX_RANK];
- hsize_t max_xtent[H5S_MAX_RANK];
- H5S_hyper_dim_t *diminfo; /* [rank] */
- unsigned rank;
- MPI_Datatype inner_type, outer_type;
- MPI_Aint extent_len, start_disp, new_extent;
- MPI_Aint lb; /* Needed as an argument for MPI_Type_get_extent */
- unsigned u; /* Local index variable */
- int i; /* Local index variable */
- int mpi_code; /* MPI return code */
- herr_t ret_value = SUCCEED;
+ hsize_t bigio_count; /* Transition point to create derived type */
+ hsize_t offset[H5S_MAX_RANK];
+ hsize_t max_xtent[H5S_MAX_RANK];
+ H5S_hyper_dim_t *diminfo; /* [rank] */
+ unsigned rank;
+ MPI_Datatype inner_type, outer_type;
+ MPI_Aint extent_len, start_disp, new_extent;
+ MPI_Aint lb; /* Needed as an argument for MPI_Type_get_extent */
+ unsigned u; /* Local index variable */
+ int i; /* Local index variable */
+ int mpi_code; /* MPI return code */
+ herr_t ret_value = SUCCEED;
FUNC_ENTER_STATIC
@@ -676,9 +669,9 @@ H5S__mpio_reg_hyper_type(const H5S_t *space, size_t elmt_size,
bigio_count = H5_mpi_get_bigio_count();
/* Initialize selection iterator */
- if(H5S_select_iter_init(&sel_iter, space, elmt_size, 0) < 0)
+ if (H5S_select_iter_init(&sel_iter, space, elmt_size, 0) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to initialize selection iterator")
- sel_iter_init = TRUE; /* Selection iteration info has been initialized */
+ sel_iter_init = TRUE; /* Selection iteration info has been initialized */
/* Abbreviate args */
diminfo = sel_iter.u.hyp.diminfo;
@@ -687,14 +680,14 @@ H5S__mpio_reg_hyper_type(const H5S_t *space, size_t elmt_size,
/* Make a local copy of the dimension info so we can operate with them */
/* Check if this is a "flattened" regular hyperslab selection */
- if(sel_iter.u.hyp.iter_rank != 0 && sel_iter.u.hyp.iter_rank < space->extent.rank) {
+ if (sel_iter.u.hyp.iter_rank != 0 && sel_iter.u.hyp.iter_rank < space->extent.rank) {
/* Flattened selection */
rank = sel_iter.u.hyp.iter_rank;
#ifdef H5S_DEBUG
-if(H5DEBUG(S))
- HDfprintf(H5DEBUG(S), "%s: Flattened selection\n",FUNC);
+ if (H5DEBUG(S))
+ HDfprintf(H5DEBUG(S), "%s: Flattened selection\n", FUNC);
#endif
- for(u = 0; u < rank; ++u) {
+ for (u = 0; u < rank; ++u) {
H5_CHECK_OVERFLOW(diminfo[u].start, hsize_t, hssize_t)
d[u].start = (hssize_t)diminfo[u].start + sel_iter.u.hyp.sel_off[u];
d[u].strid = diminfo[u].stride;
@@ -703,14 +696,16 @@ if(H5DEBUG(S))
d[u].xtent = sel_iter.u.hyp.size[u];
#ifdef H5S_DEBUG
-if(H5DEBUG(S)) {
- HDfprintf(H5DEBUG(S), "%s: start=%" PRIdHSIZE " stride=%" PRIuHSIZE " count=%" PRIuHSIZE " block=%" PRIuHSIZE " xtent=%" PRIuHSIZE,
- FUNC, d[u].start, d[u].strid, d[u].count, d[u].block, d[u].xtent);
- if(u == 0)
- HDfprintf(H5DEBUG(S), " rank=%u\n", rank);
- else
- HDfprintf(H5DEBUG(S), "\n");
-}
+ if (H5DEBUG(S)) {
+ HDfprintf(H5DEBUG(S),
+ "%s: start=%" PRIdHSIZE " stride=%" PRIuHSIZE " count=%" PRIuHSIZE
+ " block=%" PRIuHSIZE " xtent=%" PRIuHSIZE,
+ FUNC, d[u].start, d[u].strid, d[u].count, d[u].block, d[u].xtent);
+ if (u == 0)
+ HDfprintf(H5DEBUG(S), " rank=%u\n", rank);
+ else
+ HDfprintf(H5DEBUG(S), "\n");
+ }
#endif
/* Sanity check */
@@ -718,15 +713,15 @@ if(H5DEBUG(S)) {
HDassert(d[u].count > 0);
HDassert(d[u].xtent > 0);
} /* end for */
- } /* end if */
+ } /* end if */
else {
/* Non-flattened selection */
rank = space->extent.rank;
#ifdef H5S_DEBUG
-if(H5DEBUG(S))
- HDfprintf(H5DEBUG(S),"%s: Non-flattened selection\n",FUNC);
+ if (H5DEBUG(S))
+ HDfprintf(H5DEBUG(S), "%s: Non-flattened selection\n", FUNC);
#endif
- for(u = 0; u < rank; ++u) {
+ for (u = 0; u < rank; ++u) {
H5_CHECK_OVERFLOW(diminfo[u].start, hsize_t, hssize_t)
d[u].start = (hssize_t)diminfo[u].start + space->select.offset[u];
d[u].strid = diminfo[u].stride;
@@ -735,14 +730,16 @@ if(H5DEBUG(S))
d[u].xtent = space->extent.size[u];
#ifdef H5S_DEBUG
-if(H5DEBUG(S)) {
- HDfprintf(H5DEBUG(S), "%s: start=%" PRIdHSIZE " stride=%" PRIuHSIZE " count=%" PRIuHSIZE " block=%" PRIuHSIZE " xtent=%" PRIuHSIZE,
- FUNC, d[u].start, d[u].strid, d[u].count, d[u].block, d[u].xtent);
- if(u == 0)
- HDfprintf(H5DEBUG(S), " rank=%u\n", rank);
- else
- HDfprintf(H5DEBUG(S), "\n");
-}
+ if (H5DEBUG(S)) {
+ HDfprintf(H5DEBUG(S),
+ "%s: start=%" PRIdHSIZE " stride=%" PRIuHSIZE " count=%" PRIuHSIZE
+ " block=%" PRIuHSIZE " xtent=%" PRIuHSIZE,
+ FUNC, d[u].start, d[u].strid, d[u].count, d[u].block, d[u].xtent);
+ if (u == 0)
+ HDfprintf(H5DEBUG(S), " rank=%u\n", rank);
+ else
+ HDfprintf(H5DEBUG(S), "\n");
+ }
#endif
/* Sanity check */
@@ -750,26 +747,28 @@ if(H5DEBUG(S)) {
HDassert(d[u].count > 0);
HDassert(d[u].xtent > 0);
} /* end for */
- } /* end else */
+ } /* end else */
-/**********************************************************************
- Compute array "offset[rank]" which gives the offsets for a multi-
- dimensional array with dimensions "d[i].xtent" (i=0,1,...,rank-1).
-**********************************************************************/
- offset[rank - 1] = 1;
+ /**********************************************************************
+ Compute array "offset[rank]" which gives the offsets for a multi-
+ dimensional array with dimensions "d[i].xtent" (i=0,1,...,rank-1).
+ **********************************************************************/
+ offset[rank - 1] = 1;
max_xtent[rank - 1] = d[rank - 1].xtent;
#ifdef H5S_DEBUG
-if(H5DEBUG(S)) {
- i = ((int)rank) - 1;
- HDfprintf(H5DEBUG(S), " offset[%2d]=%" PRIuHSIZE "; max_xtent[%2d]=%" PRIuHSIZE "\n", i, offset[i], i, max_xtent[i]);
-}
+ if (H5DEBUG(S)) {
+ i = ((int)rank) - 1;
+ HDfprintf(H5DEBUG(S), " offset[%2d]=%" PRIuHSIZE "; max_xtent[%2d]=%" PRIuHSIZE "\n", i, offset[i], i,
+ max_xtent[i]);
+ }
#endif
- for(i = ((int)rank) - 2; i >= 0; --i) {
- offset[i] = offset[i + 1] * d[i + 1].xtent;
+ for (i = ((int)rank) - 2; i >= 0; --i) {
+ offset[i] = offset[i + 1] * d[i + 1].xtent;
max_xtent[i] = max_xtent[i + 1] * d[i].xtent;
#ifdef H5S_DEBUG
-if(H5DEBUG(S))
- HDfprintf(H5DEBUG(S), " offset[%2d]=%" PRIuHSIZE "; max_xtent[%2d]=%" PRIuHSIZE "\n", i, offset[i], i, max_xtent[i]);
+ if (H5DEBUG(S))
+ HDfprintf(H5DEBUG(S), " offset[%2d]=%" PRIuHSIZE "; max_xtent[%2d]=%" PRIuHSIZE "\n", i,
+ offset[i], i, max_xtent[i]);
#endif
} /* end for */
@@ -780,14 +779,14 @@ if(H5DEBUG(S))
*/
/*******************************************************
-* Construct contig type for inner contig dims:
-*******************************************************/
+ * Construct contig type for inner contig dims:
+ *******************************************************/
#ifdef H5S_DEBUG
-if(H5DEBUG(S)) {
- HDfprintf(H5DEBUG(S), "%s: Making contig type %zu MPI_BYTEs\n", FUNC, elmt_size);
- for(i = ((int)rank) - 1; i >= 0; --i)
- HDfprintf(H5DEBUG(S), "d[%d].xtent=%" PRIuHSIZE "\n", i, d[i].xtent);
-}
+ if (H5DEBUG(S)) {
+ HDfprintf(H5DEBUG(S), "%s: Making contig type %zu MPI_BYTEs\n", FUNC, elmt_size);
+ for (i = ((int)rank) - 1; i >= 0; --i)
+ HDfprintf(H5DEBUG(S), "d[%d].xtent=%" PRIuHSIZE "\n", i, d[i].xtent);
+ }
#endif
/* LARGE_DATATYPE::
@@ -796,51 +795,51 @@ if(H5DEBUG(S)) {
* Otherwise create a compound datatype by iterating as many times as needed
* for the innertype to be created.
*/
- if(bigio_count >= elmt_size) {
+ if (bigio_count >= elmt_size) {
/* Use a single MPI datatype that has a 32 bit size */
- if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)elmt_size, MPI_BYTE, &inner_type)))
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)elmt_size, MPI_BYTE, &inner_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
}
else
/* Create the compound datatype for this operation (> 2GB) */
- if(H5_mpio_create_large_type(elmt_size, 0, MPI_BYTE, &inner_type) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create a large inner datatype in hyper selection")
-
-/*******************************************************
-* Construct the type by walking the hyperslab dims
-* from the inside out:
-*******************************************************/
- for(i = ((int)rank) - 1; i >= 0; --i) {
+ if (H5_mpio_create_large_type(elmt_size, 0, MPI_BYTE, &inner_type) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,
+ "couldn't create a large inner datatype in hyper selection")
+
+ /*******************************************************
+ * Construct the type by walking the hyperslab dims
+ * from the inside out:
+ *******************************************************/
+ for (i = ((int)rank) - 1; i >= 0; --i) {
#ifdef H5S_DEBUG
-if(H5DEBUG(S))
- HDfprintf(H5DEBUG(S), "%s: Dimension i=%d \n"
- "start=%" PRIdHSIZE " count=%" PRIuHSIZE " block=%" PRIuHSIZE
- " stride=%" PRIuHSIZE ", xtent=%" PRIuHSIZE
- " max_xtent=%" PRIuHSIZE "\n",
- FUNC, i, d[i].start, d[i].count, d[i].block, d[i].strid, d[i].xtent, max_xtent[i]);
+ if (H5DEBUG(S))
+ HDfprintf(H5DEBUG(S),
+ "%s: Dimension i=%d \n"
+ "start=%" PRIdHSIZE " count=%" PRIuHSIZE " block=%" PRIuHSIZE " stride=%" PRIuHSIZE
+ ", xtent=%" PRIuHSIZE " max_xtent=%" PRIuHSIZE "\n",
+ FUNC, i, d[i].start, d[i].count, d[i].block, d[i].strid, d[i].xtent, max_xtent[i]);
#endif
#ifdef H5S_DEBUG
-if(H5DEBUG(S))
- HDfprintf(H5DEBUG(S), "%s: i=%d Making vector-type \n", FUNC,i);
+ if (H5DEBUG(S))
+ HDfprintf(H5DEBUG(S), "%s: i=%d Making vector-type \n", FUNC, i);
#endif
- /****************************************
- * Build vector type of the selection.
- ****************************************/
- if(bigio_count >= d[i].count &&
- bigio_count >= d[i].block && bigio_count >= d[i].strid) {
+ /****************************************
+ * Build vector type of the selection.
+ ****************************************/
+ if (bigio_count >= d[i].count && bigio_count >= d[i].block && bigio_count >= d[i].strid) {
/* All the parameters fit into 32 bit integers so create the vector type normally */
- mpi_code = MPI_Type_vector((int)(d[i].count), /* count */
- (int)(d[i].block), /* blocklength */
- (int)(d[i].strid), /* stride */
- inner_type, /* old type */
- &outer_type); /* new type */
+ mpi_code = MPI_Type_vector((int)(d[i].count), /* count */
+ (int)(d[i].block), /* blocklength */
+ (int)(d[i].strid), /* stride */
+ inner_type, /* old type */
+ &outer_type); /* new type */
MPI_Type_free(&inner_type);
- if(mpi_code != MPI_SUCCESS)
+ if (mpi_code != MPI_SUCCESS)
HMPI_GOTO_ERROR(FAIL, "couldn't create MPI vector type", mpi_code)
- }
- else {
+ }
+ else {
/* Things get a bit more complicated and require LARGE_DATATYPE processing
* There are two MPI datatypes that need to be created:
* 1) an internal contiguous block; and
@@ -852,19 +851,20 @@ if(H5DEBUG(S))
* (2GB-1)number_of_blocks * the_datatype_extent.
*/
- MPI_Aint stride_in_bytes, inner_extent;
+ MPI_Aint stride_in_bytes, inner_extent;
MPI_Datatype block_type;
/* Create a contiguous datatype inner_type x number of BLOCKS.
* Again we need to check that the number of BLOCKS can fit into
* a 32 bit integer */
- if(bigio_count < d[i].block) {
- if(H5_mpio_create_large_type(d[i].block, 0, inner_type, &block_type) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create a large block datatype in hyper selection")
+ if (bigio_count < d[i].block) {
+ if (H5_mpio_create_large_type(d[i].block, 0, inner_type, &block_type) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,
+ "couldn't create a large block datatype in hyper selection")
}
- else
- if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)d[i].block, inner_type, &block_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
+ else if (MPI_SUCCESS !=
+ (mpi_code = MPI_Type_contiguous((int)d[i].block, inner_type, &block_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
/* As of version 4.0, OpenMPI now turns off MPI-1 API calls by default,
* so we're using the MPI-2 version even though we don't need the lb
@@ -879,18 +879,18 @@ if(H5DEBUG(S))
/* If the element count is larger than what a 32 bit integer can hold,
* we call the large type creation function to handle that
*/
- if(bigio_count < d[i].count) {
- if(H5_mpio_create_large_type(d[i].count, stride_in_bytes, block_type, &outer_type) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create a large outer datatype in hyper selection")
+ if (bigio_count < d[i].count) {
+ if (H5_mpio_create_large_type(d[i].count, stride_in_bytes, block_type, &outer_type) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,
+ "couldn't create a large outer datatype in hyper selection")
}
/* otherwise a regular create_hvector will do */
- else
- if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hvector((int)d[i].count, /* count */
- 1, /* blocklength */
- stride_in_bytes, /* stride in bytes*/
- block_type, /* old type */
- &outer_type))) /* new type */
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hvector failed", mpi_code)
+ else if (MPI_SUCCESS != (mpi_code = MPI_Type_create_hvector((int)d[i].count, /* count */
+ 1, /* blocklength */
+ stride_in_bytes, /* stride in bytes*/
+ block_type, /* old type */
+ &outer_type))) /* new type */
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hvector failed", mpi_code)
MPI_Type_free(&block_type);
MPI_Type_free(&inner_type);
@@ -902,14 +902,14 @@ if(H5DEBUG(S))
/* Calculate start and extent values of this dimension */
/* Check if value overflow to cast to type MPI_Aint */
- if(d[i].start > LONG_MAX || offset[i] > LONG_MAX || elmt_size > LONG_MAX)
+ if (d[i].start > LONG_MAX || offset[i] > LONG_MAX || elmt_size > LONG_MAX)
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "result overflow")
- start_disp = (MPI_Aint)d[i].start * (MPI_Aint)offset[i] * (MPI_Aint)elmt_size;
+ start_disp = (MPI_Aint)d[i].start * (MPI_Aint)offset[i] * (MPI_Aint)elmt_size;
- if(max_xtent[i] > LONG_MAX)
+ if (max_xtent[i] > LONG_MAX)
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "max_xtent overflow")
new_extent = (MPI_Aint)elmt_size * (MPI_Aint)max_xtent[i];
- if(MPI_SUCCESS != (mpi_code = MPI_Type_get_extent(outer_type, &lb, &extent_len)))
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_get_extent(outer_type, &lb, &extent_len)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_get_extent failed", mpi_code)
/*************************************************
@@ -917,52 +917,52 @@ if(H5DEBUG(S))
* so that it still starts at 0, but its extent
* is the full extent in this dimension.
*************************************************/
- if(start_disp > 0 || extent_len < new_extent) {
+ if (start_disp > 0 || extent_len < new_extent) {
MPI_Datatype interm_type;
- int block_len = 1;
+ int block_len = 1;
HDassert(0 == lb);
mpi_code = MPI_Type_create_hindexed(1, &block_len, &start_disp, outer_type, &interm_type);
MPI_Type_free(&outer_type);
- if(mpi_code != MPI_SUCCESS)
+ if (mpi_code != MPI_SUCCESS)
HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
mpi_code = MPI_Type_create_resized(interm_type, lb, new_extent, &inner_type);
MPI_Type_free(&interm_type);
- if(mpi_code != MPI_SUCCESS)
+ if (mpi_code != MPI_SUCCESS)
HMPI_GOTO_ERROR(FAIL, "couldn't resize MPI vector type", mpi_code)
} /* end if */
else
inner_type = outer_type;
} /* end for */
-/******************************************
-* End of loop, walking through dimensions.
-*******************************************/
+ /******************************************
+ * End of loop, walking through dimensions.
+ *******************************************/
/* At this point inner_type is actually the outermost type, even for 0-trip loop */
*new_type = inner_type;
- if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_type)))
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
/* fill in the remaining return values */
- *count = 1; /* only have to move one of these suckers! */
+ *count = 1; /* only have to move one of these suckers! */
*is_derived_type = TRUE;
done:
/* Release selection iterator */
- if(sel_iter_init)
- if(H5S_SELECT_ITER_RELEASE(&sel_iter) < 0)
+ if (sel_iter_init)
+ if (H5S_SELECT_ITER_RELEASE(&sel_iter) < 0)
HDONE_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release selection iterator")
#ifdef H5S_DEBUG
-if(H5DEBUG(S))
- HDfprintf(H5DEBUG(S), "Leave %s, count=%d is_derived_type=%s\n", FUNC, *count, (*is_derived_type) ? "TRUE" : "FALSE");
+ if (H5DEBUG(S))
+ HDfprintf(H5DEBUG(S), "Leave %s, count=%d is_derived_type=%s\n", FUNC, *count,
+ (*is_derived_type) ? "TRUE" : "FALSE");
#endif
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5S__mpio_reg_hyper_type() */
-
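
The inside-out walk above builds one MPI_Type_vector per dimension and, whenever
the vector's natural extent falls short of the dimension's full extent, resizes
it so the next level tiles correctly. A two-dimensional sketch with made-up
geometry (8-byte elements; inner dimension: 4 blocks of 10 elements at stride 20
within an extent of 100; outer dimension: 50 rows):

#include <mpi.h>

/* Sketch: nested vector construction for a regular 2-D hyperslab. */
static int reg_hyper_demo(MPI_Datatype *out)
{
    MPI_Datatype elmt, row, row_resized;
    MPI_Aint     row_bytes = 100 * 8;   /* full extent of the inner dimension */

    MPI_Type_contiguous(8, MPI_BYTE, &elmt);                   /* innermost: one element */
    MPI_Type_vector(4, 10, 20, elmt, &row);                    /* blocks within a row    */
    MPI_Type_create_resized(row, 0, row_bytes, &row_resized);  /* stretch to full extent */
    MPI_Type_vector(50, 1, 1, row_resized, out);               /* stack 50 rows          */
    MPI_Type_commit(out);

    MPI_Type_free(&elmt);
    MPI_Type_free(&row);
    MPI_Type_free(&row_resized);
    return 0;
}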
/*-------------------------------------------------------------------------
* Function: H5S__mpio_span_hyper_type
*
@@ -981,18 +981,18 @@ if(H5DEBUG(S))
*-------------------------------------------------------------------------
*/
static herr_t
-H5S__mpio_span_hyper_type(const H5S_t *space, size_t elmt_size,
- MPI_Datatype *new_type, int *count, hbool_t *is_derived_type)
+H5S__mpio_span_hyper_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type, int *count,
+ hbool_t *is_derived_type)
{
- H5S_mpio_mpitype_list_t type_list; /* List to track MPI data types created */
- MPI_Datatype elmt_type; /* MPI datatype for an element */
- hbool_t elmt_type_is_derived = FALSE; /* Whether the element type has been created */
- MPI_Datatype span_type; /* MPI datatype for overall span tree */
- hsize_t bigio_count; /* Transition point to create derived type */
- hsize_t down[H5S_MAX_RANK]; /* 'down' sizes for each dimension */
- uint64_t op_gen; /* Operation generation value */
- int mpi_code; /* MPI return code */
- herr_t ret_value = SUCCEED; /* Return value */
+ H5S_mpio_mpitype_list_t type_list; /* List to track MPI data types created */
+ MPI_Datatype elmt_type; /* MPI datatype for an element */
+ hbool_t elmt_type_is_derived = FALSE; /* Whether the element type has been created */
+ MPI_Datatype span_type; /* MPI datatype for overall span tree */
+ hsize_t bigio_count; /* Transition point to create derived type */
+ hsize_t down[H5S_MAX_RANK]; /* 'down' sizes for each dimension */
+ uint64_t op_gen; /* Operation generation value */
+ int mpi_code; /* MPI return code */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -1004,17 +1004,17 @@ H5S__mpio_span_hyper_type(const H5S_t *space, size_t elmt_size,
bigio_count = H5_mpi_get_bigio_count();
/* Create the base type for an element */
- if(bigio_count >= elmt_size) {
- if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)elmt_size, MPI_BYTE, &elmt_type)))
+ if (bigio_count >= elmt_size) {
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)elmt_size, MPI_BYTE, &elmt_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
}
- else
- if(H5_mpio_create_large_type(elmt_size, 0, MPI_BYTE, &elmt_type) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create a large element datatype in span_hyper selection")
+ else if (H5_mpio_create_large_type(elmt_size, 0, MPI_BYTE, &elmt_type) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,
+ "couldn't create a large element datatype in span_hyper selection")
elmt_type_is_derived = TRUE;
/* Compute 'down' sizes for each dimension */
- if(H5VM_array_down(space->extent.rank, space->extent.size, down) < 0)
+ if (H5VM_array_down(space->extent.rank, space->extent.size, down) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGETSIZE, FAIL, "couldn't compute 'down' dimension sizes")
/* Acquire an operation generation value for creating MPI datatypes */
@@ -1024,31 +1024,31 @@ H5S__mpio_span_hyper_type(const H5S_t *space, size_t elmt_size,
/* Always use op_info[0] since we own this op_info, so there can be no
* simultaneous operations */
type_list.head = type_list.tail = NULL;
- if(H5S__obtain_datatype(space->select.sel_info.hslab->span_lst, down, elmt_size, &elmt_type, &span_type, &type_list, 0, op_gen) < 0)
+ if (H5S__obtain_datatype(space->select.sel_info.hslab->span_lst, down, elmt_size, &elmt_type, &span_type,
+ &type_list, 0, op_gen) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't obtain MPI derived data type")
- if(MPI_SUCCESS != (mpi_code = MPI_Type_dup(span_type, new_type)))
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_dup(span_type, new_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
- if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_type)))
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
/* Release MPI data types generated during span tree traversal */
- if(H5S__release_datatype(&type_list) < 0)
+ if (H5S__release_datatype(&type_list) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "couldn't release MPI derived data type")
/* fill in the remaining return values */
- *count = 1;
+ *count = 1;
*is_derived_type = TRUE;
done:
/* Release resources */
- if(elmt_type_is_derived)
- if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&elmt_type)))
+ if (elmt_type_is_derived)
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_free(&elmt_type)))
HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5S__mpio_span_hyper_type() */
-
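
One detail in the function above: the datatype returned by the span-tree
traversal stays cached in the tree and is later freed through the type list, so
the caller commits a private duplicate instead of the cached handle. The hand-off
in miniature:

#include <mpi.h>

/* Sketch: take a committed private copy of a cached, uncommitted datatype. */
static int take_committed_copy(MPI_Datatype cached, MPI_Datatype *mine)
{
    if (MPI_SUCCESS != MPI_Type_dup(cached, mine))
        return -1;
    if (MPI_SUCCESS != MPI_Type_commit(mine))
        return -1;
    return 0;
}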
/*-------------------------------------------------------------------------
* Function: H5S__release_datatype
*
@@ -1063,8 +1063,8 @@ done:
static herr_t
H5S__release_datatype(H5S_mpio_mpitype_list_t *type_list)
{
- H5S_mpio_mpitype_node_t *curr; /* Pointer to head of list */
- herr_t ret_value = SUCCEED; /* Return value */
+ H5S_mpio_mpitype_node_t *curr; /* Pointer to head of list */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -1073,12 +1073,12 @@ H5S__release_datatype(H5S_mpio_mpitype_list_t *type_list)
/* Iterate over the list, freeing the MPI data types */
curr = type_list->head;
- while(curr) {
- H5S_mpio_mpitype_node_t *next; /* Pointer to next node in list */
- int mpi_code; /* MPI return status code */
+ while (curr) {
+ H5S_mpio_mpitype_node_t *next; /* Pointer to next node in list */
+ int mpi_code; /* MPI return status code */
/* Release the MPI data type for this span tree */
- if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&curr->type)))
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_free(&curr->type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
/* Get pointer to next node in list */
@@ -1092,10 +1092,9 @@ H5S__release_datatype(H5S_mpio_mpitype_list_t *type_list)
} /* end while */
done:
- FUNC_LEAVE_NOAPI(ret_value)
+ FUNC_LEAVE_NOAPI(ret_value)
} /* end H5S__release_datatype() */
-
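
The cleanup walk above in miniature, with plain malloc'd nodes standing in for
the H5FL free-list allocation the real code uses:

#include <mpi.h>
#include <stdlib.h>

struct type_node {
    MPI_Datatype      type;
    struct type_node *next;
};

/* Sketch: free every MPI datatype in the list, then the nodes themselves. */
static int release_types(struct type_node *head)
{
    while (head) {
        struct type_node *next = head->next;

        if (MPI_SUCCESS != MPI_Type_free(&head->type))
            return -1;
        free(head);
        head = next;
    }
    return 0;
}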
/*-------------------------------------------------------------------------
* Function: H5S__obtain_datatype
*
@@ -1110,21 +1109,21 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5S__obtain_datatype(H5S_hyper_span_info_t *spans, const hsize_t *down,
- size_t elmt_size, const MPI_Datatype *elmt_type, MPI_Datatype *span_type,
- H5S_mpio_mpitype_list_t *type_list, unsigned op_info_i, uint64_t op_gen)
+H5S__obtain_datatype(H5S_hyper_span_info_t *spans, const hsize_t *down, size_t elmt_size,
+ const MPI_Datatype *elmt_type, MPI_Datatype *span_type,
+ H5S_mpio_mpitype_list_t *type_list, unsigned op_info_i, uint64_t op_gen)
{
- H5S_hyper_span_t *span; /* Hyperslab span to iterate with */
- hsize_t bigio_count; /* Transition point to create derived type */
- size_t alloc_count = 0; /* Number of span tree nodes allocated at this level */
- size_t outercount = 0; /* Number of span tree nodes at this level */
- MPI_Datatype *inner_type = NULL;
- hbool_t inner_types_freed = FALSE; /* Whether the inner_type MPI datatypes have been freed */
- int *blocklen = NULL;
- MPI_Aint *disp = NULL;
- size_t u; /* Local index variable */
- int mpi_code; /* MPI return status code */
- herr_t ret_value = SUCCEED; /* Return value */
+ H5S_hyper_span_t *span; /* Hyperslab span to iterate with */
+ hsize_t bigio_count; /* Transition point to create derived type */
+ size_t alloc_count = 0; /* Number of span tree nodes allocated at this level */
+ size_t outercount = 0; /* Number of span tree nodes at this level */
+ MPI_Datatype * inner_type = NULL;
+ hbool_t inner_types_freed = FALSE; /* Whether the inner_type MPI datatypes have been freed */
+ int * blocklen = NULL;
+ MPI_Aint * disp = NULL;
+ size_t u; /* Local index variable */
+ int mpi_code; /* MPI return status code */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -1134,39 +1133,41 @@ H5S__obtain_datatype(H5S_hyper_span_info_t *spans, const hsize_t *down,
bigio_count = H5_mpi_get_bigio_count();
/* Check if we've visited this span tree before */
- if(spans->op_info[op_info_i].op_gen != op_gen) {
- H5S_mpio_mpitype_node_t *type_node; /* Pointer to new node in MPI data type list */
+ if (spans->op_info[op_info_i].op_gen != op_gen) {
+ H5S_mpio_mpitype_node_t *type_node; /* Pointer to new node in MPI data type list */
/* Allocate the initial displacement & block length buffers */
alloc_count = H5S_MPIO_INITIAL_ALLOC_COUNT;
- if(NULL == (disp = (MPI_Aint *)H5MM_malloc(alloc_count * sizeof(MPI_Aint))))
+ if (NULL == (disp = (MPI_Aint *)H5MM_malloc(alloc_count * sizeof(MPI_Aint))))
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of displacements")
- if(NULL == (blocklen = (int *)H5MM_malloc(alloc_count * sizeof(int))))
+ if (NULL == (blocklen = (int *)H5MM_malloc(alloc_count * sizeof(int))))
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of block lengths")
/* If this is the fastest changing dimension, it is the base case for derived datatype. */
span = spans->head;
- if(NULL == span->down) {
- hbool_t large_block = FALSE; /* Wether the block length is larger than 32 bit integer */
+ if (NULL == span->down) {
+ hbool_t large_block = FALSE; /* Whether the block length is larger than a 32 bit integer */
outercount = 0;
- while(span) {
- hsize_t nelmts; /* # of elements covered by current span */
+ while (span) {
+ hsize_t nelmts; /* # of elements covered by current span */
/* Check if we need to increase the size of the buffers */
- if(outercount >= alloc_count) {
- MPI_Aint *tmp_disp; /* Temporary pointer to new displacement buffer */
- int *tmp_blocklen; /* Temporary pointer to new block length buffer */
+ if (outercount >= alloc_count) {
+ MPI_Aint *tmp_disp; /* Temporary pointer to new displacement buffer */
+ int * tmp_blocklen; /* Temporary pointer to new block length buffer */
/* Double the allocation count */
alloc_count *= 2;
/* Re-allocate the buffers */
- if(NULL == (tmp_disp = (MPI_Aint *)H5MM_realloc(disp, alloc_count * sizeof(MPI_Aint))))
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of displacements")
+ if (NULL == (tmp_disp = (MPI_Aint *)H5MM_realloc(disp, alloc_count * sizeof(MPI_Aint))))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL,
+ "can't allocate array of displacements")
disp = tmp_disp;
- if(NULL == (tmp_blocklen = (int *)H5MM_realloc(blocklen, alloc_count * sizeof(int))))
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of block lengths")
+ if (NULL == (tmp_blocklen = (int *)H5MM_realloc(blocklen, alloc_count * sizeof(int))))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL,
+ "can't allocate array of block lengths")
blocklen = tmp_blocklen;
} /* end if */
@@ -1174,11 +1175,11 @@ H5S__obtain_datatype(H5S_hyper_span_info_t *spans, const hsize_t *down,
nelmts = (span->high - span->low) + 1;
/* Store displacement & block length */
- disp[outercount] = (MPI_Aint)elmt_size * (MPI_Aint)span->low;
+ disp[outercount] = (MPI_Aint)elmt_size * (MPI_Aint)span->low;
H5_CHECK_OVERFLOW(nelmts, hsize_t, int)
- blocklen[outercount] = (int)nelmts;
+ blocklen[outercount] = (int)nelmts;
- if(bigio_count < (hsize_t)blocklen[outercount])
+ if (bigio_count < (hsize_t)blocklen[outercount])
large_block = TRUE; /* at least one block type is large, so set this flag to true */
span = span->next;
@@ -1186,49 +1187,53 @@ H5S__obtain_datatype(H5S_hyper_span_info_t *spans, const hsize_t *down,
} /* end while */
/* Everything fits into integers, so cast them and use hindexed */
- if(bigio_count >= outercount && large_block == FALSE) {
- if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed((int)outercount, blocklen, disp, *elmt_type, &spans->op_info[op_info_i].u.down_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
- } /* end if */
- else { /* LARGE_DATATYPE:: Something doesn't fit into a 32 bit integer */
- for(u = 0 ; u < outercount; u++) {
+ if (bigio_count >= outercount && large_block == FALSE) {
+ if (MPI_SUCCESS !=
+ (mpi_code = MPI_Type_create_hindexed((int)outercount, blocklen, disp, *elmt_type,
+ &spans->op_info[op_info_i].u.down_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
+ } /* end if */
+ else { /* LARGE_DATATYPE:: Something doesn't fit into a 32 bit integer */
+ for (u = 0; u < outercount; u++) {
MPI_Datatype temp_type = MPI_DATATYPE_NULL;
/* create the block type from elmt_type while checking the 32 bit int limit */
- if((hsize_t)(blocklen[u]) > bigio_count) {
- if(H5_mpio_create_large_type((hsize_t)blocklen[u], 0, *elmt_type, &temp_type) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create a large element datatype in span_hyper selection")
+ if ((hsize_t)(blocklen[u]) > bigio_count) {
+ if (H5_mpio_create_large_type((hsize_t)blocklen[u], 0, *elmt_type, &temp_type) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,
+ "couldn't create a large element datatype in span_hyper selection")
} /* end if */
- else
- if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)blocklen[u], *elmt_type, &temp_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
+ else if (MPI_SUCCESS !=
+ (mpi_code = MPI_Type_contiguous((int)blocklen[u], *elmt_type, &temp_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
/* Combine the current datatype that is created with this current block type */
- if(0 == u) /* first iteration, there is no combined datatype yet */
+ if (0 == u) /* first iteration, there is no combined datatype yet */
spans->op_info[op_info_i].u.down_type = temp_type;
else {
- int bl[2] = {1, 1};
- MPI_Aint ds[2] = {disp[u - 1], disp[u]};
+ int bl[2] = {1, 1};
+ MPI_Aint ds[2] = {disp[u - 1], disp[u]};
MPI_Datatype dt[2] = {spans->op_info[op_info_i].u.down_type, temp_type};
- if(MPI_SUCCESS != (mpi_code = MPI_Type_create_struct(2, /* count */
- bl, /* blocklength */
- ds, /* stride in bytes*/
- dt, /* old type */
- &spans->op_info[op_info_i].u.down_type))) /* new type */
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_create_struct(
+ 2, /* count */
+ bl, /* blocklength */
+ ds, /* stride in bytes*/
+ dt, /* old type */
+ &spans->op_info[op_info_i].u.down_type))) /* new type */
HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code)
/* Release previous temporary datatype */
- if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&temp_type)))
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_free(&temp_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
} /* end else */
- } /* end for */
- } /* end else (LARGE_DATATYPE::) */
- } /* end if */
+ } /* end for */
+ } /* end else (LARGE_DATATYPE::) */
+ } /* end if */
else {
- MPI_Aint stride; /* Distance between inner MPI datatypes */
+ MPI_Aint stride; /* Distance between inner MPI datatypes */
- if(NULL == (inner_type = (MPI_Datatype *)H5MM_malloc(alloc_count * sizeof(MPI_Datatype))))
+ if (NULL == (inner_type = (MPI_Datatype *)H5MM_malloc(alloc_count * sizeof(MPI_Datatype))))
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of inner MPI datatypes")
/* Calculate the total bytes of the lower dimension */
@@ -1236,39 +1241,44 @@ H5S__obtain_datatype(H5S_hyper_span_info_t *spans, const hsize_t *down,
/* Loop over span nodes */
outercount = 0;
- while(span) {
- MPI_Datatype down_type; /* Temporary MPI datatype for a span tree node's children */
- hsize_t nelmts; /* # of elements covered by current span */
+ while (span) {
+ MPI_Datatype down_type; /* Temporary MPI datatype for a span tree node's children */
+ hsize_t nelmts; /* # of elements covered by current span */
/* Check if we need to increase the size of the buffers */
- if(outercount >= alloc_count) {
- MPI_Aint *tmp_disp; /* Temporary pointer to new displacement buffer */
- int *tmp_blocklen; /* Temporary pointer to new block length buffer */
- MPI_Datatype *tmp_inner_type; /* Temporary pointer to inner MPI datatype buffer */
+ if (outercount >= alloc_count) {
+ MPI_Aint * tmp_disp; /* Temporary pointer to new displacement buffer */
+ int * tmp_blocklen; /* Temporary pointer to new block length buffer */
+ MPI_Datatype *tmp_inner_type; /* Temporary pointer to inner MPI datatype buffer */
/* Double the allocation count */
alloc_count *= 2;
/* Re-allocate the buffers */
- if(NULL == (tmp_disp = (MPI_Aint *)H5MM_realloc(disp, alloc_count * sizeof(MPI_Aint))))
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of displacements")
+ if (NULL == (tmp_disp = (MPI_Aint *)H5MM_realloc(disp, alloc_count * sizeof(MPI_Aint))))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL,
+ "can't allocate array of displacements")
disp = tmp_disp;
- if(NULL == (tmp_blocklen = (int *)H5MM_realloc(blocklen, alloc_count * sizeof(int))))
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of block lengths")
+ if (NULL == (tmp_blocklen = (int *)H5MM_realloc(blocklen, alloc_count * sizeof(int))))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL,
+ "can't allocate array of block lengths")
blocklen = tmp_blocklen;
- if(NULL == (tmp_inner_type = (MPI_Datatype *)H5MM_realloc(inner_type, alloc_count * sizeof(MPI_Datatype))))
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of inner MPI datatypes")
+ if (NULL == (tmp_inner_type = (MPI_Datatype *)H5MM_realloc(
+ inner_type, alloc_count * sizeof(MPI_Datatype))))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL,
+ "can't allocate array of inner MPI datatypes")
inner_type = tmp_inner_type;
} /* end if */
/* Displacement should be in byte and should have dimension information */
/* First using MPI Type vector to build derived data type for this span only */
/* Need to calculate the disp in byte for this dimension. */
- disp[outercount] = (MPI_Aint)span->low * stride;
- blocklen[outercount] = 1;
+ disp[outercount] = (MPI_Aint)span->low * stride;
+ blocklen[outercount] = 1;
/* Generate MPI datatype for next dimension down */
- if(H5S__obtain_datatype(span->down, down + 1, elmt_size, elmt_type, &down_type, type_list, op_info_i, op_gen) < 0)
+ if (H5S__obtain_datatype(span->down, down + 1, elmt_size, elmt_type, &down_type, type_list,
+ op_info_i, op_gen) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't obtain MPI derived data type")
/* Compute the number of elements to attempt in this span */
@@ -1276,27 +1286,29 @@ H5S__obtain_datatype(H5S_hyper_span_info_t *spans, const hsize_t *down,
/* Build the MPI datatype for this node */
H5_CHECK_OVERFLOW(nelmts, hsize_t, int)
- if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hvector((int)nelmts, 1, stride, down_type, &inner_type[outercount])))
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_create_hvector((int)nelmts, 1, stride, down_type,
+ &inner_type[outercount])))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hvector failed", mpi_code)
span = span->next;
outercount++;
- } /* end while */
+ } /* end while */
/* Building the whole vector datatype */
H5_CHECK_OVERFLOW(outercount, size_t, int)
- if(MPI_SUCCESS != (mpi_code = MPI_Type_create_struct((int)outercount, blocklen, disp, inner_type, &spans->op_info[op_info_i].u.down_type)))
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_create_struct((int)outercount, blocklen, disp, inner_type,
+ &spans->op_info[op_info_i].u.down_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code)
/* Release inner node types */
- for(u = 0; u < outercount; u++)
- if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&inner_type[u])))
+ for (u = 0; u < outercount; u++)
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_free(&inner_type[u])))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
inner_types_freed = TRUE;
} /* end else */
/* Allocate space for the MPI data type list node */
- if(NULL == (type_node = H5FL_MALLOC(H5S_mpio_mpitype_node_t)))
+ if (NULL == (type_node = H5FL_MALLOC(H5S_mpio_mpitype_node_t)))
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate MPI data type list node")
/* Set up MPI type node */
@@ -1304,11 +1316,11 @@ H5S__obtain_datatype(H5S_hyper_span_info_t *spans, const hsize_t *down,
type_node->next = NULL;
/* Add MPI type node to list */
- if(type_list->head == NULL)
+ if (type_list->head == NULL)
type_list->head = type_list->tail = type_node;
else {
type_list->tail->next = type_node;
- type_list->tail = type_node;
+ type_list->tail = type_node;
} /* end else */
/* Remember that we've visited this span tree */
@@ -1320,22 +1332,21 @@ H5S__obtain_datatype(H5S_hyper_span_info_t *spans, const hsize_t *down,
done:
/* General cleanup */
- if(inner_type != NULL) {
- if(!inner_types_freed)
- for(u = 0; u < outercount; u++)
- if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&inner_type[u])))
+ if (inner_type != NULL) {
+ if (!inner_types_freed)
+ for (u = 0; u < outercount; u++)
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_free(&inner_type[u])))
HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
H5MM_free(inner_type);
} /* end if */
- if(blocklen != NULL)
+ if (blocklen != NULL)
H5MM_free(blocklen);
- if(disp != NULL)
+ if (disp != NULL)
H5MM_free(disp);
- FUNC_LEAVE_NOAPI(ret_value)
+ FUNC_LEAVE_NOAPI(ret_value)
} /* end H5S__obtain_datatype() */
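[Editor's note: the base case above packs each lowest-dimension span into one (byte displacement, block length) pair and hands the arrays to MPI_Type_create_hindexed. A minimal standalone sketch of that pattern follows — plain MPI, not HDF5 code; the two spans and the use of MPI_INT are made-up illustration values:]

#include <mpi.h>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    /* Two hypothetical spans over int elements: [2,4] and [8,9].
     * Each span becomes one block of (high - low + 1) elements at
     * byte offset sizeof(element) * low, mirroring the loop above. */
    int          blocklen[2] = {3, 2};
    MPI_Aint     disp[2]     = {2 * (MPI_Aint)sizeof(int), 8 * (MPI_Aint)sizeof(int)};
    MPI_Datatype span_type;

    MPI_Type_create_hindexed(2, blocklen, disp, MPI_INT, &span_type);
    MPI_Type_commit(&span_type);

    /* span_type now selects elements 2,3,4,8,9 of an int buffer and
     * could serve as an MPI-IO file view or a send/recv datatype. */
    MPI_Type_free(&span_type);

    MPI_Finalize();
    return 0;
}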
-
/*-------------------------------------------------------------------------
* Function: H5S_mpio_space_type
*
@@ -1354,11 +1365,10 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5S_mpio_space_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type,
- int *count, hbool_t *is_derived_type, hbool_t do_permute, hsize_t **permute_map,
- hbool_t *is_permuted)
+H5S_mpio_space_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type, int *count,
+ hbool_t *is_derived_type, hbool_t do_permute, hsize_t **permute_map, hbool_t *is_permuted)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
@@ -1367,7 +1377,7 @@ H5S_mpio_space_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type
HDassert(elmt_size);
/* Create MPI type based on the kind of selection */
- switch(H5S_GET_EXTENT_TYPE(space)) {
+ switch (H5S_GET_EXTENT_TYPE(space)) {
case H5S_NULL:
case H5S_SCALAR:
case H5S_SIMPLE:
@@ -1375,11 +1385,12 @@ H5S_mpio_space_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type
* out-of-order point selection, then permute this selection which
* should be a memory selection to match the file space permutation.
*/
- if(TRUE == *is_permuted) {
- switch(H5S_GET_SELECT_TYPE(space)) {
+ if (TRUE == *is_permuted) {
+ switch (H5S_GET_SELECT_TYPE(space)) {
case H5S_SEL_NONE:
- if(H5S__mpio_none_type(new_type, count, is_derived_type) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't convert 'none' selection to MPI type")
+ if (H5S__mpio_none_type(new_type, count, is_derived_type) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,
+ "couldn't convert 'none' selection to MPI type")
break;
case H5S_SEL_ALL:
@@ -1388,8 +1399,10 @@ H5S_mpio_space_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type
/* Sanity check */
HDassert(!do_permute);
- if(H5S__mpio_permute_type(space, elmt_size, permute_map, new_type, count, is_derived_type) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't convert 'all' selection to MPI type")
+ if (H5S__mpio_permute_type(space, elmt_size, permute_map, new_type, count,
+ is_derived_type) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,
+ "couldn't convert 'all' selection to MPI type")
break;
case H5S_SEL_ERROR:
@@ -1398,33 +1411,40 @@ H5S_mpio_space_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type
HDassert("unknown selection type" && 0);
break;
} /* end switch */
- } /* end if */
+ } /* end if */
/* the file space is not permuted, so do a regular selection */
else {
- switch(H5S_GET_SELECT_TYPE(space)) {
+ switch (H5S_GET_SELECT_TYPE(space)) {
case H5S_SEL_NONE:
- if(H5S__mpio_none_type(new_type, count, is_derived_type) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert 'none' selection to MPI type")
+ if (H5S__mpio_none_type(new_type, count, is_derived_type) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,
+ "couldn't convert 'none' selection to MPI type")
break;
case H5S_SEL_ALL:
- if(H5S__mpio_all_type(space, elmt_size, new_type, count, is_derived_type) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert 'all' selection to MPI type")
+ if (H5S__mpio_all_type(space, elmt_size, new_type, count, is_derived_type) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,
+ "couldn't convert 'all' selection to MPI type")
break;
case H5S_SEL_POINTS:
- if(H5S__mpio_point_type(space, elmt_size, new_type, count, is_derived_type, do_permute, permute_map, is_permuted) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't convert 'point' selection to MPI type")
+ if (H5S__mpio_point_type(space, elmt_size, new_type, count, is_derived_type,
+ do_permute, permute_map, is_permuted) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,
+ "couldn't convert 'point' selection to MPI type")
break;
case H5S_SEL_HYPERSLABS:
- if((H5S_SELECT_IS_REGULAR(space) == TRUE)) {
- if(H5S__mpio_reg_hyper_type(space, elmt_size, new_type, count, is_derived_type) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert regular 'hyperslab' selection to MPI type")
+ if ((H5S_SELECT_IS_REGULAR(space) == TRUE)) {
+ if (H5S__mpio_reg_hyper_type(space, elmt_size, new_type, count, is_derived_type) <
+ 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,
+ "couldn't convert regular 'hyperslab' selection to MPI type")
} /* end if */
- else
- if(H5S__mpio_span_hyper_type(space, elmt_size, new_type, count, is_derived_type) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert irregular 'hyperslab' selection to MPI type")
+ else if (H5S__mpio_span_hyper_type(space, elmt_size, new_type, count,
+ is_derived_type) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,
+ "couldn't convert irregular 'hyperslab' selection to MPI type")
break;
case H5S_SEL_ERROR:
@@ -1433,7 +1453,7 @@ H5S_mpio_space_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type
HDassert("unknown selection type" && 0);
break;
} /* end switch */
- } /* end else */
+ } /* end else */
break;
case H5S_NO_CLASS:
@@ -1446,5 +1466,4 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5S_mpio_space_type() */
-#endif /* H5_HAVE_PARALLEL */
-
+#endif /* H5_HAVE_PARALLEL */
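[Editor's note: for context, a hedged sketch of the contract H5S_mpio_space_type() establishes with its callers, assuming — as HDF5's MPI-IO path does — that the caller commits and later frees any derived type it receives. The names file_space and elmt_size are placeholders, and the fragment relies on HDF5 private headers, so it is illustrative rather than buildable on its own:]

MPI_Datatype mpi_type        = MPI_BYTE;
int          mpi_count       = 0;
hbool_t      is_derived_type = FALSE;
hbool_t      is_permuted     = FALSE;
hsize_t     *permute_map     = NULL;

/* Translate the selection into an MPI datatype + count */
if (H5S_mpio_space_type(file_space, elmt_size, &mpi_type, &mpi_count,
                        &is_derived_type, /* do_permute */ TRUE,
                        &permute_map, &is_permuted) < 0)
    return FAIL;

/* Derived types must be committed before use... */
if (is_derived_type)
    MPI_Type_commit(&mpi_type);

/* ... (mpi_count, mpi_type) used with MPI-IO here, e.g. as a file view) ... */

/* ... and released when done; built-in types are left alone */
if (is_derived_type)
    MPI_Type_free(&mpi_type);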