author     Quincey Koziol <koziol@hdfgroup.org>  2019-02-13 03:51:15 (GMT)
committer  Quincey Koziol <koziol@hdfgroup.org>  2019-02-13 03:51:15 (GMT)
commit     b5305b25a67c41f0d87352d39e799b603ed10c69 (patch)
tree       d7f5b041e55612ce9681bb02950d7942a4bad488 /src/H5Smpio.c
parent     112b8131965104cbed0746391893f211d79888ad (diff)
Align develop with incoming hyperslab_updates branch changes.
Diffstat (limited to 'src/H5Smpio.c')
-rw-r--r--  src/H5Smpio.c  811
1 file changed, 374 insertions, 437 deletions
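For context on the patch below: src/H5Smpio.c translates HDF5 dataspace selections into MPI derived datatypes. A point selection, for instance, becomes one element-sized block per selected point, placed at that point's byte displacement and assembled with MPI_Type_create_hindexed_block() on MPI-3 (or MPI_Type_create_hindexed() plus a block-length array on older MPIs, as the #if MPI_VERSION guards in the diff show). The following is a minimal standalone sketch of that idea; the function and parameter names (point_selection_type, byte_disps) are illustrative, not the HDF5 routines themselves.

#include <stddef.h>
#include <mpi.h>

/* Sketch: build an MPI datatype selecting `num_points` elements of
 * `elmt_size` bytes each, located at the given byte displacements.
 * Mirrors the shape of H5S__mpio_create_point_datatype(); simplified,
 * MPI-3 only, minimal error handling. */
static int
point_selection_type(size_t elmt_size, int num_points,
    const MPI_Aint *byte_disps, MPI_Datatype *new_type)
{
    MPI_Datatype elmt_type;

    /* An element is just elmt_size raw bytes */
    if (MPI_Type_contiguous((int)elmt_size, MPI_BYTE, &elmt_type) != MPI_SUCCESS)
        return -1;

    /* One block of one element at each selected point's displacement */
    if (MPI_Type_create_hindexed_block(num_points, 1, byte_disps,
            elmt_type, new_type) != MPI_SUCCESS)
        return -1;

    if (MPI_Type_commit(new_type) != MPI_SUCCESS)
        return -1;

    /* The committed type keeps its own copy of the element type */
    MPI_Type_free(&elmt_type);
    return 0;
}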
diff --git a/src/H5Smpio.c b/src/H5Smpio.c index 935d279..2ebe987 100644 --- a/src/H5Smpio.c +++ b/src/H5Smpio.c @@ -14,15 +14,20 @@ /* * Programmer: rky 980813 * - * Purpose: Functions to read/write directly between app buffer and file. + * Purpose: Create MPI data types for HDF5 selections. * - * Beware of the ifdef'ed print statements. - * I didn't make them portable. */ +/****************/ +/* Module Setup */ +/****************/ + #include "H5Smodule.h" /* This source code file is part of the H5S module */ +/***********/ +/* Headers */ +/***********/ #include "H5private.h" /* Generic Functions */ #include "H5Dprivate.h" /* Datasets */ #include "H5Eprivate.h" /* Error handling */ @@ -37,37 +42,61 @@ #ifdef H5_HAVE_PARALLEL -static herr_t H5S_mpio_all_type(const H5S_t *space, size_t elmt_size, +/****************/ +/* Local Macros */ +/****************/ +#define H5S_MPIO_INITIAL_ALLOC_COUNT 256 +#define TWO_GIG_LIMIT 2147483648 +#ifndef H5S_MAX_MPI_COUNT +#define H5S_MAX_MPI_COUNT 536870911 /* (2^29)-1 */ +#endif + + +/******************/ +/* Local Typedefs */ +/******************/ + + +/********************/ +/* Local Prototypes */ +/********************/ +static herr_t H5S__mpio_all_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type, int *count, hbool_t *is_derived_type); -static herr_t H5S_mpio_none_type(MPI_Datatype *new_type, int *count, +static herr_t H5S__mpio_none_type(MPI_Datatype *new_type, int *count, hbool_t *is_derived_type); -static herr_t H5S_mpio_create_point_datatype(size_t elmt_size, hsize_t num_points, +static herr_t H5S__mpio_create_point_datatype(size_t elmt_size, hsize_t num_points, MPI_Aint *disp, MPI_Datatype *new_type); -static herr_t H5S_mpio_point_type(const H5S_t *space, size_t elmt_size, +static herr_t H5S__mpio_point_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type, int *count, hbool_t *is_derived_type, hbool_t do_permute, hsize_t **permute_map, hbool_t *is_permuted); -static herr_t H5S_mpio_permute_type(const H5S_t *space, size_t elmt_size, +static herr_t H5S__mpio_permute_type(const H5S_t *space, size_t elmt_size, hsize_t **permute_map, MPI_Datatype *new_type, int *count, hbool_t *is_derived_type); -static herr_t H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size, +static herr_t H5S__mpio_reg_hyper_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type, int *count, hbool_t *is_derived_type); -static herr_t H5S_mpio_span_hyper_type(const H5S_t *space, size_t elmt_size, +static herr_t H5S__mpio_span_hyper_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type, int *count, hbool_t *is_derived_type); -static herr_t H5S_obtain_datatype(const hsize_t down[], H5S_hyper_span_t* span, +static herr_t H5S__obtain_datatype(const hsize_t down[], H5S_hyper_span_t* span, const MPI_Datatype *elmt_type, MPI_Datatype *span_type, size_t elmt_size); -static herr_t H5S_mpio_create_large_type (hsize_t, MPI_Aint, MPI_Datatype , MPI_Datatype *); +static herr_t H5S__mpio_create_large_type(hsize_t, MPI_Aint, MPI_Datatype , MPI_Datatype *); -#define H5S_MPIO_INITIAL_ALLOC_COUNT 256 +/*****************************/ +/* Library Private Variables */ +/*****************************/ -#define TWO_GIG_LIMIT 2147483648 -#ifndef H5S_MAX_MPI_COUNT -#define H5S_MAX_MPI_COUNT 536870911 /* (2^29)-1 */ -#endif +/*********************/ +/* Package Variables */ +/*********************/ + +/*******************/ +/* Local Variables */ +/*******************/ static hsize_t bigio_count = H5S_MAX_MPI_COUNT; + 
/*------------------------------------------------------------------------- * Function: H5S_mpio_set_bigio_count * @@ -75,7 +104,7 @@ static hsize_t bigio_count = H5S_MAX_MPI_COUNT; * when we utilize derived datatypes. This is of * particular interest for allowing nightly testing * - * Return: the current/previous value of bigio_count. + * Return: The current/previous value of bigio_count. * * Programmer: Richard Warren, March 10, 2017 * @@ -85,19 +114,20 @@ hsize_t H5S_mpio_set_bigio_count(hsize_t new_count) { hsize_t orig_count = bigio_count; - if ((new_count > 0) && (new_count < TWO_GIG_LIMIT)) { + + if((new_count > 0) && (new_count < TWO_GIG_LIMIT)) bigio_count = new_count; - } + return orig_count; -} +} /* end H5S_mpio_set_bigio_count() */ /*------------------------------------------------------------------------- - * Function: H5S_mpio_all_type + * Function: H5S__mpio_all_type * * Purpose: Translate an HDF5 "all" selection into an MPI type. * - * Return: non-negative on success, negative on failure. + * Return: Non-negative on success, negative on failure. * * Outputs: *new_type the MPI type corresponding to the selection * *count how many objects of the new_type in selection @@ -105,16 +135,11 @@ H5S_mpio_set_bigio_count(hsize_t new_count) * *is_derived_type 0 if MPI primitive type, 1 if derived * * Programmer: rky 980813 - * Modifications: - * Mohamad Chaarawi - * Adding support for large datatypes (beyond the limit of a - * 32 bit integer. - * * *------------------------------------------------------------------------- */ static herr_t -H5S_mpio_all_type(const H5S_t *space, size_t elmt_size, +H5S__mpio_all_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type, int *count, hbool_t *is_derived_type) { hsize_t total_bytes; @@ -122,7 +147,7 @@ H5S_mpio_all_type(const H5S_t *space, size_t elmt_size, hsize_t nelmts; /* Total number of elmts */ herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_NOAPI_NOINIT + FUNC_ENTER_STATIC /* Check args */ HDassert(space); @@ -133,34 +158,33 @@ H5S_mpio_all_type(const H5S_t *space, size_t elmt_size, H5_CHECKED_ASSIGN(nelmts, hsize_t, snelmts, hssize_t); total_bytes = (hsize_t)elmt_size * nelmts; + /* Verify that the size can be expressed as a 32 bit integer */ if(bigio_count >= total_bytes) { - /* fill in the return values */ - *new_type = MPI_BYTE; - H5_CHECKED_ASSIGN(*count, int, total_bytes, hsize_t); - *is_derived_type = FALSE; - } + /* fill in the return values */ + *new_type = MPI_BYTE; + H5_CHECKED_ASSIGN(*count, int, total_bytes, hsize_t); + *is_derived_type = FALSE; + } /* end if */ else { - /* Create a LARGE derived datatype for this transfer */ - if (H5S_mpio_create_large_type (total_bytes, 0, MPI_BYTE, new_type) < 0) { - HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, - "couldn't create a large datatype from the all selection") - } - *count = 1; - *is_derived_type = TRUE; - } + /* Create a LARGE derived datatype for this transfer */ + if(H5S__mpio_create_large_type(total_bytes, 0, MPI_BYTE, new_type) < 0) + HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create a large datatype from the all selection") + *count = 1; + *is_derived_type = TRUE; + } /* end else */ done: FUNC_LEAVE_NOAPI(ret_value) -} /* H5S_mpio_all_type() */ +} /* H5S__mpio_all_type() */ /*------------------------------------------------------------------------- - * Function: H5S_mpio_none_type + * Function: H5S__mpio_none_type * * Purpose: Translate an HDF5 "none" selection into an MPI type. 
* - * Return: non-negative on success, negative on failure. + * Return: Non-negative on success, negative on failure. * * Outputs: *new_type the MPI type corresponding to the selection * *count how many objects of the new_type in selection @@ -172,9 +196,9 @@ done: *------------------------------------------------------------------------- */ static herr_t -H5S_mpio_none_type(MPI_Datatype *new_type, int *count, hbool_t *is_derived_type) +H5S__mpio_none_type(MPI_Datatype *new_type, int *count, hbool_t *is_derived_type) { - FUNC_ENTER_NOAPI_NOINIT_NOERR + FUNC_ENTER_STATIC_NOERR /* fill in the return values */ *new_type = MPI_BYTE; @@ -182,15 +206,15 @@ H5S_mpio_none_type(MPI_Datatype *new_type, int *count, hbool_t *is_derived_type) *is_derived_type = FALSE; FUNC_LEAVE_NOAPI(SUCCEED) -} /* H5S_mpio_none_type() */ +} /* H5S__mpio_none_type() */ /*------------------------------------------------------------------------- - * Function: H5S_mpio_create_point_datatype + * Function: H5S__mpio_create_point_datatype * * Purpose: Create a derived datatype for point selections. * - * Return: non-negative on success, negative on failure. + * Return: Non-negative on success, negative on failure. * * Outputs: *new_type the MPI type corresponding to the selection * @@ -198,18 +222,23 @@ H5S_mpio_none_type(MPI_Datatype *new_type, int *count, hbool_t *is_derived_type) * *------------------------------------------------------------------------- */ -static herr_t -H5S_mpio_create_point_datatype (size_t elmt_size, hsize_t num_points, - MPI_Aint *disp, MPI_Datatype *new_type) +static herr_t +H5S__mpio_create_point_datatype(size_t elmt_size, hsize_t num_points, + MPI_Aint *disp, MPI_Datatype *new_type) { MPI_Datatype elmt_type; /* MPI datatype for individual element */ hbool_t elmt_type_created = FALSE; /* Whether the element MPI datatype was created */ - int mpi_code; /* MPI error code */ + int *inner_blocks = NULL; /* Arrays for MPI datatypes when "large" datatype needed */ + MPI_Aint *inner_disps = NULL; + MPI_Datatype *inner_types = NULL; +#if MPI_VERSION < 3 int *blocks = NULL; /* Array of block sizes for MPI hindexed create call */ hsize_t u; /* Local index variable */ +#endif + int mpi_code; /* MPI error code */ herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_NOAPI_NOINIT + FUNC_ENTER_STATIC /* Create an MPI datatype for an element */ if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)elmt_size, MPI_BYTE, &elmt_type))) @@ -219,146 +248,127 @@ H5S_mpio_create_point_datatype (size_t elmt_size, hsize_t num_points, /* Check whether standard or BIGIO processing will be employeed */ if(bigio_count >= num_points) { #if MPI_VERSION >= 3 - /* Create an MPI datatype for the whole point selection */ - if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed_block((int)num_points, 1, disp, elmt_type, new_type))) - HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_indexed_block failed", mpi_code) + /* Create an MPI datatype for the whole point selection */ + if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed_block((int)num_points, 1, disp, elmt_type, new_type))) + HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_indexed_block failed", mpi_code) #else - /* Allocate block sizes for MPI datatype call */ - if(NULL == (blocks = (int *)H5MM_malloc(sizeof(int) * num_points))) - HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of blocks") + /* Allocate block sizes for MPI datatype call */ + if(NULL == (blocks = (int *)H5MM_malloc(sizeof(int) * num_points))) + HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, 
"can't allocate array of blocks") - for(u = 0; u < num_points; u++) - blocks[u] = 1; + for(u = 0; u < num_points; u++) + blocks[u] = 1; - /* Create an MPI datatype for the whole point selection */ - if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed((int)num_points, blocks, disp, elmt_type, new_type))) - HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code) + /* Create an MPI datatype for the whole point selection */ + if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed((int)num_points, blocks, disp, elmt_type, new_type))) + HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code) #endif - /* Commit MPI datatype for later use */ - if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_type))) - HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code) - } - else { - /* use LARGE_DATATYPE:: - * We'll create an hindexed_block type for every 2G point count and then combine - * those and any remaining points into a single large datatype. - */ - int total_types, i; - int remaining_points; - int num_big_types; - hsize_t leftover; - - int *inner_blocks; - MPI_Aint *inner_disps; - MPI_Datatype *inner_types = NULL; + /* Commit MPI datatype for later use */ + if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_type))) + HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code) + } /* end if */ + else { + /* use LARGE_DATATYPE:: + * We'll create an hindexed_block type for every 2G point count and then combine + * those and any remaining points into a single large datatype. + */ + int total_types, i; + int remaining_points; + int num_big_types; + hsize_t leftover; - /* Calculate how many Big MPI datatypes are needed to represent the buffer */ - num_big_types = (int)(num_points/bigio_count); + /* Calculate how many Big MPI datatypes are needed to represent the buffer */ + num_big_types = (int)(num_points / bigio_count); - leftover = (hsize_t)num_points - (hsize_t)num_big_types * (hsize_t)bigio_count; - H5_CHECKED_ASSIGN(remaining_points, int, leftover, hsize_t); + leftover = (hsize_t)num_points - (hsize_t)num_big_types * (hsize_t)bigio_count; + H5_CHECKED_ASSIGN(remaining_points, int, leftover, hsize_t); - total_types = (int)(remaining_points) ? (num_big_types + 1) : num_big_types; + total_types = (int)(remaining_points) ? 
(num_big_types + 1) : num_big_types; - /* Allocate array if MPI derived types needed */ - if(NULL == (inner_types = (MPI_Datatype *)H5MM_malloc((sizeof(MPI_Datatype) * (size_t)total_types)))) - HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of blocks") + /* Allocate array if MPI derived types needed */ + if(NULL == (inner_types = (MPI_Datatype *)H5MM_malloc((sizeof(MPI_Datatype) * (size_t)total_types)))) + HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of blocks") - if(NULL == (inner_blocks = (int *)H5MM_malloc(sizeof(int) * (size_t)total_types))) - HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of blocks") + if(NULL == (inner_blocks = (int *)H5MM_malloc(sizeof(int) * (size_t)total_types))) + HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of blocks") - if(NULL == (inner_disps = (MPI_Aint *)H5MM_malloc(sizeof(MPI_Aint) * (size_t)total_types))) - HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of blocks") + if(NULL == (inner_disps = (MPI_Aint *)H5MM_malloc(sizeof(MPI_Aint) * (size_t)total_types))) + HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of blocks") #if MPI_VERSION < 3 - /* Allocate block sizes for MPI datatype call */ - if(NULL == (blocks = (int *)H5MM_malloc(sizeof(int) * bigio_count))) - HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of blocks") + /* Allocate block sizes for MPI datatype call */ + if(NULL == (blocks = (int *)H5MM_malloc(sizeof(int) * bigio_count))) + HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of blocks") - for(u = 0; u < bigio_count; u++) - blocks[u] = 1; + for(u = 0; u < bigio_count; u++) + blocks[u] = 1; #endif - for(i=0 ; i<num_big_types ; i++) { + for(i = 0; i < num_big_types; i++) { #if MPI_VERSION >= 3 - if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed_block(bigio_count, - 1, - &disp[i*bigio_count], - elmt_type, - &inner_types[i]))) { - HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed_block failed", mpi_code); - } + if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed_block(bigio_count, + 1, &disp[i*bigio_count], elmt_type, &inner_types[i]))) + HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed_block failed", mpi_code) #else - if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed((int)bigio_count, - blocks, - &disp[i*bigio_count], - elmt_type, - &inner_types[i]))) { - HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code) - } + if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed((int)bigio_count, + blocks, &disp[i*bigio_count], elmt_type, &inner_types[i]))) + HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code) #endif - inner_blocks[i] = 1; - inner_disps[i] = 0; - } + inner_blocks[i] = 1; + inner_disps[i] = 0; + } /* end for*/ - if(remaining_points) { + if(remaining_points) { #if MPI_VERSION >= 3 - if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed_block(remaining_points, - 1, - &disp[num_big_types*bigio_count], - elmt_type, - &inner_types[num_big_types]))) { - HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed_block failed", mpi_code); - } + if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed_block(remaining_points, + 1, &disp[num_big_types*bigio_count], elmt_type, &inner_types[num_big_types]))) + HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed_block failed", mpi_code) #else - if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed((int)remaining_points, - blocks, - &disp[num_big_types*bigio_count], - elmt_type, - 
&inner_types[num_big_types]))) { - HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code) - } + if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed((int)remaining_points, + blocks, &disp[num_big_types*bigio_count], elmt_type, &inner_types[num_big_types]))) + HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code) #endif - inner_blocks[num_big_types] = 1; - inner_disps[num_big_types] = 0; - } - - if(MPI_SUCCESS != (mpi_code = MPI_Type_create_struct(total_types, - inner_blocks, - inner_disps, - inner_types, - new_type))) { - HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct", mpi_code); - } - for(i=0 ; i<total_types ; i++) - MPI_Type_free(&inner_types[i]); - - H5MM_free(inner_types); - H5MM_free(inner_blocks); - H5MM_free(inner_disps); - - /* Commit MPI datatype for later use */ - if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_type))) - HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code) - } + inner_blocks[num_big_types] = 1; + inner_disps[num_big_types] = 0; + } /* end if */ + + if(MPI_SUCCESS != (mpi_code = MPI_Type_create_struct(total_types, + inner_blocks, inner_disps, inner_types, new_type))) + HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct", mpi_code) + for(i = 0; i < total_types; i++) + MPI_Type_free(&inner_types[i]); + + /* Commit MPI datatype for later use */ + if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_type))) + HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code) + } /* end else */ + done: if(elmt_type_created) MPI_Type_free(&elmt_type); +#if MPI_VERSION < 3 if(blocks) H5MM_free(blocks); +#endif + if(inner_types) + H5MM_free(inner_types); + if(inner_blocks) + H5MM_free(inner_blocks); + if(inner_disps) + H5MM_free(inner_disps); FUNC_LEAVE_NOAPI(ret_value) -} /* H5S_mpio_create_point_datatype() */ +} /* H5S__mpio_create_point_datatype() */ /*------------------------------------------------------------------------- - * Function: H5S_mpio_point_type + * Function: H5S__mpio_point_type * * Purpose: Translate an HDF5 "point" selection into an MPI type. * Create a permutation array to handle out-of-order point selections. * - * Return: non-negative on success, negative on failure. + * Return: Non-negative on success, negative on failure. 
* * Outputs: *new_type the MPI type corresponding to the selection * *count how many objects of the new_type in selection @@ -373,7 +383,7 @@ done: *------------------------------------------------------------------------- */ static herr_t -H5S_mpio_point_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type, +H5S__mpio_point_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type, int *count, hbool_t *is_derived_type, hbool_t do_permute, hsize_t **permute, hbool_t *is_permuted) { @@ -384,7 +394,7 @@ H5S_mpio_point_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type hsize_t u; /* Local index variable */ herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_NOAPI_NOINIT + FUNC_ENTER_STATIC /* Check args */ HDassert(space); @@ -406,23 +416,23 @@ H5S_mpio_point_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type /* Iterate through list of elements */ curr = space->select.sel_info.pnt_lst->head; for(u = 0 ; u < num_points ; u++) { - /* calculate the displacement of the current point */ + /* Calculate the displacement of the current point */ disp[u] = H5VM_array_offset(space->extent.rank, space->extent.size, curr->pnt); disp[u] *= elmt_size; - /* This is a File Space used to set the file view, so adjust the displacements + /* This is a File Space used to set the file view, so adjust the displacements * to have them monotonically non-decreasing. - * Generate the permutation array by indicating at each point being selected, - * the position it will shifted in the new displacement. Example: - * Suppose 4 points with corresponding are selected - * Pt 1: disp=6 ; Pt 2: disp=3 ; Pt 3: disp=0 ; Pt 4: disp=4 + * Generate the permutation array by indicating at each point being selected, + * the position it will shifted in the new displacement. Example: + * Suppose 4 points with corresponding are selected + * Pt 1: disp=6 ; Pt 2: disp=3 ; Pt 3: disp=0 ; Pt 4: disp=4 * The permute map to sort the displacements in order will be: * point 1: map[0] = L, indicating that this point is not moved (1st point selected) - * point 2: map[1] = 0, indicating that this point is moved to the first position, + * point 2: map[1] = 0, indicating that this point is moved to the first position, * since disp_pt1(6) > disp_pt2(3) - * point 3: map[2] = 0, move to position 0, bec it has the lowest disp between + * point 3: map[2] = 0, move to position 0, bec it has the lowest disp between * the points selected so far. - * point 4: map[3] = 2, move the 2nd position since point 1 has a higher disp, + * point 4: map[3] = 2, move the 2nd position since point 1 has a higher disp, * but points 2 and 3 have lower displacements. 
*/ if(do_permute) { @@ -447,7 +457,7 @@ H5S_mpio_point_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type HDmemmove(disp + m + 1, disp + m, (u - m) * sizeof(MPI_Aint)); disp[m] = temp; } /* end if */ - (*permute)[u] = m; + (*permute)[u] = m; } /* end if */ else (*permute)[u] = num_points; @@ -455,7 +465,7 @@ H5S_mpio_point_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type /* this is a memory space, and no permutation is necessary to create the derived datatype */ else { - ;/* do nothing */ + ; /* do nothing */ } /* end else */ /* get the next point */ @@ -463,7 +473,7 @@ H5S_mpio_point_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type } /* end for */ /* Create the MPI datatype for the set of element displacements */ - if(H5S_mpio_create_point_datatype(elmt_size, num_points, disp, new_type) < 0) + if(H5S__mpio_create_point_datatype(elmt_size, num_points, disp, new_type) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create an MPI Datatype from point selection") /* Set values about MPI datatype created */ @@ -481,11 +491,11 @@ done: } /* end if */ FUNC_LEAVE_NOAPI(ret_value) -} /* H5S_mpio_point_type() */ +} /* H5S__mpio_point_type() */ /*------------------------------------------------------------------------- - * Function: H5S_mpio_permute_type + * Function: H5S__mpio_permute_type * * Purpose: Translate an HDF5 "all/hyper/point" selection into an MPI type, * while applying the permutation map. This function is called if @@ -496,7 +506,7 @@ done: * Note: This routine is called from H5S_mpio_space_type(), which is * called first for the file dataspace and creates * - * Return: non-negative on success, negative on failure. + * Return: Non-negative on success, negative on failure. * * Outputs: *new_type the MPI type corresponding to the selection * *count how many objects of the new_type in selection @@ -508,7 +518,7 @@ done: *------------------------------------------------------------------------- */ static herr_t -H5S_mpio_permute_type(const H5S_t *space, size_t elmt_size, hsize_t **permute, +H5S__mpio_permute_type(const H5S_t *space, size_t elmt_size, hsize_t **permute, MPI_Datatype *new_type, int *count, hbool_t *is_derived_type) { MPI_Aint *disp = NULL; /* Datatype displacement for each point*/ @@ -520,7 +530,7 @@ H5S_mpio_permute_type(const H5S_t *space, size_t elmt_size, hsize_t **permute, hsize_t u; /* Local index variable */ herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_NOAPI_NOINIT + FUNC_ENTER_STATIC /* Check args */ HDassert(space); @@ -571,12 +581,12 @@ H5S_mpio_permute_type(const H5S_t *space, size_t elmt_size, hsize_t **permute, /* Set the displacement of the current point */ disp[u] = curr_off; - /* This is a memory displacement, so for each point selected, + /* This is a memory displacement, so for each point selected, * apply the map that was generated by the file selection */ if((*permute)[u] != num_points) { MPI_Aint temp = disp[u]; - HDmemmove(disp + (*permute)[u] + 1, disp + (*permute)[u], + HDmemmove(disp + (*permute)[u] + 1, disp + (*permute)[u], (u - (*permute)[u]) * sizeof(MPI_Aint)); disp[(*permute)[u]] = temp; } /* end if */ @@ -597,7 +607,7 @@ H5S_mpio_permute_type(const H5S_t *space, size_t elmt_size, hsize_t **permute, } /* end while */ /* Create the MPI datatype for the set of element displacements */ - if(H5S_mpio_create_point_datatype(elmt_size, num_points, disp, new_type) < 0) + if(H5S__mpio_create_point_datatype(elmt_size, num_points, disp, new_type) < 0) HGOTO_ERROR(H5E_DATASPACE, 
H5E_BADTYPE, FAIL, "couldn't create an MPI Datatype from point selection") /* Set values about MPI datatype created */ @@ -619,15 +629,15 @@ done: } /* end if */ FUNC_LEAVE_NOAPI(ret_value) -} /* H5S_mpio_permute_type() */ +} /* H5S__mpio_permute_type() */ /*------------------------------------------------------------------------- - * Function: H5S_mpio_hyper_type + * Function: H5S__mpio_reg_hyper_type * - * Purpose: Translate an HDF5 hyperslab selection into an MPI type. + * Purpose: Translate a regular HDF5 hyperslab selection into an MPI type. * - * Return: non-negative on success, negative on failure. + * Return: Non-negative on success, negative on failure. * * Outputs: *new_type the MPI type corresponding to the selection * *count how many objects of the new_type in selection @@ -635,14 +645,11 @@ done: * *is_derived_type 0 if MPI primitive type, 1 if derived * * Programmer: rky 980813 - * Modifications: - * Mohamad Chaarawi - * Adding support for large datatypes (beyond the limit of a - * 32 bit integer. + * *------------------------------------------------------------------------- */ static herr_t -H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size, +H5S__mpio_reg_hyper_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type, int *count, hbool_t *is_derived_type) { H5S_sel_iter_t sel_iter; /* Selection iteration info */ @@ -668,7 +675,7 @@ H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size, int mpi_code; /* MPI return code */ herr_t ret_value = SUCCEED; - FUNC_ENTER_NOAPI_NOINIT + FUNC_ENTER_STATIC /* Check args */ HDassert(space); @@ -683,16 +690,15 @@ H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size, diminfo = sel_iter.u.hyp.diminfo; HDassert(diminfo); - /* make a local copy of the dimension info so we can operate with them */ + /* Make a local copy of the dimension info so we can operate with them */ /* Check if this is a "flattened" regular hyperslab selection */ if(sel_iter.u.hyp.iter_rank != 0 && sel_iter.u.hyp.iter_rank < space->extent.rank) { /* Flattened selection */ rank = sel_iter.u.hyp.iter_rank; - HDassert(rank <= H5S_MAX_RANK); /* within array bounds */ #ifdef H5S_DEBUG - if(H5DEBUG(S)) - HDfprintf(H5DEBUG(S), "%s: Flattened selection\n",FUNC); +if(H5DEBUG(S)) + HDfprintf(H5DEBUG(S), "%s: Flattened selection\n",FUNC); #endif for(u = 0; u < rank; ++u) { H5_CHECK_OVERFLOW(diminfo[u].start, hsize_t, hssize_t) @@ -701,33 +707,30 @@ H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size, d[u].block = diminfo[u].block; d[u].count = diminfo[u].count; d[u].xtent = sel_iter.u.hyp.size[u]; + #ifdef H5S_DEBUG - if(H5DEBUG(S)){ - HDfprintf(H5DEBUG(S), "%s: start=%Hd stride=%Hu count=%Hu block=%Hu xtent=%Hu", - FUNC, d[u].start, d[u].strid, d[u].count, d[u].block, d[u].xtent ); - if (u==0) - HDfprintf(H5DEBUG(S), " rank=%u\n", rank ); - else - HDfprintf(H5DEBUG(S), "\n" ); - } +if(H5DEBUG(S)) { + HDfprintf(H5DEBUG(S), "%s: start=%Hd stride=%Hu count=%Hu block=%Hu xtent=%Hu", + FUNC, d[u].start, d[u].strid, d[u].count, d[u].block, d[u].xtent); + if(u == 0) + HDfprintf(H5DEBUG(S), " rank=%u\n", rank); + else + HDfprintf(H5DEBUG(S), "\n"); +} #endif - if(0 == d[u].block) - goto empty; - if(0 == d[u].count) - goto empty; - if(0 == d[u].xtent) - goto empty; + + /* Sanity check */ + HDassert(d[u].block > 0); + HDassert(d[u].count > 0); + HDassert(d[u].xtent > 0); } /* end for */ } /* end if */ else { /* Non-flattened selection */ rank = space->extent.rank; - HDassert(rank <= H5S_MAX_RANK); /* within array bounds */ - if(0 == rank) - goto empty; #ifdef 
H5S_DEBUG - if(H5DEBUG(S)) - HDfprintf(H5DEBUG(S),"%s: Non-flattened selection\n",FUNC); +if(H5DEBUG(S)) + HDfprintf(H5DEBUG(S),"%s: Non-flattened selection\n",FUNC); #endif for(u = 0; u < rank; ++u) { H5_CHECK_OVERFLOW(diminfo[u].start, hsize_t, hssize_t) @@ -736,22 +739,22 @@ H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size, d[u].block = diminfo[u].block; d[u].count = diminfo[u].count; d[u].xtent = space->extent.size[u]; + #ifdef H5S_DEBUG - if(H5DEBUG(S)){ +if(H5DEBUG(S)) { HDfprintf(H5DEBUG(S), "%s: start=%Hd stride=%Hu count=%Hu block=%Hu xtent=%Hu", - FUNC, d[u].start, d[u].strid, d[u].count, d[u].block, d[u].xtent ); - if (u==0) - HDfprintf(H5DEBUG(S), " rank=%u\n", rank ); + FUNC, d[u].start, d[u].strid, d[u].count, d[u].block, d[u].xtent); + if(u == 0) + HDfprintf(H5DEBUG(S), " rank=%u\n", rank); else - HDfprintf(H5DEBUG(S), "\n" ); - } + HDfprintf(H5DEBUG(S), "\n"); +} #endif - if(0 == d[u].block) - goto empty; - if(0 == d[u].count) - goto empty; - if(0 == d[u].xtent) - goto empty; + + /* Sanity check */ + HDassert(d[u].block > 0); + HDassert(d[u].count > 0); + HDassert(d[u].xtent > 0); } /* end for */ } /* end else */ @@ -762,56 +765,52 @@ H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size, offset[rank - 1] = 1; max_xtent[rank - 1] = d[rank - 1].xtent; #ifdef H5S_DEBUG - if(H5DEBUG(S)) { - i = ((int)rank) - 1; - HDfprintf(H5DEBUG(S), " offset[%2d]=%Hu; max_xtent[%2d]=%Hu\n", - i, offset[i], i, max_xtent[i]); - } +if(H5DEBUG(S)) { + i = ((int)rank) - 1; + HDfprintf(H5DEBUG(S), " offset[%2d]=%Hu; max_xtent[%2d]=%Hu\n", i, offset[i], i, max_xtent[i]); +} #endif for(i = ((int)rank) - 2; i >= 0; --i) { offset[i] = offset[i + 1] * d[i + 1].xtent; max_xtent[i] = max_xtent[i + 1] * d[i].xtent; #ifdef H5S_DEBUG - if(H5DEBUG(S)) - HDfprintf(H5DEBUG(S), " offset[%2d]=%Hu; max_xtent[%2d]=%Hu\n", - i, offset[i], i, max_xtent[i]); +if(H5DEBUG(S)) + HDfprintf(H5DEBUG(S), " offset[%2d]=%Hu; max_xtent[%2d]=%Hu\n", i, offset[i], i, max_xtent[i]); #endif } /* end for */ /* Create a type covering the selected hyperslab. * Multidimensional dataspaces are stored in row-major order. * The type is built from the inside out, going from the - * fastest-changing (i.e., inner) dimension * to the slowest (outer). */ + * fastest-changing (i.e., inner) dimension * to the slowest (outer). + */ /******************************************************* * Construct contig type for inner contig dims: *******************************************************/ #ifdef H5S_DEBUG - if(H5DEBUG(S)) { +if(H5DEBUG(S)) { HDfprintf(H5DEBUG(S), "%s: Making contig type %Zu MPI_BYTEs\n", FUNC, elmt_size); for(i = ((int)rank) - 1; i >= 0; --i) HDfprintf(H5DEBUG(S), "d[%d].xtent=%Hu \n", i, d[i].xtent); - } +} #endif /* LARGE_DATATYPE:: - * Check if the number of elements to form the inner type fits into a 32 bit integer. + * Check if the number of elements to form the inner type fits into a 32 bit integer. * If yes then just create the innertype with MPI_Type_contiguous. * Otherwise create a compound datatype by iterating as many times as needed * for the innertype to be created. 
*/ if(bigio_count >= elmt_size) { - /* Use a single MPI datatype that has a 32 bit size */ - if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)elmt_size, MPI_BYTE, &inner_type))) - HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code) - } - else { - /* Create the compound datatype for this operation (> 2GB) */ - if (H5S_mpio_create_large_type (elmt_size, 0, MPI_BYTE, &inner_type) < 0) { - HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, - "couldn't ccreate a large inner datatype in hyper selection") - } - } + /* Use a single MPI datatype that has a 32 bit size */ + if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)elmt_size, MPI_BYTE, &inner_type))) + HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code) + } /* end if */ + else + /* Create the compound datatype for this operation (> 2GB) */ + if(H5S__mpio_create_large_type(elmt_size, 0, MPI_BYTE, &inner_type) < 0) + HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create a large inner datatype in hyper selection") /******************************************************* * Construct the type by walking the hyperslab dims @@ -819,65 +818,57 @@ H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size, *******************************************************/ for(i = ((int)rank) - 1; i >= 0; --i) { #ifdef H5S_DEBUG - if(H5DEBUG(S)) - HDfprintf(H5DEBUG(S), "%s: Dimension i=%d \n" - "start=%Hd count=%Hu block=%Hu stride=%Hu, xtent=%Hu max_xtent=%d\n", - FUNC, i, d[i].start, d[i].count, d[i].block, d[i].strid, d[i].xtent, max_xtent[i]); +if(H5DEBUG(S)) + HDfprintf(H5DEBUG(S), "%s: Dimension i=%d \n" + "start=%Hd count=%Hu block=%Hu stride=%Hu, xtent=%Hu max_xtent=%d\n", + FUNC, i, d[i].start, d[i].count, d[i].block, d[i].strid, d[i].xtent, max_xtent[i]); #endif #ifdef H5S_DEBUG - if(H5DEBUG(S)) - HDfprintf(H5DEBUG(S), "%s: i=%d Making vector-type \n", FUNC,i); +if(H5DEBUG(S)) + HDfprintf(H5DEBUG(S), "%s: i=%d Making vector-type \n", FUNC,i); #endif /**************************************** - * Build vector type of the selection. - ****************************************/ - if (bigio_count >= d[i].count && - bigio_count >= d[i].block && - bigio_count >= d[i].strid) { - - /* All the parameters fit into 32 bit integers so create the vector type normally */ - mpi_code = MPI_Type_vector((int)(d[i].count), /* count */ - (int)(d[i].block), /* blocklength */ - (int)(d[i].strid), /* stride */ - inner_type, /* old type */ - &outer_type); /* new type */ - - MPI_Type_free(&inner_type); - if(mpi_code != MPI_SUCCESS) - HMPI_GOTO_ERROR(FAIL, "couldn't create MPI vector type", mpi_code) - } + * Build vector type of the selection. + ****************************************/ + if(bigio_count >= d[i].count && + bigio_count >= d[i].block && bigio_count >= d[i].strid) { + /* All the parameters fit into 32 bit integers so create the vector type normally */ + mpi_code = MPI_Type_vector((int)(d[i].count), /* count */ + (int)(d[i].block), /* blocklength */ + (int)(d[i].strid), /* stride */ + inner_type, /* old type */ + &outer_type); /* new type */ + + MPI_Type_free(&inner_type); + if(mpi_code != MPI_SUCCESS) + HMPI_GOTO_ERROR(FAIL, "couldn't create MPI vector type", mpi_code) + } /* end if */ else { - /* Things get a bit more complicated and require LARGE_DATATYPE processing - * There are two MPI datatypes that need to be created: - * 1) an internal contiguous block; and - * 2) a collection of elements where an element is a contiguous block(1). 
- * Remember that the input arguments to the MPI-IO functions use integer - * values to represent element counts. We ARE allowed however, in the - * more recent MPI implementations to use constructed datatypes whereby - * the total number of bytes in a transfer could be : - * (2GB-1)number_of_blocks * the_datatype_extent. - */ + /* Things get a bit more complicated and require LARGE_DATATYPE processing + * There are two MPI datatypes that need to be created: + * 1) an internal contiguous block; and + * 2) a collection of elements where an element is a contiguous block(1). + * Remember that the input arguments to the MPI-IO functions use integer + * values to represent element counts. We ARE allowed however, in the + * more recent MPI implementations to use constructed datatypes whereby + * the total number of bytes in a transfer could be : + * (2GB-1)number_of_blocks * the_datatype_extent. + */ MPI_Aint stride_in_bytes, inner_extent; MPI_Datatype block_type; - /* create a contiguous datatype inner_type x number of BLOCKS. - * Again we need to check that the number of BLOCKS can fit into + /* Create a contiguous datatype inner_type x number of BLOCKS. + * Again we need to check that the number of BLOCKS can fit into * a 32 bit integer */ - if (bigio_count < d[i].block) { - if (H5S_mpio_create_large_type(d[i].block, 0, inner_type, - &block_type) < 0) { - HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, - "couldn't ccreate a large block datatype in hyper selection") - } - } - else { - if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)d[i].block, - inner_type, - &block_type))) + if(bigio_count < d[i].block) { + if(H5S__mpio_create_large_type(d[i].block, 0, inner_type, &block_type) < 0) + HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create a large block datatype in hyper selection") + } /* end if */ + else + if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)d[i].block, inner_type, &block_type))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code) - } /* As of version 4.0, OpenMPI now turns off MPI-1 API calls by default, * so we're using the MPI-2 version even though we don't need the lb @@ -892,40 +883,38 @@ H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size, /* If the element count is larger than what a 32 bit integer can hold, * we call the large type creation function to handle that */ - if (bigio_count < d[i].count) { - if (H5S_mpio_create_large_type (d[i].count, stride_in_bytes, block_type, - &outer_type) < 0) { - HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, - "couldn't create a large outer datatype in hyper selection") - } - } + if(bigio_count < d[i].count) { + if(H5S__mpio_create_large_type(d[i].count, stride_in_bytes, block_type, &outer_type) < 0) + HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create a large outer datatype in hyper selection") + } /* end if */ /* otherwise a regular create_hvector will do */ - else { - mpi_code = MPI_Type_create_hvector((int)d[i].count, /* count */ - 1, /* blocklength */ - stride_in_bytes, /* stride in bytes*/ - block_type, /* old type */ - &outer_type); /* new type */ - if(MPI_SUCCESS != mpi_code) + else + if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hvector((int)d[i].count, /* count */ + 1, /* blocklength */ + stride_in_bytes, /* stride in bytes*/ + block_type, /* old type */ + &outer_type))) /* new type */ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hvector failed", mpi_code) - } + MPI_Type_free(&block_type); MPI_Type_free(&inner_type); - } - /**************************************** - * Then build the 
dimension type as (start, vector type, xtent). - ****************************************/ - /* calculate start and extent values of this dimension */ + } /* end else */ + + /**************************************** + * Then build the dimension type as (start, vector type, xtent). + ****************************************/ + + /* Calculate start and extent values of this dimension */ start_disp = d[i].start * offset[i] * elmt_size; new_extent = (MPI_Aint)elmt_size * max_xtent[i]; if(MPI_SUCCESS != (mpi_code = MPI_Type_get_extent(outer_type, &lb, &extent_len))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_get_extent failed", mpi_code) /************************************************* - * Restructure this datatype ("outer_type") - * so that it still starts at 0, but its extent - * is the full extent in this dimension. - *************************************************/ + * Restructure this datatype ("outer_type") + * so that it still starts at 0, but its extent + * is the full extent in this dimension. + *************************************************/ if(start_disp > 0 || extent_len < new_extent) { MPI_Datatype interm_type; int block_len = 1; @@ -957,13 +946,6 @@ H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size, /* fill in the remaining return values */ *count = 1; /* only have to move one of these suckers! */ *is_derived_type = TRUE; - HGOTO_DONE(SUCCEED); - -empty: - /* special case: empty hyperslab */ - *new_type = MPI_BYTE; - *count = 0; - *is_derived_type = FALSE; done: /* Release selection iterator */ @@ -972,21 +954,20 @@ done: HDONE_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release selection iterator") #ifdef H5S_DEBUG - if(H5DEBUG(S)) - HDfprintf(H5DEBUG(S), "Leave %s, count=%ld is_derived_type=%t\n", - FUNC, *count, *is_derived_type ); +if(H5DEBUG(S)) + HDfprintf(H5DEBUG(S), "Leave %s, count=%ld is_derived_type=%t\n", FUNC, *count, *is_derived_type); #endif FUNC_LEAVE_NOAPI(ret_value) -} /* end H5S_mpio_hyper_type() */ +} /* end H5S__mpio_reg_hyper_type() */ /*------------------------------------------------------------------------- - * Function: H5S_mpio_span_hyper_type + * Function: H5S__mpio_span_hyper_type * * Purpose: Translate an HDF5 irregular hyperslab selection into an MPI type. * - * Return: non-negative on success, negative on failure. + * Return: Non-negative on success, negative on failure. * * Outputs: *new_type the MPI type corresponding to the selection * *count how many objects of the new_type in selection @@ -995,14 +976,10 @@ done: * * Programmer: kyang * - * Modifications: - * Mohamad Chaarawi - * Adding support for large datatypes (beyond the limit of a - * 32 bit integer. 
*------------------------------------------------------------------------- */ static herr_t -H5S_mpio_span_hyper_type(const H5S_t *space, size_t elmt_size, +H5S__mpio_span_hyper_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type, int *count, hbool_t *is_derived_type) { MPI_Datatype elmt_type; /* MPI datatype for an element */ @@ -1012,7 +989,7 @@ H5S_mpio_span_hyper_type(const H5S_t *space, size_t elmt_size, int mpi_code; /* MPI return code */ herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_NOAPI_NOINIT + FUNC_ENTER_STATIC /* Check args */ HDassert(space); @@ -1021,17 +998,13 @@ H5S_mpio_span_hyper_type(const H5S_t *space, size_t elmt_size, HDassert(space->select.sel_info.hslab->span_lst->head); /* Create the base type for an element */ - if (bigio_count >= elmt_size) { - if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)elmt_size, MPI_BYTE, &elmt_type))) { - HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code) - } - } - else { - if (H5S_mpio_create_large_type (elmt_size, 0, MPI_BYTE, &elmt_type) < 0) { - HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, - "couldn't create a large element datatype in span_hyper selection") - } - } + if(bigio_count >= elmt_size) { + if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)elmt_size, MPI_BYTE, &elmt_type))) + HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code) + } /* end if */ + else + if(H5S__mpio_create_large_type(elmt_size, 0, MPI_BYTE, &elmt_type) < 0) + HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create a large element datatype in span_hyper selection") elmt_type_is_derived = TRUE; /* Compute 'down' sizes for each dimension */ @@ -1039,8 +1012,8 @@ H5S_mpio_span_hyper_type(const H5S_t *space, size_t elmt_size, HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGETSIZE, FAIL, "couldn't compute 'down' dimension sizes") /* Obtain derived data type */ - if(H5S_obtain_datatype(down, space->select.sel_info.hslab->span_lst->head, &elmt_type, &span_type, elmt_size) < 0) - HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't obtain MPI derived data type") + if(H5S__obtain_datatype(down, space->select.sel_info.hslab->span_lst->head, &elmt_type, &span_type, elmt_size) < 0) + HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't obtain MPI derived data type") if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&span_type))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code) *new_type = span_type; @@ -1056,16 +1029,15 @@ done: HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code) FUNC_LEAVE_NOAPI(ret_value) -} /* end H5S_mpio_span_hyper_type() */ +} /* end H5S__mpio_span_hyper_type() */ /*------------------------------------------------------------------------- - * Function: H5S_obtain_datatype + * Function: H5S__obtain_datatype * - * Purpose: Obtain an MPI derived datatype based on span-tree - * implementation + * Purpose: Obtain an MPI derived datatype for span-tree hyperslab selection * - * Return: non-negative on success, negative on failure. + * Return: Non-negative on success, negative on failure. 
* * Outputs: *span_type the MPI type corresponding to the selection * @@ -1074,11 +1046,11 @@ done: *------------------------------------------------------------------------- */ static herr_t -H5S_obtain_datatype(const hsize_t *down, H5S_hyper_span_t *span, +H5S__obtain_datatype(const hsize_t *down, H5S_hyper_span_t *span, const MPI_Datatype *elmt_type, MPI_Datatype *span_type, size_t elmt_size) { size_t alloc_count = 0; /* Number of span tree nodes allocated at this level */ - size_t outercount = 0; /* Number of span tree nodes at this level */ + size_t outercount; /* Number of span tree nodes at this level */ MPI_Datatype *inner_type = NULL; hbool_t inner_types_freed = FALSE; /* Whether the inner_type MPI datatypes have been freed */ hbool_t span_type_valid = FALSE; /* Whether the span_type MPI datatypes is valid */ @@ -1089,7 +1061,7 @@ H5S_obtain_datatype(const hsize_t *down, H5S_hyper_span_t *span, int mpi_code; /* MPI return status code */ herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_NOAPI_NOINIT + FUNC_ENTER_STATIC /* Sanity check */ HDassert(span); @@ -1129,16 +1101,14 @@ H5S_obtain_datatype(const hsize_t *down, H5S_hyper_span_t *span, blocklen[outercount] = (int)tspan->nelem; tspan = tspan->next; - if (bigio_count < blocklen[outercount]) { + if(bigio_count < blocklen[outercount]) large_block = TRUE; /* at least one block type is large, so set this flag to true */ - } outercount++; } /* end while */ /* Everything fits into integers, so cast them and use hindexed */ - if (bigio_count >= outercount && large_block == FALSE) { - + if(bigio_count >= outercount && large_block == FALSE) { if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed((int)outercount, blocklen, disp, *elmt_type, span_type))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code) span_type_valid = TRUE; @@ -1150,7 +1120,7 @@ H5S_obtain_datatype(const hsize_t *down, H5S_hyper_span_t *span, MPI_Datatype temp_type = MPI_DATATYPE_NULL, outer_type = MPI_DATATYPE_NULL; /* create the block type from elmt_type while checking the 32 bit int limit */ if (blocklen[i] > bigio_count) { - if (H5S_mpio_create_large_type (blocklen[i], 0, *elmt_type, &temp_type) < 0) { + if (H5S__mpio_create_large_type (blocklen[i], 0, *elmt_type, &temp_type) < 0) { HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create a large element datatype in span_hyper selection") } @@ -1233,7 +1203,7 @@ H5S_obtain_datatype(const hsize_t *down, H5S_hyper_span_t *span, blocklen[outercount] = 1; /* Generate MPI datatype for next dimension down */ - if(H5S_obtain_datatype(down + 1, tspan->down->head, elmt_type, &down_type, elmt_size) < 0) + if(H5S__obtain_datatype(down + 1, tspan->down->head, elmt_type, &down_type, elmt_size) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't obtain MPI derived data type") /* Build the MPI datatype for this node */ @@ -1291,7 +1261,7 @@ done: } /* end if */ FUNC_LEAVE_NOAPI(ret_value) -} /* end H5S_obtain_datatype() */ +} /* end H5S__obtain_datatype() */ /*------------------------------------------------------------------------- @@ -1300,7 +1270,7 @@ done: * Purpose: Translate an HDF5 dataspace selection into an MPI type. * Currently handle only hyperslab and "all" selections. * - * Return: non-negative on success, negative on failure. + * Return: Non-negative on success, negative on failure. 
* * Outputs: *new_type the MPI type corresponding to the selection * *count how many objects of the new_type in selection @@ -1312,7 +1282,7 @@ done: *------------------------------------------------------------------------- */ herr_t -H5S_mpio_space_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type, +H5S_mpio_space_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type, int *count, hbool_t *is_derived_type, hbool_t do_permute, hsize_t **permute_map, hbool_t *is_permuted) { @@ -1333,10 +1303,10 @@ H5S_mpio_space_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type * out-of-order point selection, then permute this selection which * should be a memory selection to match the file space permutation. */ - if(TRUE == *is_permuted) { + if(TRUE == *is_permuted) { switch(H5S_GET_SELECT_TYPE(space)) { case H5S_SEL_NONE: - if(H5S_mpio_none_type(new_type, count, is_derived_type) < 0) + if(H5S__mpio_none_type(new_type, count, is_derived_type) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't convert 'none' selection to MPI type") break; @@ -1346,7 +1316,7 @@ H5S_mpio_space_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type /* Sanity check */ HDassert(!do_permute); - if(H5S_mpio_permute_type(space, elmt_size, permute_map, new_type, count, is_derived_type) < 0) + if(H5S__mpio_permute_type(space, elmt_size, permute_map, new_type, count, is_derived_type) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't convert 'all' selection to MPI type") break; @@ -1361,29 +1331,28 @@ H5S_mpio_space_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type else { switch(H5S_GET_SELECT_TYPE(space)) { case H5S_SEL_NONE: - if(H5S_mpio_none_type(new_type, count, is_derived_type) < 0) + if(H5S__mpio_none_type(new_type, count, is_derived_type) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert 'none' selection to MPI type") break; case H5S_SEL_ALL: - if(H5S_mpio_all_type(space, elmt_size, new_type, count, is_derived_type) < 0) + if(H5S__mpio_all_type(space, elmt_size, new_type, count, is_derived_type) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert 'all' selection to MPI type") break; case H5S_SEL_POINTS: - if(H5S_mpio_point_type(space, elmt_size, new_type, count, is_derived_type, do_permute, permute_map, is_permuted) < 0) + if(H5S__mpio_point_type(space, elmt_size, new_type, count, is_derived_type, do_permute, permute_map, is_permuted) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't convert 'point' selection to MPI type") break; case H5S_SEL_HYPERSLABS: if((H5S_SELECT_IS_REGULAR(space) == TRUE)) { - if(H5S_mpio_hyper_type(space, elmt_size, new_type, count, is_derived_type) < 0) + if(H5S__mpio_reg_hyper_type(space, elmt_size, new_type, count, is_derived_type) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert regular 'hyperslab' selection to MPI type") } /* end if */ - else { - if(H5S_mpio_span_hyper_type(space, elmt_size, new_type, count, is_derived_type) < 0) + else + if(H5S__mpio_span_hyper_type(space, elmt_size, new_type, count, is_derived_type) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't convert irregular 'hyperslab' selection to MPI type") - } /* end else */ break; case H5S_SEL_ERROR: @@ -1407,12 +1376,12 @@ done: /*------------------------------------------------------------------------- - * Function: H5S_mpio_create_large_type + * Function: H5S__mpio_create_large_type * - * Purpose: Create a large datatype of size larger than what a 32 bit integer + * 
Purpose: Create a large datatype of size larger than what a 32 bit integer * can hold. * - * Return: non-negative on success, negative on failure. + * Return: Non-negative on success, negative on failure. * * *new_type the new datatype created * @@ -1420,10 +1389,9 @@ done: * *------------------------------------------------------------------------- */ -static herr_t H5S_mpio_create_large_type (hsize_t num_elements, - MPI_Aint stride_bytes, - MPI_Datatype old_type, - MPI_Datatype *new_type) +static herr_t +H5S__mpio_create_large_type(hsize_t num_elements, MPI_Aint stride_bytes, + MPI_Datatype old_type, MPI_Datatype *new_type) { int num_big_types; /* num times the 2G datatype will be repeated */ int remaining_bytes; /* the number of bytes left that can be held in an int value */ @@ -1434,7 +1402,7 @@ static herr_t H5S_mpio_create_large_type (hsize_t num_elements, MPI_Aint disp[2], old_extent; herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_NOAPI_NOINIT + FUNC_ENTER_STATIC /* Calculate how many Big MPI datatypes are needed to represent the buffer */ num_big_types = (int)(num_elements/bigio_count); @@ -1447,41 +1415,23 @@ static herr_t H5S_mpio_create_large_type (hsize_t num_elements, * use type_hvector to create the type with the displacement provided */ if (0 == stride_bytes) { - if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous(bigio_count, - old_type, - &inner_type))) { + if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous(bigio_count, old_type, &inner_type))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code) - } - } - else { - if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hvector (bigio_count, - 1, - stride_bytes, - old_type, - &inner_type))) { + } /* end if */ + else + if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hvector(bigio_count, 1, stride_bytes, old_type, &inner_type))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hvector failed", mpi_code) - } - } /* Create a contiguous datatype of the buffer (minus the remaining < 2GB part) * If a stride is present, use hvector type */ - if (0 == stride_bytes) { - if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous(num_big_types, - inner_type, - &outer_type))) { + if(0 == stride_bytes) { + if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous(num_big_types, inner_type, &outer_type))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code) - } - } - else { - if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hvector (num_big_types, - 1, - stride_bytes, - inner_type, - &outer_type))) { + } /* end if */ + else + if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hvector(num_big_types, 1, stride_bytes, inner_type, &outer_type))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hvector failed", mpi_code) - } - } MPI_Type_free(&inner_type); @@ -1489,23 +1439,13 @@ static herr_t H5S_mpio_create_large_type (hsize_t num_elements, * use a struct datatype to encapsulate everything. 
*/ if(remaining_bytes) { - if (stride_bytes == 0) { - if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous (remaining_bytes, - old_type, - &leftover_type))) { + if(stride_bytes == 0) { + if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous(remaining_bytes, old_type, &leftover_type))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code) - } - } - else { - if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hvector - ((int)(num_elements - (hsize_t)num_big_types*bigio_count), - 1, - stride_bytes, - old_type, - &leftover_type))) { + } /* end if */ + else + if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hvector((int)(num_elements - (hsize_t)num_big_types * bigio_count), 1, stride_bytes, old_type, &leftover_type))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hvector failed", mpi_code) - } - } /* As of version 4.0, OpenMPI now turns off MPI-1 API calls by default, * so we're using the MPI-2 version even though we don't need the lb @@ -1522,28 +1462,25 @@ static herr_t H5S_mpio_create_large_type (hsize_t num_elements, block_len[0] = 1; block_len[1] = 1; disp[0] = 0; - disp[1] = (old_extent+stride_bytes)*num_big_types*(MPI_Aint)bigio_count; + disp[1] = (old_extent + stride_bytes) * num_big_types * (MPI_Aint)bigio_count; - if(MPI_SUCCESS != (mpi_code = - MPI_Type_create_struct(2, block_len, disp, type, new_type))) { + if(MPI_SUCCESS != (mpi_code = MPI_Type_create_struct(2, block_len, disp, type, new_type))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code) - } MPI_Type_free(&outer_type); MPI_Type_free(&leftover_type); - } - else { + } /* end if */ + else /* There are no remaining bytes so just set the new type to * the outer type created */ *new_type = outer_type; - } if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_type))) HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code) done: FUNC_LEAVE_NOAPI(ret_value) -} /* end H5S_mpio_create_large_type */ +} /* end H5S__mpio_create_large_type() */ #endif /* H5_HAVE_PARALLEL */ |
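The recurring pattern throughout this file is the "bigio" handling: MPI count arguments are 32-bit ints, so whenever a selection needs more than bigio_count elements (by default (2^29)-1, tunable through H5S_mpio_set_bigio_count()), the code chains several smaller derived datatypes together instead of passing one oversized count. Below is a simplified standalone sketch of the construction done by H5S__mpio_create_large_type(), assuming no stride between elements and illustrative names (create_large_type, BIGIO_COUNT); it is not the HDF5 code itself, and error handling is reduced to a return code.

#include <mpi.h>

#define BIGIO_COUNT 536870911   /* (2^29)-1, the default H5S_MAX_MPI_COUNT */

/* Sketch: describe `num_elements` repetitions of `old_type` even when
 * num_elements does not fit in an int.  Assumes num_elements > BIGIO_COUNT. */
static int
create_large_type(unsigned long long num_elements,
    MPI_Datatype old_type, MPI_Datatype *new_type)
{
    int          num_big_types = (int)(num_elements / BIGIO_COUNT);
    int          remaining     = (int)(num_elements -
                                       (unsigned long long)num_big_types * BIGIO_COUNT);
    MPI_Datatype inner_type, outer_type;

    /* One "big" block: BIGIO_COUNT elements of old_type */
    if (MPI_Type_contiguous(BIGIO_COUNT, old_type, &inner_type) != MPI_SUCCESS)
        return -1;

    /* Repeat the big block num_big_types times */
    if (MPI_Type_contiguous(num_big_types, inner_type, &outer_type) != MPI_SUCCESS)
        return -1;
    MPI_Type_free(&inner_type);

    if (remaining) {
        MPI_Datatype leftover_type, types[2];
        int          blocklens[2] = {1, 1};
        MPI_Aint     lb, extent, disps[2];

        /* Block holding the < BIGIO_COUNT tail of the buffer */
        if (MPI_Type_contiguous(remaining, old_type, &leftover_type) != MPI_SUCCESS)
            return -1;

        /* Place the tail immediately after the repeated big blocks */
        MPI_Type_get_extent(old_type, &lb, &extent);
        types[0] = outer_type;
        types[1] = leftover_type;
        disps[0] = 0;
        disps[1] = extent * num_big_types * (MPI_Aint)BIGIO_COUNT;

        if (MPI_Type_create_struct(2, blocklens, disps, types, new_type) != MPI_SUCCESS)
            return -1;
        MPI_Type_free(&outer_type);
        MPI_Type_free(&leftover_type);
    }
    else
        *new_type = outer_type;

    return MPI_Type_commit(new_type) == MPI_SUCCESS ? 0 : -1;
}

The MPI_Type_create_struct() step is what stitches the repeated blocks and the tail into a single datatype whose total size can exceed 2 GB while every individual count passed to MPI stays within int range; the hyperslab and point paths in the patch reuse the same trick for block, count and stride values that overflow 32 bits.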