From f4c997c379a725b738368fd28b6e9a4b1bf4b88c Mon Sep 17 00:00:00 2001
From: Scot Breitenfeld
Date: Fri, 6 Oct 2023 10:33:27 -0500
Subject: HDF5 with MPI2 build fix (#3641)

* fixed fortran parallel testing with cmake

* mpich2 compiler fix
---
 src/H5Dchunk.c | 23 +++++++++++++++++++----
 1 file changed, 19 insertions(+), 4 deletions(-)

diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index b5e1d5e..4664234 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -5060,8 +5060,10 @@ H5D__chunk_collective_fill(const H5D_t *dset, H5D_chunk_coll_fill_info_t *chunk_
         HDqsort(chunk_fill_info->chunk_info, chunk_fill_info->num_chunks,
                 sizeof(struct chunk_coll_fill_info), H5D__chunk_cmp_coll_fill_info);
 
-    /* Allocate buffer for block lengths if necessary */
+    /* Allocate buffer for block lengths if necessary */
+#if MPI_VERSION >= 3
     if (!all_same_block_len)
+#endif
         if (NULL == (block_lens = (int *)H5MM_malloc((size_t)(blocks + 1) * sizeof(int))))
             HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk lengths buffer")
 
@@ -5070,8 +5072,9 @@ H5D__chunk_collective_fill(const H5D_t *dset, H5D_chunk_coll_fill_info_t *chunk_
 
         /* store the chunk address as an MPI_Aint */
         chunk_disp_array[i] = (MPI_Aint)(chunk_fill_info->chunk_info[idx].addr);
-
+#if MPI_VERSION >= 3
         if (!all_same_block_len)
+#endif
             H5_CHECKED_ASSIGN(block_lens[i], int, chunk_fill_info->chunk_info[idx].chunk_size, size_t);
 
         if (chunk_fill_info->chunk_info[idx].unfiltered_partial_chunk) {
@@ -5087,8 +5090,9 @@ H5D__chunk_collective_fill(const H5D_t *dset, H5D_chunk_coll_fill_info_t *chunk_
     if (leftover && leftover > mpi_rank) {
         chunk_disp_array[blocks] =
             (MPI_Aint)chunk_fill_info->chunk_info[(blocks * mpi_size) + mpi_rank].addr;
-
+#if MPI_VERSION >= 3
         if (!all_same_block_len)
+#endif
             H5_CHECKED_ASSIGN(block_lens[blocks], int,
                               chunk_fill_info->chunk_info[(blocks * mpi_size) + mpi_rank].chunk_size,
                               size_t);
@@ -5106,11 +5110,16 @@ H5D__chunk_collective_fill(const H5D_t *dset, H5D_chunk_coll_fill_info_t *chunk_
         int block_len;
 
         H5_CHECKED_ASSIGN(block_len, int, chunk_fill_info->chunk_info[0].chunk_size, size_t);
-
+#if MPI_VERSION >= 3
         mpi_code =
             MPI_Type_create_hindexed_block(blocks, block_len, chunk_disp_array, MPI_BYTE, &file_type);
         if (mpi_code != MPI_SUCCESS)
             HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed_block failed", mpi_code)
+#else
+        mpi_code = MPI_Type_create_hindexed(blocks, block_lens, chunk_disp_array, MPI_BYTE, &file_type);
+        if (mpi_code != MPI_SUCCESS)
+            HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
+#endif
 
         if (partial_chunk_fill_buf) {
             /*
@@ -5119,10 +5128,16 @@ H5D__chunk_collective_fill(const H5D_t *dset, H5D_chunk_coll_fill_info_t *chunk_
              * need to be written to using the unfiltered fill buffer. Use an hindexed
             * block type rather than an hvector.
              */
+#if MPI_VERSION >= 3
             mpi_code =
                 MPI_Type_create_hindexed_block(blocks, block_len, block_disps, MPI_BYTE, &mem_type);
             if (mpi_code != MPI_SUCCESS)
                 HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed_block failed", mpi_code)
+#else
+            mpi_code = MPI_Type_create_hindexed(blocks, block_lens, block_disps, MPI_BYTE, &mem_type);
+            if (mpi_code != MPI_SUCCESS)
+                HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
+#endif
         }
         else {
             mpi_code = MPI_Type_create_hvector(blocks, block_len, 0, MPI_BYTE, &mem_type);
--
cgit v0.12
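
For context: MPI_Type_create_hindexed_block() first appeared in MPI-3.0, which is why the patch guards each call site with #if MPI_VERSION >= 3 and falls back to the MPI-2 routine MPI_Type_create_hindexed() otherwise. The sketch below is a minimal standalone illustration of that same guard pattern, not HDF5 code; the file name guard_demo.c, the three 512-byte blocks, and the names disps, lens, and ftype are all hypothetical.

/* guard_demo.c (hypothetical) -- build with: mpicc guard_demo.c -o guard_demo */
#include <mpi.h>

int main(int argc, char **argv)
{
    /* Three blocks of file data, each starting at a different byte offset */
    MPI_Aint     disps[3] = {0, 1024, 2048};
    MPI_Datatype ftype;

    MPI_Init(&argc, &argv);

#if MPI_VERSION >= 3
    /* MPI-3 path: every block has the same length, so a single shared
     * block length suffices and no length array has to be allocated. */
    MPI_Type_create_hindexed_block(3, 512, disps, MPI_BYTE, &ftype);
#else
    /* MPI-2 fallback: MPI_Type_create_hindexed() requires an explicit
     * length for every block, even when all lengths happen to be equal. */
    int lens[3] = {512, 512, 512};
    MPI_Type_create_hindexed(3, lens, disps, MPI_BYTE, &ftype);
#endif

    MPI_Type_commit(&ftype);
    /* ... ftype could now describe, e.g., a file view for collective I/O ... */
    MPI_Type_free(&ftype);

    MPI_Finalize();
    return 0;
}

The explicit length array required on the MPI-2 path is also why the patch drops the if (!all_same_block_len) condition when MPI_VERSION < 3: block_lens must then be allocated and filled even when every chunk has the same size.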