path: root/src/H5Dmpio.c
author     Sean McBride <sean@rogue-research.com>   2023-08-02 13:39:15 (GMT)
committer  GitHub <noreply@github.com>              2023-08-02 13:39:15 (GMT)
commit     2b0badb90e4e516d0758efbd8ec96449188c7249 (patch)
tree       074e38039b95ca7121a0117069974eff52a04519 /src/H5Dmpio.c
parent     56451503c55ef6e5f67fc3bb710f9e5b649b1af2 (diff)
Made HGOTO_ERROR a do-while loop (#3308)
* Made HGOTO_ERROR a do-while loop
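
The change is mechanical: every HGOTO_ERROR() invocation now ends in a semicolon because the macro body is wrapped in a do { ... } while (0). The sketch below is illustrative only and is not the actual HDF5 macro (the real one records the H5E_* major/minor error classes on the library's error stack; here they are accepted and discarded), but it shows why the do-while form requires the trailing semicolon at each call site, which is exactly what the diff adds:

#include <stdio.h>

#define FAIL (-1)

/* Illustrative stand-in for HDF5's HGOTO_ERROR: report the error, set the
 * function's return value, and jump to the common "done:" cleanup label.
 * The do { ... } while (0) wrapper makes the expansion a single statement,
 * so the call site must supply the terminating semicolon and the macro can
 * sit safely inside an un-braced if/else branch. */
#define HGOTO_ERROR(maj, min, ret_val, ...)                                     \
    do {                                                                        \
        fprintf(stderr, __VA_ARGS__); /* maj/min are ignored in this sketch */  \
        fputc('\n', stderr);                                                    \
        ret_value = (ret_val);                                                  \
        goto done;                                                              \
    } while (0)

/* Minimal caller shaped like the call sites in the diff below */
static int
example_read(int fd)
{
    int ret_value = 0;

    if (fd < 0)
        HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "can't finish collective parallel read");

done:
    return ret_value;
}

int
main(void)
{
    return example_read(-1) == FAIL ? 0 : 1;
}

Without the do-while wrapper, a statement-like macro expansion can pair an un-braced if with the wrong else or leave a stray empty statement once a semicolon is appended, which is the usual motivation for this idiom.
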
Diffstat (limited to 'src/H5Dmpio.c')
-rw-r--r--   src/H5Dmpio.c   304
1 file changed, 155 insertions(+), 149 deletions(-)
diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c
index be76c42..4b4c6ed 100644
--- a/src/H5Dmpio.c
+++ b/src/H5Dmpio.c
@@ -812,9 +812,9 @@ H5D__mpio_get_no_coll_cause_strings(char *local_cause, size_t local_cause_len, c
/* Retrieve the local and global cause flags from the API context */
if (H5CX_get_mpio_local_no_coll_cause(&local_no_coll_cause) < 0)
- HGOTO_ERROR(H5E_CONTEXT, H5E_CANTGET, FAIL, "unable to get local no collective cause value")
+ HGOTO_ERROR(H5E_CONTEXT, H5E_CANTGET, FAIL, "unable to get local no collective cause value");
if (H5CX_get_mpio_global_no_coll_cause(&global_no_coll_cause) < 0)
- HGOTO_ERROR(H5E_CONTEXT, H5E_CANTGET, FAIL, "unable to get global no collective cause value")
+ HGOTO_ERROR(H5E_CONTEXT, H5E_CANTGET, FAIL, "unable to get global no collective cause value");
/*
* Append each of the "reason for breaking collective I/O"
@@ -946,7 +946,7 @@ H5D__mpio_select_read(const H5D_io_info_t *io_info, hsize_t mpi_buf_count, H5S_t
H5_CHECK_OVERFLOW(mpi_buf_count, hsize_t, size_t);
if (H5F_shared_block_read(io_info->f_sh, H5FD_MEM_DRAW, io_info->store_faddr, (size_t)mpi_buf_count,
rbuf) < 0)
- HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "can't finish collective parallel read")
+ HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "can't finish collective parallel read");
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -980,7 +980,7 @@ H5D__mpio_select_write(const H5D_io_info_t *io_info, hsize_t mpi_buf_count, H5S_
H5_CHECK_OVERFLOW(mpi_buf_count, hsize_t, size_t);
if (H5F_shared_block_write(io_info->f_sh, H5FD_MEM_DRAW, io_info->store_faddr, (size_t)mpi_buf_count,
wbuf) < 0)
- HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "can't finish collective parallel write")
+ HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "can't finish collective parallel write");
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -1114,14 +1114,14 @@ H5D__piece_io(H5D_io_info_t *io_info)
/* Obtain the current rank of the process and the number of ranks */
if ((mpi_rank = H5F_mpi_get_rank(io_info->dsets_info[0].dset->oloc.file)) < 0)
- HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain MPI rank")
+ HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain MPI rank");
if ((mpi_size = H5F_mpi_get_size(io_info->dsets_info[0].dset->oloc.file)) < 0)
- HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain MPI size")
+ HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain MPI size");
#ifdef H5Dmpio_DEBUG
/* Initialize file-level debugging if not initialized */
if (!H5D_mpio_debug_inited && H5D__mpio_debug_init() < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize H5Dmpio debugging")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize H5Dmpio debugging");
/* Open file for debugging if necessary */
log_file_flag = H5D_mpio_debug_flags_s[(int)'f'];
@@ -1132,7 +1132,7 @@ H5D__piece_io(H5D_io_info_t *io_info)
HDsnprintf(debug_log_filename, 1024, "H5Dmpio_debug.rank%d", mpi_rank);
if (NULL == (debug_log_file = fopen(debug_log_filename, "a")))
- HGOTO_ERROR(H5E_IO, H5E_OPENERROR, FAIL, "couldn't open debugging log file")
+ HGOTO_ERROR(H5E_IO, H5E_OPENERROR, FAIL, "couldn't open debugging log file");
/* Print a short header for this I/O operation */
time_now = HDtime(NULL);
@@ -1146,7 +1146,7 @@ H5D__piece_io(H5D_io_info_t *io_info)
* Only set here if it's a static option, if it needs to be calculated using the
* number of chunks per process delay that calculation until later. */
if (H5CX_get_mpio_chunk_opt_mode(&chunk_opt_mode) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "couldn't get chunk optimization option")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "couldn't get chunk optimization option");
if (H5FD_MPIO_CHUNK_ONE_IO == chunk_opt_mode)
io_option = H5D_ONE_LINK_CHUNK_IO; /*no opt*/
@@ -1173,7 +1173,7 @@ H5D__piece_io(H5D_io_info_t *io_info)
/* Get the chunk optimization option threshold */
if (H5CX_get_mpio_chunk_opt_num(&one_link_chunk_io_threshold) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL,
- "couldn't get chunk optimization option threshold value")
+ "couldn't get chunk optimization option threshold value");
/* If the threshold is 0, no need to check number of chunks */
if (one_link_chunk_io_threshold > 0) {
@@ -1198,13 +1198,13 @@ H5D__piece_io(H5D_io_info_t *io_info)
/*** Set collective chunk user-input optimization API. ***/
if (H5D_ONE_LINK_CHUNK_IO == io_option) {
if (H5CX_test_set_mpio_coll_chunk_link_hard(0) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set property value")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set property value");
} /* end if */
#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
/* Perform unfiltered link chunk collective IO */
if (H5D__link_piece_collective_io(io_info, mpi_rank) < 0)
- HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish linked chunk MPI-IO")
+ HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish linked chunk MPI-IO");
}
}
@@ -1221,7 +1221,7 @@ H5D__piece_io(H5D_io_info_t *io_info)
if (H5D__inter_collective_io(io_info, &io_info->dsets_info[i],
io_info->dsets_info[i].file_space,
io_info->dsets_info[i].mem_space) < 0)
- HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish shared collective MPI-IO")
+ HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish shared collective MPI-IO");
/* Set the actual I/O mode property. internal_collective_io will not break to
* independent I/O, so we set it here.
@@ -1237,7 +1237,7 @@ H5D__piece_io(H5D_io_info_t *io_info)
/* Get the chunk optimization option threshold */
if (H5CX_get_mpio_chunk_opt_num(&one_link_chunk_io_threshold) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL,
- "couldn't get chunk optimization option threshold value")
+ "couldn't get chunk optimization option threshold value");
/* If the threshold is 0, no need to check number of chunks */
if (one_link_chunk_io_threshold == 0) {
@@ -1269,7 +1269,7 @@ H5D__piece_io(H5D_io_info_t *io_info)
if (H5D__link_chunk_filtered_collective_io(io_info, &io_info->dsets_info[i],
mpi_rank, mpi_size) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL,
- "couldn't finish filtered linked chunk MPI-IO")
+ "couldn't finish filtered linked chunk MPI-IO");
} /* end if */
else {
/* If there is more than one dataset we cannot make the multi dataset call here,
@@ -1281,13 +1281,13 @@ H5D__piece_io(H5D_io_info_t *io_info)
if (H5D__multi_chunk_collective_io(io_info, &io_info->dsets_info[i], mpi_rank,
mpi_size) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL,
- "couldn't finish optimized multiple chunk MPI-IO")
+ "couldn't finish optimized multiple chunk MPI-IO");
}
else {
/* Perform unfiltered link chunk collective IO */
if (H5D__link_piece_collective_io(io_info, mpi_rank) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL,
- "couldn't finish linked chunk MPI-IO")
+ "couldn't finish linked chunk MPI-IO");
}
}
@@ -1300,14 +1300,14 @@ H5D__piece_io(H5D_io_info_t *io_info)
if (H5D__multi_chunk_filtered_collective_io(io_info, &io_info->dsets_info[i],
mpi_rank, mpi_size) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL,
- "couldn't finish optimized multiple filtered chunk MPI-IO")
+ "couldn't finish optimized multiple filtered chunk MPI-IO");
} /* end if */
else {
/* Perform unfiltered multi chunk collective IO */
if (H5D__multi_chunk_collective_io(io_info, &io_info->dsets_info[i], mpi_rank,
mpi_size) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL,
- "couldn't finish optimized multiple chunk MPI-IO")
+ "couldn't finish optimized multiple chunk MPI-IO");
}
break;
@@ -1318,19 +1318,19 @@ H5D__piece_io(H5D_io_info_t *io_info)
/*** Set collective chunk user-input optimization APIs. ***/
if (H5D_ONE_LINK_CHUNK_IO == io_option) {
if (H5CX_test_set_mpio_coll_chunk_link_hard(0) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set property value")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set property value");
} /* end if */
else if (H5D_MULTI_CHUNK_IO == io_option) {
if (H5CX_test_set_mpio_coll_chunk_multi_hard(0) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set property value")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set property value");
} /* end else-if */
else if (H5D_ONE_LINK_CHUNK_IO_MORE_OPT == io_option) {
if (H5CX_test_set_mpio_coll_chunk_link_num_true(0) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set property value")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set property value");
} /* end if */
else if (H5D_MULTI_CHUNK_IO_MORE_OPT == io_option) {
if (H5CX_test_set_mpio_coll_chunk_link_num_false(0) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set property value")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set property value");
} /* end if */
}
#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
@@ -1371,7 +1371,7 @@ H5D__collective_read(H5D_io_info_t *io_info)
/* Call generic selection operation */
if (H5D__piece_io(io_info) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_READERROR, FAIL, "read error")
+ HGOTO_ERROR(H5E_DATASPACE, H5E_READERROR, FAIL, "read error");
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -1396,7 +1396,7 @@ H5D__collective_write(H5D_io_info_t *io_info)
/* Call generic selection operation */
if (H5D__piece_io(io_info) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_WRITEERROR, FAIL, "write error")
+ HGOTO_ERROR(H5E_DATASPACE, H5E_WRITEERROR, FAIL, "write error");
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -1455,7 +1455,7 @@ H5D__link_piece_collective_io(H5D_io_info_t *io_info, int H5_ATTR_UNUSED mpi_ran
else if (io_info->dsets_info[i].layout->type == H5D_CONTIGUOUS)
actual_io_mode |= H5D_MPIO_CONTIGUOUS_COLLECTIVE;
else
- HGOTO_ERROR(H5E_IO, H5E_UNSUPPORTED, FAIL, "unsupported storage layout")
+ HGOTO_ERROR(H5E_IO, H5E_UNSUPPORTED, FAIL, "unsupported storage layout");
}
/* Set the actual-chunk-opt-mode property. */
@@ -1507,25 +1507,25 @@ H5D__link_piece_collective_io(H5D_io_info_t *io_info, int H5_ATTR_UNUSED mpi_ran
/* Allocate chunking information */
if (NULL == (chunk_mtype = (MPI_Datatype *)H5MM_malloc(num_chunk * sizeof(MPI_Datatype))))
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL,
- "couldn't allocate chunk memory datatype buffer")
+ "couldn't allocate chunk memory datatype buffer");
if (NULL == (chunk_ftype = (MPI_Datatype *)H5MM_malloc(num_chunk * sizeof(MPI_Datatype))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk file datatype buffer")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk file datatype buffer");
if (NULL == (chunk_file_disp_array = (MPI_Aint *)H5MM_malloc(num_chunk * sizeof(MPI_Aint))))
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL,
- "couldn't allocate chunk file displacement buffer")
+ "couldn't allocate chunk file displacement buffer");
if (NULL == (chunk_mem_disp_array = (MPI_Aint *)H5MM_calloc(num_chunk * sizeof(MPI_Aint))))
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL,
- "couldn't allocate chunk memory displacement buffer")
+ "couldn't allocate chunk memory displacement buffer");
if (NULL == (chunk_mpi_mem_counts = (int *)H5MM_calloc(num_chunk * sizeof(int))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk memory counts buffer")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk memory counts buffer");
if (NULL == (chunk_mpi_file_counts = (int *)H5MM_calloc(num_chunk * sizeof(int))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk file counts buffer")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk file counts buffer");
if (NULL == (chunk_mbt_is_derived_array = (hbool_t *)H5MM_calloc(num_chunk * sizeof(hbool_t))))
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL,
- "couldn't allocate chunk memory is derived datatype flags buffer")
+ "couldn't allocate chunk memory is derived datatype flags buffer");
if (NULL == (chunk_mft_is_derived_array = (hbool_t *)H5MM_calloc(num_chunk * sizeof(hbool_t))))
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL,
- "couldn't allocate chunk file is derived datatype flags buffer")
+ "couldn't allocate chunk file is derived datatype flags buffer");
/* save lowest file address */
ctg_store.contig.dset_addr = io_info->sel_pieces[0]->faddr;
@@ -1568,7 +1568,7 @@ H5D__link_piece_collective_io(H5D_io_info_t *io_info, int H5_ATTR_UNUSED mpi_ran
selected in case they
are out of order */
&is_permuted /* OUT */) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI file type")
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI file type");
/* Sanity check */
if (is_permuted)
@@ -1589,7 +1589,7 @@ H5D__link_piece_collective_io(H5D_io_info_t *io_info, int H5_ATTR_UNUSED mpi_ran
and applied to the
memory selection */
&is_permuted /* IN */) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI buf type")
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI buf type");
/* Sanity check */
if (is_permuted)
assert(!permute_map);
@@ -1665,7 +1665,7 @@ H5D__link_piece_collective_io(H5D_io_info_t *io_info, int H5_ATTR_UNUSED mpi_ran
/* Perform final collective I/O operation */
if (H5D__final_collective_io(io_info, mpi_buf_count, chunk_final_ftype, chunk_final_mtype) < 0)
- HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish MPI-IO")
+ HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish MPI-IO");
}
done:
@@ -1799,12 +1799,13 @@ H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_
H5CX_set_mpio_actual_io_mode(H5D_MPIO_CHUNK_COLLECTIVE);
/* Build a list of selected chunks in the collective io operation */
+
if (H5D__mpio_collective_filtered_chunk_io_setup(io_info, dset_info, mpi_rank, &chunk_list) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "couldn't construct filtered I/O info list")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "couldn't construct filtered I/O info list");
if (io_info->op_type == H5D_IO_OP_READ) { /* Filtered collective read */
if (H5D__mpio_collective_filtered_chunk_read(&chunk_list, io_info, dset_info, mpi_rank) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't read filtered chunks")
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't read filtered chunks");
}
else { /* Filtered collective write */
H5D_chk_idx_info_t index_info;
@@ -1815,13 +1816,13 @@ H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_
/* Redistribute shared chunks being written to */
if (H5D__mpio_redistribute_shared_chunks(&chunk_list, io_info, mpi_rank, mpi_size,
&rank_chunks_assigned_map) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to redistribute shared chunks")
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to redistribute shared chunks");
/* Send any chunk modification messages for chunks this rank no longer owns */
if (H5D__mpio_share_chunk_modification_data(&chunk_list, io_info, dset_info, mpi_rank, mpi_size,
&chunk_msg_bufs, &chunk_msg_bufs_len) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL,
- "unable to send chunk modification data between MPI ranks")
+ "unable to send chunk modification data between MPI ranks");
/* Make sure the local chunk list was updated correctly */
assert(chunk_list.num_chunk_infos == rank_chunks_assigned_map[mpi_rank]);
@@ -1834,7 +1835,7 @@ H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_
*/
if (H5D__mpio_collective_filtered_chunk_update(&chunk_list, chunk_msg_bufs, chunk_msg_bufs_len,
io_info, dset_info, mpi_rank) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "couldn't update modified chunks")
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "couldn't update modified chunks");
/* Free up resources used by chunk hash table now that we're done updating chunks */
HASH_CLEAR(hh, chunk_list.chunk_hash_table);
@@ -1843,11 +1844,11 @@ H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_
if (H5D__mpio_collective_filtered_chunk_reallocate(&chunk_list, rank_chunks_assigned_map, io_info,
&index_info, mpi_rank, mpi_size) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL,
- "couldn't collectively re-allocate file space for chunks")
+ "couldn't collectively re-allocate file space for chunks");
/* Perform vector I/O on chunks */
if (H5D__mpio_collective_filtered_vec_io(&chunk_list, io_info->f_sh, io_info->op_type) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "couldn't perform vector I/O on filtered chunks")
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "couldn't perform vector I/O on filtered chunks");
/* Free up resources in anticipation of following collective operation */
for (size_t i = 0; i < chunk_list.num_chunk_infos; i++) {
@@ -1863,7 +1864,7 @@ H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_
if (H5D__mpio_collective_filtered_chunk_reinsert(&chunk_list, rank_chunks_assigned_map, io_info,
dset_info, &index_info, mpi_rank, mpi_size) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL,
- "couldn't collectively re-insert modified chunks into chunk index")
+ "couldn't collectively re-insert modified chunks into chunk index");
}
done:
@@ -1937,7 +1938,7 @@ H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_
/* Get the current I/O collective opt mode so we can restore it later */
if (H5CX_get_mpio_coll_opt(&orig_coll_opt_mode) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get MPI-I/O collective_op property")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get MPI-I/O collective_op property");
/* Set the actual chunk opt mode property */
H5CX_set_mpio_actual_chunk_opt(H5D_MPIO_MULTI_CHUNK);
@@ -1956,7 +1957,7 @@ H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_
/* Obtain IO option for each chunk */
if (H5D__obtain_mpio_mode(io_info, dset_info, chunk_io_option, chunk_addr, mpi_rank, mpi_size) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTRECV, FAIL, "unable to obtain MPIO mode")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTRECV, FAIL, "unable to obtain MPIO mode");
/* Set memory buffers */
io_info->base_maddr = dset_info->buf;
@@ -1972,9 +1973,9 @@ H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_
* stored in index order and since we're iterating in index order we can
* just check for each chunk being selected in order */
if (NULL == (piece_node = H5SL_first(dset_info->layout_io_info.chunk_map->dset_sel_pieces)))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "couldn't get piece node from skip list")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "couldn't get piece node from skip list");
if (NULL == (next_chunk_info = (H5D_piece_info_t *)H5SL_item(piece_node)))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "couldn't get piece info from skip list")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "couldn't get piece info from skip list");
}
/* Loop over _all_ the chunks */
@@ -2001,9 +2002,9 @@ H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_
/* Advance next chunk to next node in skip list, if there are more chunks selected */
if (num_chunk) {
if (NULL == (piece_node = H5SL_next(piece_node)))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "chunk skip list terminated early")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "chunk skip list terminated early");
if (NULL == (next_chunk_info = (H5D_piece_info_t *)H5SL_item(piece_node)))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "couldn't get piece info from skip list")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "couldn't get piece info from skip list");
}
/* Pass in chunk's coordinates in a union. */
@@ -2043,7 +2044,7 @@ H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_
/* Switch back to collective I/O */
if (last_coll_opt_mode != H5FD_MPIO_COLLECTIVE_IO) {
if (H5CX_set_mpio_coll_opt(H5FD_MPIO_COLLECTIVE_IO) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't switch to collective I/O")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't switch to collective I/O");
last_coll_opt_mode = H5FD_MPIO_COLLECTIVE_IO;
} /* end if */
@@ -2052,7 +2053,7 @@ H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_
/* Perform the I/O */
if (H5D__inter_collective_io(io_info, dset_info, fspace, mspace) < 0)
- HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish shared collective MPI-IO")
+ HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish shared collective MPI-IO");
} /* end if */
else { /* possible independent IO for this chunk */
#ifdef H5Dmpio_DEBUG
@@ -2077,7 +2078,7 @@ H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_
/* Using independent I/O with file setview.*/
if (last_coll_opt_mode != H5FD_MPIO_INDIVIDUAL_IO) {
if (H5CX_set_mpio_coll_opt(H5FD_MPIO_INDIVIDUAL_IO) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't switch to individual I/O")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't switch to individual I/O");
last_coll_opt_mode = H5FD_MPIO_INDIVIDUAL_IO;
} /* end if */
@@ -2086,7 +2087,7 @@ H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info_t *dset_
/* Perform the I/O */
if (H5D__inter_collective_io(io_info, dset_info, fspace, mspace) < 0)
- HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish shared collective MPI-IO")
+ HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish shared collective MPI-IO");
#ifdef H5Dmpio_DEBUG
H5D_MPIO_DEBUG(mpi_rank, "after inter collective IO");
#endif
@@ -2221,7 +2222,7 @@ H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info
/* Build a list of selected chunks in the collective IO operation */
if (H5D__mpio_collective_filtered_chunk_io_setup(io_info, dset_info, mpi_rank, &chunk_list) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "couldn't construct filtered I/O info list")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "couldn't construct filtered I/O info list");
/* Retrieve the maximum number of chunks selected for any rank */
if (MPI_SUCCESS != (mpi_code = MPI_Allreduce(&chunk_list.num_chunk_infos, &max_num_chunks, 1,
@@ -2256,7 +2257,7 @@ H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info
if (H5D__mpio_collective_filtered_chunk_read(&single_chunk_list, io_info, dset_info, mpi_rank) <
0)
- HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't read filtered chunks")
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't read filtered chunks");
if (have_chunk_to_process && chunk_list.chunk_infos[i].buf) {
H5MM_free(chunk_list.chunk_infos[i].buf);
@@ -2273,13 +2274,13 @@ H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info
if (mpi_size > 1) {
/* Redistribute shared chunks being written to */
if (H5D__mpio_redistribute_shared_chunks(&chunk_list, io_info, mpi_rank, mpi_size, NULL) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to redistribute shared chunks")
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to redistribute shared chunks");
/* Send any chunk modification messages for chunks this rank no longer owns */
if (H5D__mpio_share_chunk_modification_data(&chunk_list, io_info, dset_info, mpi_rank, mpi_size,
&chunk_msg_bufs, &chunk_msg_bufs_len) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL,
- "unable to send chunk modification data between MPI ranks")
+ "unable to send chunk modification data between MPI ranks");
}
/* Iterate over the max number of chunks among all ranks, as this rank could
@@ -2317,18 +2318,18 @@ H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info
*/
if (H5D__mpio_collective_filtered_chunk_update(
&single_chunk_list, chunk_msg_bufs, chunk_msg_bufs_len, io_info, dset_info, mpi_rank) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "couldn't update modified chunks")
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "couldn't update modified chunks");
/* All ranks now collectively re-allocate file space for all chunks */
if (H5D__mpio_collective_filtered_chunk_reallocate(&single_chunk_list, NULL, io_info, &index_info,
mpi_rank, mpi_size) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL,
- "couldn't collectively re-allocate file space for chunks")
+ "couldn't collectively re-allocate file space for chunks");
/* Perform vector I/O on chunks */
if (H5D__mpio_collective_filtered_vec_io(&single_chunk_list, io_info->f_sh, io_info->op_type) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL,
- "couldn't perform vector I/O on filtered chunks")
+ "couldn't perform vector I/O on filtered chunks");
/* Free up resources in anticipation of following collective operation */
if (have_chunk_to_process && chunk_list.chunk_infos[i].buf) {
@@ -2342,7 +2343,7 @@ H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, H5D_dset_io_info
if (H5D__mpio_collective_filtered_chunk_reinsert(&single_chunk_list, NULL, io_info, dset_info,
&index_info, mpi_rank, mpi_size) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL,
- "couldn't collectively re-insert modified chunks into chunk index")
+ "couldn't collectively re-insert modified chunks into chunk index");
} /* end for */
}
@@ -2404,7 +2405,7 @@ H5D__inter_collective_io(H5D_io_info_t *io_info, const H5D_dset_io_info_t *di, H
H5D_MPIO_TRACE_ENTER(mpi_rank);
H5D_MPIO_TIME_START(mpi_rank, "Inter collective I/O");
if (mpi_rank < 0)
- HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain MPI rank")
+ HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain MPI rank");
#endif
assert(io_info);
@@ -2436,7 +2437,7 @@ H5D__inter_collective_io(H5D_io_info_t *io_info, const H5D_dset_io_info_t *di, H
case they are out of
order */
&is_permuted /* OUT */) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI file type")
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI file type");
/* Sanity check */
if (is_permuted)
assert(permute_map);
@@ -2455,7 +2456,7 @@ H5D__inter_collective_io(H5D_io_info_t *io_info, const H5D_dset_io_info_t *di, H
memory selection */
,
&is_permuted /* IN */) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI buffer type")
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI buffer type");
/* Sanity check */
if (is_permuted)
assert(!permute_map);
@@ -2475,7 +2476,7 @@ H5D__inter_collective_io(H5D_io_info_t *io_info, const H5D_dset_io_info_t *di, H
/* Perform final collective I/O operation */
if (H5D__final_collective_io(io_info, (hsize_t)mpi_buf_count, mpi_file_type, mpi_buf_type) < 0)
- HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish collective MPI-IO")
+ HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish collective MPI-IO");
done:
/* Free the MPI buf and file types, if they were derived */
@@ -2518,20 +2519,20 @@ H5D__final_collective_io(H5D_io_info_t *io_info, hsize_t mpi_buf_count, MPI_Data
H5D_MPIO_TRACE_ENTER(mpi_rank);
H5D_MPIO_TIME_START(mpi_rank, "Final collective I/O");
if (mpi_rank < 0)
- HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain MPI rank")
+ HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain MPI rank");
#endif
/* Pass buf type, file type to the file driver. */
if (H5CX_set_mpi_coll_datatypes(mpi_buf_type, mpi_file_type) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set MPI-I/O collective I/O datatypes")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set MPI-I/O collective I/O datatypes");
if (io_info->op_type == H5D_IO_OP_WRITE) {
if ((io_info->md_io_ops.single_write_md)(io_info, mpi_buf_count, NULL, NULL) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "optimized write failed")
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "optimized write failed");
} /* end if */
else {
if ((io_info->md_io_ops.single_read_md)(io_info, mpi_buf_count, NULL, NULL) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "optimized read failed")
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "optimized read failed");
} /* end else */
done:
@@ -2790,7 +2791,7 @@ H5D__obtain_mpio_mode(H5D_io_info_t *io_info, H5D_dset_io_info_t *di, uint8_t as
/* Setup parameters */
H5_CHECKED_ASSIGN(total_chunks, size_t, di->layout->u.chunk.nchunks, hsize_t);
if (H5CX_get_mpio_chunk_opt_ratio(&percent_nproc_per_chunk) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "couldn't get percent nproc per chunk")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "couldn't get percent nproc per chunk");
/* if ratio is 0, perform collective io */
if (0 == percent_nproc_per_chunk) {
if (H5D__chunk_addrmap(di->dset, chunk_addr) < 0)
@@ -2805,13 +2806,13 @@ H5D__obtain_mpio_mode(H5D_io_info_t *io_info, H5D_dset_io_info_t *di, uint8_t as
/* Allocate memory */
if (NULL == (io_mode_info = (uint8_t *)H5MM_calloc(total_chunks)))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate I/O mode info buffer")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate I/O mode info buffer");
if (NULL == (mergebuf = (uint8_t *)H5MM_malloc((sizeof(haddr_t) + 1) * total_chunks)))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate mergebuf buffer")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate mergebuf buffer");
tempbuf = mergebuf + total_chunks;
if (mpi_rank == root)
if (NULL == (recv_io_mode_info = (uint8_t *)H5MM_malloc(total_chunks * (size_t)mpi_size)))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate recv I/O mode info buffer")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate recv I/O mode info buffer");
/* Obtain the regularity and selection information for all chunks in this process. */
chunk_node = H5SL_first(di->layout_io_info.chunk_map->dset_sel_pieces);
@@ -2849,12 +2850,12 @@ H5D__obtain_mpio_mode(H5D_io_info_t *io_info, H5D_dset_io_info_t *di, uint8_t as
/* pre-computing: calculate number of processes and
regularity of the selection occupied in each chunk */
if (NULL == (nproc_per_chunk = (unsigned *)H5MM_calloc(total_chunks * sizeof(unsigned))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate nproc_per_chunk buffer")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate nproc_per_chunk buffer");
/* calculating the chunk address */
if (H5D__chunk_addrmap(di->dset, chunk_addr) < 0) {
H5MM_free(nproc_per_chunk);
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk address")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk address");
} /* end if */
/* checking for number of process per chunk and regularity of the selection*/
@@ -2885,7 +2886,7 @@ H5D__obtain_mpio_mode(H5D_io_info_t *io_info, H5D_dset_io_info_t *di, uint8_t as
/* Broadcasting the MPI_IO option info. and chunk address info. */
if ((sizeof(haddr_t) + 1) * total_chunks > INT_MAX)
- HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "result overflow")
+ HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "result overflow");
if (MPI_SUCCESS !=
(mpi_code = MPI_Bcast(mergebuf, (int)((sizeof(haddr_t) + 1) * total_chunks), MPI_BYTE, root, comm)))
HMPI_GOTO_ERROR(FAIL, "MPI_BCast failed", mpi_code)
@@ -2900,14 +2901,14 @@ H5D__obtain_mpio_mode(H5D_io_info_t *io_info, H5D_dset_io_info_t *di, uint8_t as
for (ic = 0; ic < total_chunks; ic++)
if (assign_io_mode[ic] == H5D_CHUNK_IO_MODE_COL) {
if (H5CX_test_set_mpio_coll_chunk_multi_ratio_coll(0) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set property value")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set property value");
coll_op = TRUE;
break;
} /* end if */
if (!coll_op)
if (H5CX_test_set_mpio_coll_chunk_multi_ratio_ind(0) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set property value")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to set property value");
}
#endif
@@ -2978,7 +2979,7 @@ H5D__mpio_collective_filtered_chunk_io_setup(const H5D_io_info_t *io_info, const
!(di->dset->shared->layout.u.chunk.flags & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
if (NULL == (local_info_array = H5MM_malloc(num_chunks_selected * sizeof(*local_info_array))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate local io info array buffer")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate local io info array buffer");
chunk_node = H5SL_first(di->layout_io_info.chunk_map->dset_sel_pieces);
for (size_t i = 0; chunk_node; i++) {
@@ -2986,7 +2987,7 @@ H5D__mpio_collective_filtered_chunk_io_setup(const H5D_io_info_t *io_info, const
/* Obtain this chunk's address */
if (H5D__chunk_lookup(di->dset, chunk_info->scaled, &udata) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address");
/* Initialize rank-local chunk info */
local_info_array[i].chunk_info = chunk_info;
@@ -3124,7 +3125,7 @@ H5D__mpio_collective_filtered_chunk_io_setup(const H5D_io_info_t *io_info, const
* in a more natural way, but this hack should suffice for now.
*/
if (H5D__chunk_lookup(di->dset, scaled, &udata) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address");
}
chunk_list->chunk_infos = local_info_array;
@@ -3197,7 +3198,7 @@ H5D__mpio_redistribute_shared_chunks(H5D_filtered_collective_io_info_t *chunk_li
* MPI communication.
*/
if (NULL == (num_chunks_map = H5MM_malloc((size_t)mpi_size * sizeof(*num_chunks_map))))
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "couldn't allocate assigned chunks array")
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "couldn't allocate assigned chunks array");
/* Perform initial Allgather to determine the collective chunk list size */
if (MPI_SUCCESS != (mpi_code = MPI_Allgather(&chunk_list->num_chunk_infos, 1, H5_SIZE_T_AS_MPI_TYPE,
@@ -3218,7 +3219,7 @@ H5D__mpio_redistribute_shared_chunks(H5D_filtered_collective_io_info_t *chunk_li
if (H5D__mpio_redistribute_shared_chunks_int(chunk_list, num_chunks_map, redistribute_on_all_ranks,
io_info, mpi_rank, mpi_size) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTREDISTRIBUTE, FAIL, "can't redistribute shared chunks")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTREDISTRIBUTE, FAIL, "can't redistribute shared chunks");
/*
* If the caller provided a pointer for the mapping from
@@ -3383,14 +3384,14 @@ H5D__mpio_redistribute_shared_chunks_int(H5D_filtered_collective_io_info_t *chun
if (H5D__mpio_get_chunk_redistribute_info_types(&packed_type, &packed_type_derived, &struct_type,
&struct_type_derived) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL,
- "can't create derived datatypes for chunk redistribution info")
+ "can't create derived datatypes for chunk redistribution info");
/* Perform gather operation */
if (H5_mpio_gatherv_alloc(chunk_list->chunk_infos, num_chunks_int, struct_type, counts_ptr,
displacements_ptr, packed_type, all_ranks_involved, 0, io_info->comm, mpi_rank,
mpi_size, &coll_chunk_list, &coll_chunk_list_num_entries) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGATHER, FAIL,
- "can't gather chunk redistribution info to involved ranks")
+ "can't gather chunk redistribution info to involved ranks");
/*
* If all ranks are redistributing shared chunks, we no
@@ -3702,7 +3703,7 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk
if (chunk_list->num_chunk_infos > 0) {
/* Allocate a selection iterator for iterating over chunk dataspaces */
if (NULL == (mem_iter = H5FL_MALLOC(H5S_sel_iter_t)))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate dataspace selection iterator")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate dataspace selection iterator");
/*
* Allocate send buffer and MPI_Request arrays for non-blocking
@@ -3711,10 +3712,10 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk
send_bufs_nalloc = H5D_CHUNK_NUM_SEND_MSGS_INIT;
if (NULL == (msg_send_bufs = H5MM_malloc(send_bufs_nalloc * sizeof(*msg_send_bufs))))
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL,
- "couldn't allocate chunk modification message buffer array")
+ "couldn't allocate chunk modification message buffer array");
if (NULL == (send_requests = H5MM_malloc(send_bufs_nalloc * sizeof(*send_requests))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate send requests array")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate send requests array");
}
/*
@@ -3771,7 +3772,7 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk
/* Determine size of serialized chunk file dataspace */
if (H5S_encode(chunk_info->fspace, &mod_data_p, &space_size) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to get encoded dataspace size")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to get encoded dataspace size");
mod_data_size += space_size;
/* Determine size of data being written */
@@ -3782,7 +3783,7 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk
if (NULL == (msg_send_bufs[num_send_requests] = H5MM_malloc(mod_data_size)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL,
- "couldn't allocate chunk modification message buffer")
+ "couldn't allocate chunk modification message buffer");
mod_data_p = msg_send_bufs[num_send_requests];
@@ -3792,18 +3793,18 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk
/* Serialize the chunk's file dataspace into the buffer */
if (H5S_encode(chunk_info->fspace, &mod_data_p, &mod_data_size) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTENCODE, FAIL, "unable to encode dataspace")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTENCODE, FAIL, "unable to encode dataspace");
/* Initialize iterator for memory selection */
if (H5S_select_iter_init(mem_iter, chunk_info->mspace, dset_info->type_info.src_type_size,
H5S_SEL_ITER_SHARE_WITH_DATASPACE) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
- "unable to initialize memory selection information")
+ "unable to initialize memory selection information");
mem_iter_init = TRUE;
/* Collect the modification data into the buffer */
if (0 == H5D__gather_mem(dset_info->buf.cvp, mem_iter, (size_t)iter_nelmts, mod_data_p))
- HGOTO_ERROR(H5E_IO, H5E_CANTGATHER, FAIL, "couldn't gather from write buffer")
+ HGOTO_ERROR(H5E_IO, H5E_CANTGATHER, FAIL, "couldn't gather from write buffer");
/*
* Ensure that the size of the chunk data being sent can be
@@ -3834,17 +3835,17 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk
if (NULL ==
(tmp_alloc = H5MM_realloc(msg_send_bufs, send_bufs_nalloc * sizeof(*msg_send_bufs))))
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL,
- "couldn't resize chunk modification message buffer array")
+ "couldn't resize chunk modification message buffer array");
msg_send_bufs = tmp_alloc;
if (NULL ==
(tmp_alloc = H5MM_realloc(send_requests, send_bufs_nalloc * sizeof(*send_requests))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't resize send requests array")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't resize send requests array");
send_requests = tmp_alloc;
}
if (H5S_SELECT_ITER_RELEASE(mem_iter) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "couldn't release memory selection iterator")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "couldn't release memory selection iterator");
mem_iter_init = FALSE;
}
}
@@ -3852,7 +3853,7 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk
/* Check if the number of send or receive requests will overflow an int (MPI requirement) */
if (num_send_requests > INT_MAX || num_msgs_incoming > INT_MAX)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL,
- "too many shared chunks in parallel filtered write operation")
+ "too many shared chunks in parallel filtered write operation");
H5_CHECK_OVERFLOW(num_send_requests, size_t, int);
H5_CHECK_OVERFLOW(num_msgs_incoming, size_t, int);
@@ -3864,10 +3865,10 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk
if (num_msgs_incoming) {
if (NULL == (msg_recv_bufs = H5MM_malloc(num_msgs_incoming * sizeof(*msg_recv_bufs))))
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL,
- "couldn't allocate chunk modification message buffer array")
+ "couldn't allocate chunk modification message buffer array");
if (NULL == (recv_requests = H5MM_malloc(num_msgs_incoming * sizeof(*recv_requests))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate receive requests array")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate receive requests array");
}
/* Process any incoming messages until everyone is done */
@@ -3900,13 +3901,13 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk
#endif
if (msg_size <= 0)
- HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "invalid chunk modification message size")
+ HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "invalid chunk modification message size");
assert((num_recv_requests + 1) <= num_msgs_incoming);
if (NULL ==
(msg_recv_bufs[num_recv_requests] = H5MM_malloc((size_t)msg_size * sizeof(unsigned char))))
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL,
- "couldn't allocate chunk modification message receive buffer")
+ "couldn't allocate chunk modification message receive buffer");
if (MPI_SUCCESS != (mpi_code = MPI_Irecv(msg_recv_bufs[num_recv_requests], (int)msg_size,
MPI_BYTE, status.MPI_SOURCE, H5D_CHUNK_MOD_DATA_TAG,
@@ -4105,9 +4106,9 @@ H5D__mpio_collective_filtered_chunk_read(H5D_filtered_collective_io_info_t *chun
if (chunk_list->num_chunk_infos) {
/* Retrieve filter settings from API context */
if (H5CX_get_err_detect(&err_detect) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get error detection info")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get error detection info");
if (H5CX_get_filter_cb(&filter_cb) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get I/O filter callback function")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get I/O filter callback function");
/* Set size of full chunks in dataset */
file_chunk_size = di->dset->shared->layout.u.chunk.size;
@@ -4182,7 +4183,7 @@ H5D__mpio_collective_filtered_chunk_read(H5D_filtered_collective_io_info_t *chun
/* Get a dataspace for filling chunk memory buffers */
if (NULL == (fill_space = H5S_create_simple(di->dset->shared->layout.u.chunk.ndims - 1,
chunk_dims, NULL)))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create chunk fill dataspace")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create chunk fill dataspace");
/* Initialize fill value buffer */
if (H5D__fill_init(
@@ -4190,7 +4191,7 @@ H5D__mpio_collective_filtered_chunk_read(H5D_filtered_collective_io_info_t *chun
(void *)&di->dset->shared->dcpl_cache.pline, (H5MM_free_t)H5D__chunk_mem_free,
(void *)&di->dset->shared->dcpl_cache.pline, &di->dset->shared->dcpl_cache.fill,
di->dset->shared->type, di->dset->shared->type_id, 0, file_chunk_size) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize fill value buffer")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize fill value buffer");
fb_info_init = TRUE;
}
@@ -4199,7 +4200,8 @@ H5D__mpio_collective_filtered_chunk_read(H5D_filtered_collective_io_info_t *chun
assert(fb_info.fill_buf);
if (H5D__fill(fb_info.fill_buf, di->dset->shared->type, chunk_entry->buf,
di->type_info.mem_type, fill_space) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "couldn't fill chunk buffer with fill value")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
+ "couldn't fill chunk buffer with fill value");
}
}
}
@@ -4212,12 +4214,12 @@ H5D__mpio_collective_filtered_chunk_read(H5D_filtered_collective_io_info_t *chun
index_empty = FALSE;
if (di->dset->shared->dcpl_cache.fill.alloc_time == H5D_ALLOC_TIME_INCR)
if (H5D__chunk_index_empty(di->dset, &index_empty) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "couldn't determine if chunk index is empty")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "couldn't determine if chunk index is empty");
if (!index_empty) {
/* Perform collective vector read */
if (H5D__mpio_collective_filtered_vec_io(chunk_list, io_info->f_sh, H5D_IO_OP_READ) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't perform vector I/O on filtered chunks")
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't perform vector I/O on filtered chunks");
}
/*
@@ -4234,7 +4236,7 @@ H5D__mpio_collective_filtered_chunk_read(H5D_filtered_collective_io_info_t *chun
&(chunk_entry->index_info.filter_mask), err_detect, filter_cb,
(size_t *)&chunk_entry->chunk_new.length, &chunk_entry->chunk_buf_size,
&chunk_entry->buf) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTFILTER, FAIL, "couldn't unfilter chunk for modifying")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTFILTER, FAIL, "couldn't unfilter chunk for modifying");
}
/* Scatter the chunk data to the read buffer */
@@ -4242,7 +4244,7 @@ H5D__mpio_collective_filtered_chunk_read(H5D_filtered_collective_io_info_t *chun
if (H5D_select_io_mem(di->buf.vp, chunk_info->mspace, chunk_entry->buf, chunk_info->fspace,
di->type_info.src_type_size, (size_t)iter_nelmts) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't copy chunk data to read buffer")
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't copy chunk data to read buffer");
}
done:
@@ -4322,9 +4324,9 @@ H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_io_info_t *ch
if (chunk_list->num_chunk_infos > 0) {
/* Retrieve filter settings from API context */
if (H5CX_get_err_detect(&err_detect) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get error detection info")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get error detection info");
if (H5CX_get_filter_cb(&filter_cb) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get I/O filter callback function")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get I/O filter callback function");
/* Set size of full chunks in dataset */
file_chunk_size = di->dset->shared->layout.u.chunk.size;
@@ -4418,7 +4420,7 @@ H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_io_info_t *ch
if (NULL == (fill_space = H5S_create_simple(
di->dset->shared->layout.u.chunk.ndims - 1, chunk_dims, NULL)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
- "unable to create chunk fill dataspace")
+ "unable to create chunk fill dataspace");
/* Initialize fill value buffer */
if (H5D__fill_init(&fb_info, NULL, (H5MM_allocate_t)H5D__chunk_mem_alloc,
@@ -4427,7 +4429,8 @@ H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_io_info_t *ch
(void *)&di->dset->shared->dcpl_cache.pline,
&di->dset->shared->dcpl_cache.fill, di->dset->shared->type,
di->dset->shared->type_id, 0, file_chunk_size) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize fill value buffer")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
+ "can't initialize fill value buffer");
fb_info_init = TRUE;
}
@@ -4437,7 +4440,7 @@ H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_io_info_t *ch
if (H5D__fill(fb_info.fill_buf, di->dset->shared->type, chunk_entry->buf,
type_info->mem_type, fill_space) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
- "couldn't fill chunk buffer with fill value")
+ "couldn't fill chunk buffer with fill value");
}
}
}
@@ -4453,12 +4456,12 @@ H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_io_info_t *ch
index_empty = FALSE;
if (di->dset->shared->dcpl_cache.fill.alloc_time == H5D_ALLOC_TIME_INCR)
if (H5D__chunk_index_empty(di->dset, &index_empty) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "couldn't determine if chunk index is empty")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "couldn't determine if chunk index is empty");
if (!index_empty) {
/* Perform collective vector read */
if (H5D__mpio_collective_filtered_vec_io(chunk_list, io_info->f_sh, H5D_IO_OP_READ) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't perform vector I/O on filtered chunks")
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't perform vector I/O on filtered chunks");
}
/*
@@ -4482,19 +4485,19 @@ H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_io_info_t *ch
&(chunk_entry->index_info.filter_mask), err_detect, filter_cb,
(size_t *)&chunk_entry->chunk_new.length, &chunk_entry->chunk_buf_size,
&chunk_entry->buf) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTFILTER, FAIL, "couldn't unfilter chunk for modifying")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTFILTER, FAIL, "couldn't unfilter chunk for modifying");
}
iter_nelmts = H5S_GET_SELECT_NPOINTS(chunk_info->mspace);
if (H5D_select_io_mem(chunk_entry->buf, chunk_info->fspace, di->buf.cvp, chunk_info->mspace,
type_info->dst_type_size, (size_t)iter_nelmts) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "couldn't copy chunk data to write buffer")
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "couldn't copy chunk data to write buffer");
}
/* Allocate iterator for memory selection */
if (NULL == (sel_iter = H5FL_MALLOC(H5S_sel_iter_t)))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate memory iterator")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate memory iterator");
/* Now process all received chunk message buffers */
for (i = 0; i < (size_t)chunk_msg_bufs_len; i++) {
@@ -4510,9 +4513,9 @@ H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_io_info_t *ch
/* Find the chunk entry according to its chunk index */
HASH_FIND(hh, chunk_list->chunk_hash_table, &chunk_idx, sizeof(hsize_t), chunk_entry);
if (chunk_entry == NULL)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTFIND, FAIL, "unable to find chunk entry")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTFIND, FAIL, "unable to find chunk entry");
if (mpi_rank != chunk_entry->new_owner)
- HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "chunk owner set to incorrect MPI rank")
+ HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "chunk owner set to incorrect MPI rank");
/*
* Only process the chunk if its data buffer is allocated.
@@ -4525,27 +4528,27 @@ H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_io_info_t *ch
else {
/* Decode the chunk file dataspace from the message */
if (NULL == (dataspace = H5S_decode(&msg_ptr)))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTDECODE, FAIL, "unable to decode dataspace")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTDECODE, FAIL, "unable to decode dataspace");
if (H5S_select_iter_init(sel_iter, dataspace, type_info->dst_type_size,
H5S_SEL_ITER_SHARE_WITH_DATASPACE) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
- "unable to initialize memory selection information")
+ "unable to initialize memory selection information");
sel_iter_init = TRUE;
iter_nelmts = H5S_GET_SELECT_NPOINTS(dataspace);
/* Update the chunk data with the received modification data */
if (H5D__scatter_mem(msg_ptr, sel_iter, (size_t)iter_nelmts, chunk_entry->buf) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "couldn't scatter to write buffer")
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "couldn't scatter to write buffer");
if (H5S_SELECT_ITER_RELEASE(sel_iter) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "couldn't release selection iterator")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "couldn't release selection iterator");
sel_iter_init = FALSE;
if (dataspace) {
if (H5S_close(dataspace) < 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTFREE, FAIL, "can't close dataspace")
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTFREE, FAIL, "can't close dataspace");
dataspace = NULL;
}
@@ -4562,13 +4565,13 @@ H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_io_info_t *ch
&(chunk_list->chunk_infos[i].index_info.filter_mask), err_detect, filter_cb,
(size_t *)&chunk_list->chunk_infos[i].chunk_new.length,
&chunk_list->chunk_infos[i].chunk_buf_size, &chunk_list->chunk_infos[i].buf) < 0)
- HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, FAIL, "output pipeline failed")
+ HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, FAIL, "output pipeline failed");
}
#if H5_SIZEOF_SIZE_T > 4
/* Check for the chunk expanding too much to encode in a 32-bit value */
if (chunk_list->chunk_infos[i].chunk_new.length > ((size_t)0xffffffff))
- HGOTO_ERROR(H5E_DATASET, H5E_BADRANGE, FAIL, "chunk too large for 32-bit length")
+ HGOTO_ERROR(H5E_DATASET, H5E_BADRANGE, FAIL, "chunk too large for 32-bit length");
#endif
}
@@ -4661,7 +4664,7 @@ H5D__mpio_collective_filtered_chunk_reallocate(H5D_filtered_collective_io_info_t
if (H5D__mpio_get_chunk_alloc_info_types(&recv_type, &recv_type_derived, &send_type, &send_type_derived) <
0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL,
- "can't create derived datatypes for chunk file space info")
+ "can't create derived datatypes for chunk file space info");
/*
* Gather the new chunk sizes to all ranks for a collective reallocation
@@ -4698,7 +4701,8 @@ H5D__mpio_collective_filtered_chunk_reallocate(H5D_filtered_collective_io_info_t
if (H5_mpio_gatherv_alloc(chunk_list->chunk_infos, (int)chunk_list->num_chunk_infos, send_type,
counts_ptr, displacements_ptr, recv_type, TRUE, 0, io_info->comm, mpi_rank,
mpi_size, &gathered_array, &collective_num_entries) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGATHER, FAIL, "can't gather chunk file space info to/from ranks")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGATHER, FAIL,
+ "can't gather chunk file space info to/from ranks");
}
else {
/*
@@ -4711,7 +4715,8 @@ H5D__mpio_collective_filtered_chunk_reallocate(H5D_filtered_collective_io_info_t
if (H5_mpio_gatherv_alloc_simple(chunk_list->chunk_infos, (int)chunk_list->num_chunk_infos, send_type,
recv_type, TRUE, 0, io_info->comm, mpi_rank, mpi_size,
&gathered_array, &collective_num_entries) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGATHER, FAIL, "can't gather chunk file space info to/from ranks")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGATHER, FAIL,
+ "can't gather chunk file space info to/from ranks");
}
/* Collectively re-allocate the modified chunks (from each rank) in the file */
@@ -4723,7 +4728,7 @@ H5D__mpio_collective_filtered_chunk_reallocate(H5D_filtered_collective_io_info_t
if (H5D__chunk_file_alloc(idx_info, &coll_entry->chunk_current, &coll_entry->chunk_new, &need_insert,
NULL) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "unable to allocate chunk")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "unable to allocate chunk");
/*
* If we just re-allocated a chunk that is local to this
@@ -4856,7 +4861,7 @@ H5D__mpio_collective_filtered_chunk_reinsert(H5D_filtered_collective_io_info_t *
if (H5D__mpio_get_chunk_insert_info_types(&recv_type, &recv_type_derived, &send_type,
&send_type_derived) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL,
- "can't create derived datatypes for chunk re-insertion info")
+ "can't create derived datatypes for chunk re-insertion info");
/*
* Gather information to all ranks for a collective re-insertion
@@ -4894,7 +4899,7 @@ H5D__mpio_collective_filtered_chunk_reinsert(H5D_filtered_collective_io_info_t *
counts_ptr, displacements_ptr, recv_type, TRUE, 0, io_info->comm, mpi_rank,
mpi_size, &gathered_array, &collective_num_entries) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGATHER, FAIL,
- "can't gather chunk index re-insertion info to/from ranks")
+ "can't gather chunk index re-insertion info to/from ranks");
}
else {
/*
@@ -4908,7 +4913,7 @@ H5D__mpio_collective_filtered_chunk_reinsert(H5D_filtered_collective_io_info_t *
recv_type, TRUE, 0, io_info->comm, mpi_rank, mpi_size,
&gathered_array, &collective_num_entries) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGATHER, FAIL,
- "can't gather chunk index re-insertion info to/from ranks")
+ "can't gather chunk index re-insertion info to/from ranks");
}
/* Initialize static chunk udata fields from chunk index info */
@@ -4979,7 +4984,7 @@ H5D__mpio_collective_filtered_chunk_reinsert(H5D_filtered_collective_io_info_t *
#endif
if ((idx_info->storage->ops->insert)(idx_info, &chunk_ud, di->dset) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert chunk address into index")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert chunk address into index");
}
done:
@@ -5054,7 +5059,7 @@ H5D__mpio_get_chunk_redistribute_info_types(MPI_Datatype *contig_type, hbool_t *
/* Create struct type for the inner H5F_block_t structure */
if (H5F_mpi_get_file_block_type(FALSE, &chunk_block_type, &chunk_block_type_derived) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't create derived type for chunk file description")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't create derived type for chunk file description");
field_count = 5;
assert(field_count == (sizeof(types) / sizeof(MPI_Datatype)));
@@ -5195,7 +5200,7 @@ H5D__mpio_get_chunk_alloc_info_types(MPI_Datatype *contig_type, hbool_t *contig_
/* Create struct type for the inner H5F_block_t structure */
if (H5F_mpi_get_file_block_type(FALSE, &chunk_block_type, &chunk_block_type_derived) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't create derived type for chunk file description")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't create derived type for chunk file description");
field_count = 3;
assert(field_count == (sizeof(types) / sizeof(MPI_Datatype)));
@@ -5326,7 +5331,7 @@ H5D__mpio_get_chunk_insert_info_types(MPI_Datatype *contig_type, hbool_t *contig
/* Create struct type for an H5F_block_t structure */
if (H5F_mpi_get_file_block_type(FALSE, &chunk_block_type, &chunk_block_type_derived) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't create derived type for chunk file description")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't create derived type for chunk file description");
field_count = 4;
assert(field_count == (sizeof(types) / sizeof(MPI_Datatype)));
@@ -5454,22 +5459,23 @@ H5D__mpio_collective_filtered_vec_io(const H5D_filtered_collective_io_info_t *ch
if (iovec_count > 0) {
if (chunk_list->num_chunk_infos > UINT32_MAX)
HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL,
- "number of chunk entries in I/O operation exceeds UINT32_MAX")
+ "number of chunk entries in I/O operation exceeds UINT32_MAX");
if (NULL == (io_addrs = H5MM_malloc(iovec_count * sizeof(*io_addrs))))
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "couldn't allocate space for I/O addresses vector")
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+ "couldn't allocate space for I/O addresses vector");
if (NULL == (io_sizes = H5MM_malloc(iovec_count * sizeof(*io_sizes))))
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "couldn't allocate space for I/O sizes vector")
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "couldn't allocate space for I/O sizes vector");
if (op_type == H5D_IO_OP_WRITE) {
if (NULL == (io_wbufs = H5MM_malloc(iovec_count * sizeof(*io_wbufs))))
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
- "couldn't allocate space for I/O buffers vector")
+ "couldn't allocate space for I/O buffers vector");
}
else {
if (NULL == (io_rbufs = H5MM_malloc(iovec_count * sizeof(*io_rbufs))))
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
- "couldn't allocate space for I/O buffers vector")
+ "couldn't allocate space for I/O buffers vector");
}
/*
@@ -5526,11 +5532,11 @@ H5D__mpio_collective_filtered_vec_io(const H5D_filtered_collective_io_info_t *ch
if (op_type == H5D_IO_OP_WRITE) {
if (H5F_shared_vector_write(f_sh, iovec_count, io_types, io_addrs, io_sizes, io_wbufs) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "vector write call failed")
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "vector write call failed");
}
else {
if (H5F_shared_vector_read(f_sh, iovec_count, io_types, io_addrs, io_sizes, io_rbufs) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "vector read call failed")
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "vector read call failed");
}
done: