path: root/src/H5FDsubfiling
author     Dana Robinson <43805+derobins@users.noreply.github.com>   2023-06-28 14:31:56 (GMT)
committer  GitHub <noreply@github.com>                               2023-06-28 14:31:56 (GMT)
commit     7a44581a84778a1346a2fd5b6cca7d9db905a321 (patch)
tree       44ea9c2d1b471eb227698abe8499c34cfa6d47d2 /src/H5FDsubfiling
parent     622fcbd13881fbc58bbeaed3062583b759f5e864 (diff)
download   hdf5-7a44581a84778a1346a2fd5b6cca7d9db905a321.zip
           hdf5-7a44581a84778a1346a2fd5b6cca7d9db905a321.tar.gz
           hdf5-7a44581a84778a1346a2fd5b6cca7d9db905a321.tar.bz2
Rename HDassert() to assert() (#3191)
* Change HDassert to assert
* Fix bin/make_err
Diffstat (limited to 'src/H5FDsubfiling')
-rw-r--r--  src/H5FDsubfiling/H5FDioc.c            |  48
-rw-r--r--  src/H5FDsubfiling/H5FDioc_int.c        |  26
-rw-r--r--  src/H5FDsubfiling/H5FDioc_priv.h       |  30
-rw-r--r--  src/H5FDsubfiling/H5FDioc_threads.c    | 122
-rw-r--r--  src/H5FDsubfiling/H5FDsubfile_int.c    |  16
-rw-r--r--  src/H5FDsubfiling/H5FDsubfiling.c      | 136
-rw-r--r--  src/H5FDsubfiling/H5subfiling_common.c | 164
7 files changed, 271 insertions(+), 271 deletions(-)
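
The change below is mechanical: every call site that used the HD-prefixed wrapper macro now calls the standard C assert() directly, so behavior is unchanged. A minimal sketch of the pattern follows; the HDassert definition shown is an assumption about how the wrapper was declared (roughly a pass-through to assert()), not text quoted from H5private.h.

    #include <assert.h>

    /* Presumed former wrapper (assumption, for illustration only):
     *     #define HDassert(X) assert(X)
     * After this commit, call sites invoke the standard macro directly. */
    static void
    example_validate_arg(const void *config_out)
    {
        assert(config_out);   /* was: HDassert(config_out); */
    }
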
diff --git a/src/H5FDsubfiling/H5FDioc.c b/src/H5FDsubfiling/H5FDioc.c
index d403e3c..2211e6e 100644
--- a/src/H5FDsubfiling/H5FDioc.c
+++ b/src/H5FDsubfiling/H5FDioc.c
@@ -422,7 +422,7 @@ H5FD__ioc_get_default_config(H5FD_ioc_config_t *config_out)
{
herr_t ret_value = SUCCEED;
- HDassert(config_out);
+ assert(config_out);
HDmemset(config_out, 0, sizeof(*config_out));
@@ -455,7 +455,7 @@ H5FD__ioc_validate_config(const H5FD_ioc_config_t *fa)
{
herr_t ret_value = SUCCEED;
- HDassert(fa != NULL);
+ assert(fa != NULL);
if (fa->version != H5FD_IOC_CURR_FAPL_VERSION)
H5_SUBFILING_GOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Unknown H5FD_ioc_config_t version");
@@ -660,7 +660,7 @@ H5FD__ioc_fapl_copy(const void *_old_fa)
H5FD_IOC_LOG_CALL(__func__);
- HDassert(old_fa_ptr);
+ assert(old_fa_ptr);
new_fa_ptr = H5FL_CALLOC(H5FD_ioc_config_t);
if (NULL == new_fa_ptr)
@@ -695,7 +695,7 @@ H5FD__ioc_fapl_free(void *_fapl)
H5FD_IOC_LOG_CALL(__func__);
/* Check arguments */
- HDassert(fapl);
+ assert(fapl);
/* Free the property list */
fapl = H5FL_FREE(H5FD_ioc_config_t, fapl);
@@ -904,7 +904,7 @@ H5FD__ioc_close_int(H5FD_ioc_t *file_ptr)
int mpi_code;
herr_t ret_value = SUCCEED;
- HDassert(file_ptr);
+ assert(file_ptr);
if (MPI_SUCCESS != (mpi_code = MPI_Finalized(&mpi_finalized)))
H5_SUBFILING_MPI_GOTO_ERROR(FAIL, "MPI_Finalized failed", mpi_code);
@@ -990,8 +990,8 @@ H5FD__ioc_cmp(const H5FD_t *_f1, const H5FD_t *_f2)
H5FD_IOC_LOG_CALL(__func__);
- HDassert(f1);
- HDassert(f2);
+ assert(f1);
+ assert(f2);
ret_value = (f1->file_id > f2->file_id) - (f1->file_id < f2->file_id);
@@ -1108,7 +1108,7 @@ H5FD__ioc_get_eoa(const H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type)
H5FD_IOC_LOG_CALL(__func__);
/* Sanity check */
- HDassert(file);
+ assert(file);
ret_value = file->eoa;
@@ -1134,7 +1134,7 @@ H5FD__ioc_set_eoa(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, haddr_t addr)
H5FD_IOC_LOG_CALL(__func__);
/* Sanity check */
- HDassert(file);
+ assert(file);
file->eoa = addr;
@@ -1163,7 +1163,7 @@ H5FD__ioc_get_eof(const H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type)
H5FD_IOC_LOG_CALL(__func__);
/* Sanity check */
- HDassert(file);
+ assert(file);
sf_context = H5_get_subfiling_object(file->context_id);
if (sf_context) {
@@ -1222,8 +1222,8 @@ H5FD__ioc_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNUS
H5FD_IOC_LOG_CALL(__func__);
- HDassert(file && file->pub.cls);
- HDassert(buf);
+ assert(file && file->pub.cls);
+ assert(buf);
/* Check for overflow conditions */
if (!H5_addr_defined(addr))
@@ -1382,7 +1382,7 @@ H5FD__ioc_truncate(H5FD_t *_file, hid_t H5_ATTR_UNUSED dxpl_id, hbool_t H5_ATTR_
H5FD_IOC_LOG_CALL(__func__);
- HDassert(file);
+ assert(file);
/* TODO: placeholder for now since Subfiling does the truncation */
if (!H5_addr_eq(file->eoa, file->last_eoa)) {
@@ -1454,7 +1454,7 @@ H5FD__ioc_del(const char *name, hid_t fapl)
if (NULL == (plist = H5P_object_verify(fapl, H5P_FILE_ACCESS)))
H5_SUBFILING_GOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a file access property list");
- HDassert(H5FD_IOC == H5P_peek_driver(plist));
+ assert(H5FD_IOC == H5P_peek_driver(plist));
if (H5FD_mpi_self_initialized) {
comm = MPI_COMM_WORLD;
@@ -1610,10 +1610,10 @@ H5FD__ioc_write_vector_internal(H5FD_t *_file, uint32_t count, H5FD_mem_t H5_ATT
int64_t sf_context_id = -1;
herr_t ret_value = SUCCEED;
- HDassert(_file);
- HDassert(addrs);
- HDassert(sizes);
- HDassert(bufs);
+ assert(_file);
+ assert(addrs);
+ assert(sizes);
+ assert(bufs);
if (count == 0)
H5_SUBFILING_GOTO_DONE(SUCCEED);
@@ -1622,7 +1622,7 @@ H5FD__ioc_write_vector_internal(H5FD_t *_file, uint32_t count, H5FD_mem_t H5_ATT
if (NULL == (sf_context = H5_get_subfiling_object(sf_context_id)))
H5_SUBFILING_GOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "can't get subfiling context from ID");
- HDassert(sf_context->topology);
+ assert(sf_context->topology);
/*
* Allocate an array of I/O requests and an array twice that size for
@@ -1691,10 +1691,10 @@ H5FD__ioc_read_vector_internal(H5FD_t *_file, uint32_t count, haddr_t addrs[], s
int64_t sf_context_id = -1;
herr_t ret_value = SUCCEED;
- HDassert(_file);
- HDassert(addrs);
- HDassert(sizes);
- HDassert(bufs);
+ assert(_file);
+ assert(addrs);
+ assert(sizes);
+ assert(bufs);
if (count == 0)
H5_SUBFILING_GOTO_DONE(SUCCEED);
@@ -1703,7 +1703,7 @@ H5FD__ioc_read_vector_internal(H5FD_t *_file, uint32_t count, haddr_t addrs[], s
if (NULL == (sf_context = H5_get_subfiling_object(sf_context_id)))
H5_SUBFILING_GOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "can't get subfiling context from ID");
- HDassert(sf_context->topology);
+ assert(sf_context->topology);
/*
* Allocate an array of I/O requests and an array for MPI_Request
diff --git a/src/H5FDsubfiling/H5FDioc_int.c b/src/H5FDsubfiling/H5FDioc_int.c
index ce5a000..afe6b16 100644
--- a/src/H5FDsubfiling/H5FDioc_int.c
+++ b/src/H5FDsubfiling/H5FDioc_int.c
@@ -32,12 +32,12 @@ calculate_target_ioc(int64_t file_offset, int64_t stripe_size, int num_io_concen
int64_t subfile_row;
int64_t subfile_idx;
- HDassert(stripe_size > 0);
- HDassert(num_io_concentrators > 0);
- HDassert(num_subfiles > 0);
- HDassert(target_ioc);
- HDassert(ioc_file_offset);
- HDassert(ioc_subfile_idx);
+ assert(stripe_size > 0);
+ assert(num_io_concentrators > 0);
+ assert(num_subfiles > 0);
+ assert(target_ioc);
+ assert(ioc_file_offset);
+ assert(ioc_subfile_idx);
stripe_idx = file_offset / stripe_size;
subfile_row = stripe_idx / num_subfiles;
@@ -111,12 +111,12 @@ ioc__write_independent_async(int64_t context_id, int64_t offset, int64_t element
int mpi_code;
herr_t ret_value = SUCCEED;
- HDassert(io_req);
+ assert(io_req);
if (NULL == (sf_context = H5_get_subfiling_object(context_id)))
H5_SUBFILING_GOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "can't get subfiling context from ID");
- HDassert(sf_context->topology);
- HDassert(sf_context->topology->io_concentrators);
+ assert(sf_context->topology);
+ assert(sf_context->topology->io_concentrators);
io_concentrators = sf_context->topology->io_concentrators;
num_io_concentrators = sf_context->topology->n_io_concentrators;
@@ -283,14 +283,14 @@ ioc__read_independent_async(int64_t context_id, int64_t offset, int64_t elements
int mpi_code;
herr_t ret_value = SUCCEED;
- HDassert(io_req);
+ assert(io_req);
H5_CHECK_OVERFLOW(elements, int64_t, int);
if (NULL == (sf_context = H5_get_subfiling_object(context_id)))
H5_SUBFILING_GOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "can't get subfiling context from ID");
- HDassert(sf_context->topology);
- HDassert(sf_context->topology->io_concentrators);
+ assert(sf_context->topology);
+ assert(sf_context->topology->io_concentrators);
io_concentrators = sf_context->topology->io_concentrators;
num_io_concentrators = sf_context->topology->n_io_concentrators;
@@ -426,7 +426,7 @@ ioc__async_completion(MPI_Request *mpi_reqs, size_t num_reqs)
herr_t ret_value = SUCCEED;
int mpi_code;
- HDassert(mpi_reqs);
+ assert(mpi_reqs);
H5_CHECK_OVERFLOW(num_reqs, size_t, int);
if (MPI_SUCCESS != (mpi_code = MPI_Waitall((int)num_reqs, mpi_reqs, MPI_STATUSES_IGNORE)))
diff --git a/src/H5FDsubfiling/H5FDioc_priv.h b/src/H5FDsubfiling/H5FDioc_priv.h
index 8dec1a8..cac2806 100644
--- a/src/H5FDsubfiling/H5FDioc_priv.h
+++ b/src/H5FDsubfiling/H5FDioc_priv.h
@@ -79,15 +79,15 @@
#define H5FD_IOC__Q_APPEND(q_ptr, entry_ptr) \
do { \
- HDassert(q_ptr); \
- HDassert((q_ptr)->magic == H5FD_IOC__IO_Q_MAGIC); \
- HDassert((((q_ptr)->q_len == 0) && ((q_ptr)->q_head == NULL) && ((q_ptr)->q_tail == NULL)) || \
+ assert(q_ptr); \
+ assert((q_ptr)->magic == H5FD_IOC__IO_Q_MAGIC); \
+ assert((((q_ptr)->q_len == 0) && ((q_ptr)->q_head == NULL) && ((q_ptr)->q_tail == NULL)) || \
(((q_ptr)->q_len > 0) && ((q_ptr)->q_head != NULL) && ((q_ptr)->q_tail != NULL))); \
- HDassert(entry_ptr); \
- HDassert((entry_ptr)->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC); \
- HDassert((entry_ptr)->next == NULL); \
- HDassert((entry_ptr)->prev == NULL); \
- HDassert((entry_ptr)->in_progress == FALSE); \
+ assert(entry_ptr); \
+ assert((entry_ptr)->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC); \
+ assert((entry_ptr)->next == NULL); \
+ assert((entry_ptr)->prev == NULL); \
+ assert((entry_ptr)->in_progress == FALSE); \
\
if ( ((q_ptr)->q_head) == NULL ) \
{ \
@@ -105,15 +105,15 @@ do {
#define H5FD_IOC__Q_REMOVE(q_ptr, entry_ptr) \
do { \
- HDassert(q_ptr); \
- HDassert((q_ptr)->magic == H5FD_IOC__IO_Q_MAGIC); \
- HDassert((((q_ptr)->q_len == 1) && ((q_ptr)->q_head ==((q_ptr)->q_tail)) && ((q_ptr)->q_head == (entry_ptr))) || \
+ assert(q_ptr); \
+ assert((q_ptr)->magic == H5FD_IOC__IO_Q_MAGIC); \
+ assert((((q_ptr)->q_len == 1) && ((q_ptr)->q_head ==((q_ptr)->q_tail)) && ((q_ptr)->q_head == (entry_ptr))) || \
(((q_ptr)->q_len > 0) && ((q_ptr)->q_head != NULL) && ((q_ptr)->q_tail != NULL))); \
- HDassert(entry_ptr); \
- HDassert((entry_ptr)->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC); \
- HDassert((((q_ptr)->q_len == 1) && ((entry_ptr)->next == NULL) && ((entry_ptr)->prev == NULL)) || \
+ assert(entry_ptr); \
+ assert((entry_ptr)->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC); \
+ assert((((q_ptr)->q_len == 1) && ((entry_ptr)->next == NULL) && ((entry_ptr)->prev == NULL)) || \
(((q_ptr)->q_len > 1) && (((entry_ptr)->next != NULL) || ((entry_ptr)->prev != NULL)))); \
- HDassert((entry_ptr)->in_progress == TRUE); \
+ assert((entry_ptr)->in_progress == TRUE); \
\
{ \
if ( (((q_ptr)->q_head)) == (entry_ptr) ) \
diff --git a/src/H5FDsubfiling/H5FDioc_threads.c b/src/H5FDsubfiling/H5FDioc_threads.c
index abf816d..579b686 100644
--- a/src/H5FDsubfiling/H5FDioc_threads.c
+++ b/src/H5FDsubfiling/H5FDioc_threads.c
@@ -119,7 +119,7 @@ initialize_ioc_threads(void *_sf_context)
double t_start = 0.0, t_end = 0.0;
#endif
- HDassert(sf_context);
+ assert(sf_context);
/*
* Allocate and initialize IOC data that will be passed
@@ -213,12 +213,12 @@ finalize_ioc_threads(void *_sf_context)
ioc_data_t *ioc_data = NULL;
int ret_value = 0;
- HDassert(sf_context);
- HDassert(sf_context->topology->rank_is_ioc);
+ assert(sf_context);
+ assert(sf_context->topology->rank_is_ioc);
ioc_data = sf_context->ioc_data;
if (ioc_data) {
- HDassert(0 == atomic_load(&ioc_data->sf_shutdown_flag));
+ assert(0 == atomic_load(&ioc_data->sf_shutdown_flag));
/* Shutdown the main IOC thread */
atomic_store(&ioc_data->sf_shutdown_flag, 1);
@@ -229,7 +229,7 @@ finalize_ioc_threads(void *_sf_context)
} while (0 != atomic_load(&ioc_data->sf_shutdown_flag));
/* Tear down IOC worker thread pool */
- HDassert(0 == atomic_load(&ioc_data->sf_io_ops_pending));
+ assert(0 == atomic_load(&ioc_data->sf_io_ops_pending));
hg_thread_pool_destroy(ioc_data->io_thread_pool);
hg_thread_mutex_destroy(&ioc_data->io_queue.q_mutex);
@@ -345,10 +345,10 @@ ioc_main(ioc_data_t *ioc_data)
int shutdown_requested;
int ret_value = 0;
- HDassert(ioc_data);
+ assert(ioc_data);
context = H5_get_subfiling_object(ioc_data->sf_context_id);
- HDassert(context);
+ assert(context);
/* We can't have opened any files at this point..
* The file open approach has changed so that the normal
@@ -416,7 +416,7 @@ ioc_main(ioc_data_t *ioc_data)
ioc_io_queue_add_entry(ioc_data, &wk_req);
- HDassert(atomic_load(&ioc_data->sf_io_ops_pending) >= 0);
+ assert(atomic_load(&ioc_data->sf_io_ops_pending) >= 0);
}
else {
struct timespec sleep_spec = {0, IOC_MAIN_SLEEP_DELAY};
@@ -501,15 +501,15 @@ handle_work_request(void *arg)
int op_ret;
hg_thread_ret_t ret_value = 0;
- HDassert(q_entry_ptr);
- HDassert(q_entry_ptr->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC);
- HDassert(q_entry_ptr->in_progress);
+ assert(q_entry_ptr);
+ assert(q_entry_ptr->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC);
+ assert(q_entry_ptr->in_progress);
sf_context = H5_get_subfiling_object(file_context_id);
- HDassert(sf_context);
+ assert(sf_context);
ioc_data = sf_context->ioc_data;
- HDassert(ioc_data);
+ assert(ioc_data);
atomic_fetch_add(&ioc_data->sf_work_pending, 1);
@@ -559,14 +559,14 @@ handle_work_request(void *arg)
#ifdef H5FD_IOC_DEBUG
{
int curr_io_ops_pending = atomic_load(&ioc_data->sf_io_ops_pending);
- HDassert(curr_io_ops_pending > 0);
+ assert(curr_io_ops_pending > 0);
}
#endif
/* complete the I/O request */
ioc_io_queue_complete_entry(ioc_data, q_entry_ptr);
- HDassert(atomic_load(&ioc_data->sf_io_ops_pending) >= 0);
+ assert(atomic_load(&ioc_data->sf_io_ops_pending) >= 0);
/* Check the I/O Queue to see if there are any dispatchable entries */
ioc_io_queue_dispatch_eligible_entries(ioc_data, 1);
@@ -617,7 +617,7 @@ send_ack_to_client(int ack_val, int dest_rank, int source_rank, int msg_tag, MPI
int mpi_code;
herr_t ret_value = SUCCEED;
- HDassert(ack_val > 0);
+ assert(ack_val > 0);
(void)source_rank;
@@ -697,7 +697,7 @@ ioc_file_queue_write_indep(sf_work_request_t *msg, int ioc_idx, int source, MPI_
int mpi_code;
int ret_value = 0;
- HDassert(msg);
+ assert(msg);
file_context_id = msg->context_id;
@@ -712,7 +712,7 @@ ioc_file_queue_write_indep(sf_work_request_t *msg, int ioc_idx, int source, MPI_
}
sf_context = H5_get_subfiling_object(file_context_id);
- HDassert(sf_context);
+ assert(sf_context);
stripe_id = file_offset + data_size;
sf_eof = (haddr_t)(stripe_id % sf_context->sf_stripe_size);
@@ -751,7 +751,7 @@ ioc_file_queue_write_indep(sf_work_request_t *msg, int ioc_idx, int source, MPI_
* allows us to distinguish between multiple concurrent
* writes from a single rank.
*/
- HDassert(H5FD_IOC_tag_ub_val_ptr && (*H5FD_IOC_tag_ub_val_ptr >= IO_TAG_BASE));
+ assert(H5FD_IOC_tag_ub_val_ptr && (*H5FD_IOC_tag_ub_val_ptr >= IO_TAG_BASE));
rcv_tag = (int)(counter % (INT_MAX - IO_TAG_BASE));
rcv_tag %= (*H5FD_IOC_tag_ub_val_ptr - IO_TAG_BASE);
rcv_tag += IO_TAG_BASE;
@@ -787,7 +787,7 @@ ioc_file_queue_write_indep(sf_work_request_t *msg, int ioc_idx, int source, MPI_
#endif
- HDassert(subfile_idx < sf_context->sf_num_fids);
+ assert(subfile_idx < sf_context->sf_num_fids);
sf_fid = sf_context->sf_fids[subfile_idx];
#ifdef H5_SUBFILING_DEBUG
@@ -883,12 +883,12 @@ ioc_file_queue_read_indep(sf_work_request_t *msg, int ioc_idx, int source, MPI_C
int mpi_code;
int ret_value = 0;
- HDassert(msg);
+ assert(msg);
file_context_id = msg->context_id;
sf_context = H5_get_subfiling_object(file_context_id);
- HDassert(sf_context);
+ assert(sf_context);
/*
* If we are using 1 subfile per IOC, we can optimize reads
@@ -952,7 +952,7 @@ ioc_file_queue_read_indep(sf_work_request_t *msg, int ioc_idx, int source, MPI_C
* reads from a single rank, which can happen when a rank
* owns multiple subfiles.
*/
- HDassert(H5FD_IOC_tag_ub_val_ptr && (*H5FD_IOC_tag_ub_val_ptr >= IO_TAG_BASE));
+ assert(H5FD_IOC_tag_ub_val_ptr && (*H5FD_IOC_tag_ub_val_ptr >= IO_TAG_BASE));
send_tag = (int)(counter % (INT_MAX - IO_TAG_BASE));
send_tag %= (*H5FD_IOC_tag_ub_val_ptr - IO_TAG_BASE);
send_tag += IO_TAG_BASE;
@@ -964,7 +964,7 @@ ioc_file_queue_read_indep(sf_work_request_t *msg, int ioc_idx, int source, MPI_C
}
/* Read data from the subfile */
- HDassert(subfile_idx < sf_context->sf_num_fids);
+ assert(subfile_idx < sf_context->sf_num_fids);
sf_fid = sf_context->sf_fids[subfile_idx];
if (sf_fid < 0)
H5_SUBFILING_GOTO_ERROR(H5E_IO, H5E_BADVALUE, -1, "subfile file descriptor %d is invalid", sf_fid);
@@ -1107,7 +1107,7 @@ ioc_file_read_data(int fd, int64_t file_offset, void *data_buffer, int64_t data_
file_offset += bytes_read;
}
else if (bytes_read == 0) {
- HDassert(bytes_remaining > 0);
+ assert(bytes_remaining > 0);
/* end of file but not end of format address space */
HDmemset(this_buffer, 0, (size_t)bytes_remaining);
@@ -1145,7 +1145,7 @@ ioc_file_truncate(sf_work_request_t *msg)
int mpi_code;
int ret_value = 0;
- HDassert(msg);
+ assert(msg);
file_context_id = msg->context_id;
ioc_idx = msg->ioc_idx;
@@ -1160,7 +1160,7 @@ ioc_file_truncate(sf_work_request_t *msg)
if (NULL == (sf_context = H5_get_subfiling_object(file_context_id)))
H5_SUBFILING_GOTO_ERROR(H5E_FILE, H5E_CANTGET, -1, "couldn't retrieve subfiling context");
- HDassert(subfile_idx < sf_context->sf_num_fids);
+ assert(subfile_idx < sf_context->sf_num_fids);
fd = sf_context->sf_fids[subfile_idx];
@@ -1219,7 +1219,7 @@ ioc_file_report_eof(sf_work_request_t *msg, MPI_Comm comm)
int mpi_code;
int ret_value = 0;
- HDassert(msg);
+ assert(msg);
file_context_id = msg->context_id;
source = msg->source;
@@ -1230,7 +1230,7 @@ ioc_file_report_eof(sf_work_request_t *msg, MPI_Comm comm)
if (NULL == (sf_context = H5_get_subfiling_object(file_context_id)))
H5_SUBFILING_GOTO_ERROR(H5E_FILE, H5E_CANTGET, -1, "couldn't retrieve subfiling context");
- HDassert(subfile_idx < sf_context->sf_num_fids);
+ assert(subfile_idx < sf_context->sf_num_fids);
fd = sf_context->sf_fids[subfile_idx];
@@ -1326,21 +1326,21 @@ ioc_io_queue_add_entry(ioc_data_t *ioc_data, sf_work_request_t *wk_req_ptr)
{
ioc_io_queue_entry_t *entry_ptr = NULL;
- HDassert(ioc_data);
- HDassert(ioc_data->io_queue.magic == H5FD_IOC__IO_Q_MAGIC);
- HDassert(wk_req_ptr);
+ assert(ioc_data);
+ assert(ioc_data->io_queue.magic == H5FD_IOC__IO_Q_MAGIC);
+ assert(wk_req_ptr);
entry_ptr = ioc_io_queue_alloc_entry();
- HDassert(entry_ptr);
- HDassert(entry_ptr->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC);
+ assert(entry_ptr);
+ assert(entry_ptr->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC);
HDmemcpy((void *)(&(entry_ptr->wk_req)), (const void *)wk_req_ptr, sizeof(sf_work_request_t));
/* must obtain io_queue mutex before appending */
hg_thread_mutex_lock(&ioc_data->io_queue.q_mutex);
- HDassert(ioc_data->io_queue.q_len == atomic_load(&ioc_data->sf_io_ops_pending));
+ assert(ioc_data->io_queue.q_len == atomic_load(&ioc_data->sf_io_ops_pending));
entry_ptr->counter = ioc_data->io_queue.req_counter++;
@@ -1360,7 +1360,7 @@ ioc_io_queue_add_entry(ioc_data_t *ioc_data, sf_work_request_t *wk_req_ptr)
atomic_load(&ioc_data->sf_io_ops_pending));
#endif
- HDassert(ioc_data->io_queue.num_pending + ioc_data->io_queue.num_in_progress == ioc_data->io_queue.q_len);
+ assert(ioc_data->io_queue.num_pending + ioc_data->io_queue.num_in_progress == ioc_data->io_queue.q_len);
#ifdef H5FD_IOC_COLLECT_STATS
entry_ptr->q_time = H5_now_usec();
@@ -1398,7 +1398,7 @@ ioc_io_queue_add_entry(ioc_data_t *ioc_data, sf_work_request_t *wk_req_ptr)
}
#endif
- HDassert(ioc_data->io_queue.q_len == atomic_load(&ioc_data->sf_io_ops_pending));
+ assert(ioc_data->io_queue.q_len == atomic_load(&ioc_data->sf_io_ops_pending));
hg_thread_mutex_unlock(&ioc_data->io_queue.q_mutex);
@@ -1457,8 +1457,8 @@ ioc_io_queue_dispatch_eligible_entries(ioc_data_t *ioc_data, hbool_t try_lock)
ioc_io_queue_entry_t *entry_ptr = NULL;
ioc_io_queue_entry_t *scan_ptr = NULL;
- HDassert(ioc_data);
- HDassert(ioc_data->io_queue.magic == H5FD_IOC__IO_Q_MAGIC);
+ assert(ioc_data);
+ assert(ioc_data->io_queue.magic == H5FD_IOC__IO_Q_MAGIC);
if (try_lock) {
if (hg_thread_mutex_try_lock(&ioc_data->io_queue.q_mutex) < 0)
@@ -1470,11 +1470,11 @@ ioc_io_queue_dispatch_eligible_entries(ioc_data_t *ioc_data, hbool_t try_lock)
entry_ptr = ioc_data->io_queue.q_head;
/* sanity check on first element in the I/O queue */
- HDassert((entry_ptr == NULL) || (entry_ptr->prev == NULL));
+ assert((entry_ptr == NULL) || (entry_ptr->prev == NULL));
while ((entry_ptr) && (ioc_data->io_queue.num_pending > 0)) {
- HDassert(entry_ptr->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC);
+ assert(entry_ptr->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC);
/* Check for a get EOF or truncate operation at head of queue */
if (ioc_data->io_queue.q_head->in_progress) {
@@ -1499,7 +1499,7 @@ ioc_io_queue_dispatch_eligible_entries(ioc_data_t *ioc_data, hbool_t try_lock)
scan_ptr = entry_ptr->prev;
- HDassert((scan_ptr == NULL) || (scan_ptr->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC));
+ assert((scan_ptr == NULL) || (scan_ptr->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC));
if ((entry_ptr->wk_req.tag == TRUNC_OP) || (entry_ptr->wk_req.tag == GET_EOF_OP)) {
@@ -1539,18 +1539,18 @@ ioc_io_queue_dispatch_eligible_entries(ioc_data_t *ioc_data, hbool_t try_lock)
if (!conflict_detected) { /* dispatch I/O request */
- HDassert(scan_ptr == NULL);
- HDassert(!entry_ptr->in_progress);
+ assert(scan_ptr == NULL);
+ assert(!entry_ptr->in_progress);
entry_ptr->in_progress = TRUE;
- HDassert(ioc_data->io_queue.num_pending > 0);
+ assert(ioc_data->io_queue.num_pending > 0);
ioc_data->io_queue.num_pending--;
ioc_data->io_queue.num_in_progress++;
- HDassert(ioc_data->io_queue.num_pending + ioc_data->io_queue.num_in_progress ==
- ioc_data->io_queue.q_len);
+ assert(ioc_data->io_queue.num_pending + ioc_data->io_queue.num_in_progress ==
+ ioc_data->io_queue.q_len);
entry_ptr->thread_wk.func = handle_work_request;
entry_ptr->thread_wk.args = entry_ptr;
@@ -1583,7 +1583,7 @@ ioc_io_queue_dispatch_eligible_entries(ioc_data_t *ioc_data, hbool_t try_lock)
entry_ptr = entry_ptr->next;
}
- HDassert(ioc_data->io_queue.q_len == atomic_load(&ioc_data->sf_io_ops_pending));
+ assert(ioc_data->io_queue.q_len == atomic_load(&ioc_data->sf_io_ops_pending));
hg_thread_mutex_unlock(&ioc_data->io_queue.q_mutex);
} /* ioc_io_queue_dispatch_eligible_entries() */
@@ -1616,16 +1616,16 @@ ioc_io_queue_complete_entry(ioc_data_t *ioc_data, ioc_io_queue_entry_t *entry_pt
uint64_t execution_time;
#endif
- HDassert(ioc_data);
- HDassert(ioc_data->io_queue.magic == H5FD_IOC__IO_Q_MAGIC);
- HDassert(entry_ptr);
- HDassert(entry_ptr->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC);
+ assert(ioc_data);
+ assert(ioc_data->io_queue.magic == H5FD_IOC__IO_Q_MAGIC);
+ assert(entry_ptr);
+ assert(entry_ptr->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC);
/* must obtain io_queue mutex before deleting and updating stats */
hg_thread_mutex_lock(&ioc_data->io_queue.q_mutex);
- HDassert(ioc_data->io_queue.num_pending + ioc_data->io_queue.num_in_progress == ioc_data->io_queue.q_len);
- HDassert(ioc_data->io_queue.num_in_progress > 0);
+ assert(ioc_data->io_queue.num_pending + ioc_data->io_queue.num_in_progress == ioc_data->io_queue.q_len);
+ assert(ioc_data->io_queue.num_in_progress > 0);
if (entry_ptr->wk_ret < 0)
ioc_data->io_queue.num_failed++;
@@ -1634,7 +1634,7 @@ ioc_io_queue_complete_entry(ioc_data_t *ioc_data, ioc_io_queue_entry_t *entry_pt
ioc_data->io_queue.num_in_progress--;
- HDassert(ioc_data->io_queue.num_pending + ioc_data->io_queue.num_in_progress == ioc_data->io_queue.q_len);
+ assert(ioc_data->io_queue.num_pending + ioc_data->io_queue.num_in_progress == ioc_data->io_queue.q_len);
atomic_fetch_sub(&ioc_data->sf_io_ops_pending, 1);
@@ -1652,10 +1652,10 @@ ioc_io_queue_complete_entry(ioc_data_t *ioc_data, ioc_io_queue_entry_t *entry_pt
* there aren't other operations in progress
*/
if ((entry_ptr->wk_req.tag == GET_EOF_OP) || (entry_ptr->wk_req.tag == TRUNC_OP))
- HDassert(ioc_data->io_queue.num_in_progress == 0);
+ assert(ioc_data->io_queue.num_in_progress == 0);
#endif
- HDassert(ioc_data->io_queue.q_len == atomic_load(&ioc_data->sf_io_ops_pending));
+ assert(ioc_data->io_queue.q_len == atomic_load(&ioc_data->sf_io_ops_pending));
#ifdef H5FD_IOC_COLLECT_STATS
/* Compute the queued and execution time */
@@ -1696,10 +1696,10 @@ static void
ioc_io_queue_free_entry(ioc_io_queue_entry_t *q_entry_ptr)
{
/* use assertions for error checking, since the following should never fail. */
- HDassert(q_entry_ptr);
- HDassert(q_entry_ptr->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC);
- HDassert(q_entry_ptr->next == NULL);
- HDassert(q_entry_ptr->prev == NULL);
+ assert(q_entry_ptr);
+ assert(q_entry_ptr->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC);
+ assert(q_entry_ptr->next == NULL);
+ assert(q_entry_ptr->prev == NULL);
q_entry_ptr->magic = 0;
diff --git a/src/H5FDsubfiling/H5FDsubfile_int.c b/src/H5FDsubfiling/H5FDsubfile_int.c
index 4c583e8..3a60af1 100644
--- a/src/H5FDsubfiling/H5FDsubfile_int.c
+++ b/src/H5FDsubfiling/H5FDsubfile_int.c
@@ -175,7 +175,7 @@ H5FD__subfiling__truncate_sub_files(hid_t context_id, int64_t logical_file_eof,
}
}
- HDassert(test_file_eof == logical_file_eof);
+ assert(test_file_eof == logical_file_eof);
}
#endif /* NDEBUG */
}
@@ -295,18 +295,18 @@ H5FD__subfiling__get_real_eof(hid_t context_id, int64_t *logical_eof_ptr)
int mpi_code; /* MPI return code */
herr_t ret_value = SUCCEED; /* Return value */
- HDassert(logical_eof_ptr);
+ assert(logical_eof_ptr);
if (NULL == (sf_context = (subfiling_context_t *)H5_get_subfiling_object(context_id)))
H5_SUBFILING_GOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "can't get subfile context");
- HDassert(sf_context->topology);
+ assert(sf_context->topology);
n_io_concentrators = sf_context->topology->n_io_concentrators;
num_subfiles = sf_context->sf_num_subfiles;
- HDassert(n_io_concentrators > 0);
- HDassert(num_subfiles >= n_io_concentrators);
+ assert(n_io_concentrators > 0);
+ assert(num_subfiles >= n_io_concentrators);
if (NULL == (sf_eofs = HDmalloc((size_t)num_subfiles * sizeof(int64_t))))
H5_SUBFILING_GOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "can't allocate subfile EOFs array");
@@ -352,9 +352,9 @@ H5FD__subfiling__get_real_eof(hid_t context_id, int64_t *logical_eof_ptr)
for (int i = 0; i < num_subfiles; i++) {
int ioc_rank = (int)recv_msg[3 * i];
- HDassert(ioc_rank >= 0);
- HDassert(ioc_rank < n_io_concentrators);
- HDassert(sf_eofs[i] == -1);
+ assert(ioc_rank >= 0);
+ assert(ioc_rank < n_io_concentrators);
+ assert(sf_eofs[i] == -1);
sf_eofs[i] = recv_msg[(3 * i) + 1];
}
diff --git a/src/H5FDsubfiling/H5FDsubfiling.c b/src/H5FDsubfiling/H5FDsubfiling.c
index 98a921b..e75b2f4 100644
--- a/src/H5FDsubfiling/H5FDsubfiling.c
+++ b/src/H5FDsubfiling/H5FDsubfiling.c
@@ -585,7 +585,7 @@ H5FD__subfiling_get_default_config(hid_t fapl_id, H5FD_subfiling_config_t *confi
char *h5_require_ioc;
herr_t ret_value = SUCCEED;
- HDassert(config_out);
+ assert(config_out);
HDmemset(config_out, 0, sizeof(*config_out));
@@ -668,7 +668,7 @@ H5FD__subfiling_validate_config(const H5FD_subfiling_config_t *fa)
{
herr_t ret_value = SUCCEED;
- HDassert(fa != NULL);
+ assert(fa != NULL);
if (fa->version != H5FD_SUBFILING_CURR_FAPL_VERSION)
H5_SUBFILING_GOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "unknown H5FD_subfiling_config_t version");
@@ -706,7 +706,7 @@ H5FD__subfiling_sb_size(H5FD_t *_file)
H5FD_subfiling_t *file = (H5FD_subfiling_t *)_file;
hsize_t ret_value = 0;
- HDassert(file);
+ assert(file);
/* Configuration structure magic number */
ret_value += sizeof(uint32_t);
@@ -1012,7 +1012,7 @@ H5FD__copy_plist(hid_t fapl_id, hid_t *id_out_ptr)
int ret_value = 0;
H5P_genplist_t *plist_ptr = NULL;
- HDassert(id_out_ptr != NULL);
+ assert(id_out_ptr != NULL);
if (FALSE == H5P_isa_class(fapl_id, H5P_FILE_ACCESS))
H5_SUBFILING_GOTO_ERROR(H5E_ARGS, H5E_BADTYPE, -1, "not a file access property list");
@@ -1091,7 +1091,7 @@ H5FD__subfiling_fapl_free(void *_fa)
H5FD_subfiling_config_t *fa = (H5FD_subfiling_config_t *)_fa;
herr_t ret_value = SUCCEED;
- HDassert(fa != NULL); /* sanity check */
+ assert(fa != NULL); /* sanity check */
if (fa->ioc_fapl_id >= 0 && H5I_dec_ref(fa->ioc_fapl_id) < 0)
H5_SUBFILING_DONE_ERROR(H5E_PLIST, H5E_CANTDEC, FAIL, "can't close IOC FAPL");
@@ -1318,7 +1318,7 @@ H5FD__subfiling_close_int(H5FD_subfiling_t *file_ptr)
int mpi_code;
herr_t ret_value = SUCCEED;
- HDassert(file_ptr);
+ assert(file_ptr);
if (MPI_SUCCESS != (mpi_code = MPI_Finalized(&mpi_finalized)))
H5_SUBFILING_MPI_GOTO_ERROR(FAIL, "MPI_Finalized failed", mpi_code);
@@ -1404,8 +1404,8 @@ H5FD__subfiling_cmp(const H5FD_t *_f1, const H5FD_t *_f2)
const H5FD_subfiling_t *f2 = (const H5FD_subfiling_t *)_f2;
int ret_value = 0;
- HDassert(f1);
- HDassert(f2);
+ assert(f1);
+ assert(f2);
ret_value = H5FD_cmp(f1->sf_file, f2->sf_file);
@@ -1585,8 +1585,8 @@ H5FD__subfiling_read(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED dxpl_i
int num_subfiles;
herr_t ret_value = SUCCEED;
- HDassert(file_ptr && file_ptr->pub.cls);
- HDassert(buf);
+ assert(file_ptr && file_ptr->pub.cls);
+ assert(buf);
/* Check for overflow conditions */
if (!H5_addr_defined(addr))
@@ -1636,8 +1636,8 @@ H5FD__subfiling_read(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED dxpl_i
* underlying I/O operations.
*/
sf_context = (subfiling_context_t *)H5_get_subfiling_object(file_ptr->context_id);
- HDassert(sf_context);
- HDassert(sf_context->topology);
+ assert(sf_context);
+ assert(sf_context->topology);
num_subfiles = sf_context->sf_num_subfiles;
@@ -1820,8 +1820,8 @@ H5FD__subfiling_write(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED dxpl_
int num_subfiles;
herr_t ret_value = SUCCEED;
- HDassert(file_ptr && file_ptr->pub.cls);
- HDassert(buf);
+ assert(file_ptr && file_ptr->pub.cls);
+ assert(buf);
/* Check for overflow conditions */
if (!H5_addr_defined(addr))
@@ -1868,8 +1868,8 @@ H5FD__subfiling_write(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED dxpl_
* underlying I/O operations.
*/
sf_context = (subfiling_context_t *)H5_get_subfiling_object(file_ptr->context_id);
- HDassert(sf_context);
- HDassert(sf_context->topology);
+ assert(sf_context);
+ assert(sf_context->topology);
num_subfiles = sf_context->sf_num_subfiles;
@@ -2137,8 +2137,8 @@ H5FD__subfiling_read_vector(H5FD_t *_file, hid_t dxpl_id, uint32_t count, H5FD_m
H5FD_mem_t type;
haddr_t eoa;
- HDassert((count == 0) || (sizes[0] != 0));
- HDassert((count == 0) || (types[0] != H5FD_MEM_NOLIST));
+ assert((count == 0) || (sizes[0] != 0));
+ assert((count == 0) || (types[0] != H5FD_MEM_NOLIST));
if (H5CX_get_io_xfer_mode(&xfer_mode) < 0)
H5_SUBFILING_GOTO_ERROR(H5E_CONTEXT, H5E_CANTGET, FAIL,
@@ -2254,7 +2254,7 @@ H5FD__subfiling_write_vector(H5FD_t *_file, hid_t dxpl_id, uint32_t count, H5FD_
H5FD_mpio_xfer_t xfer_mode = H5FD_MPIO_INDEPENDENT;
herr_t ret_value = SUCCEED; /* Return value */
- HDassert(file_ptr != NULL); /* sanity check */
+ assert(file_ptr != NULL); /* sanity check */
/* Check arguments
* RAW - Do we really need to check arguments once again?
@@ -2297,8 +2297,8 @@ H5FD__subfiling_write_vector(H5FD_t *_file, hid_t dxpl_id, uint32_t count, H5FD_
H5FD_mem_t type;
haddr_t eoa;
- HDassert((count == 0) || (sizes[0] != 0));
- HDassert((count == 0) || (types[0] != H5FD_MEM_NOLIST));
+ assert((count == 0) || (sizes[0] != 0));
+ assert((count == 0) || (types[0] != H5FD_MEM_NOLIST));
if (H5CX_get_io_xfer_mode(&xfer_mode) < 0)
H5_SUBFILING_GOTO_ERROR(H5E_CONTEXT, H5E_CANTGET, FAIL,
@@ -2390,7 +2390,7 @@ H5FD__subfiling_truncate(H5FD_t *_file, hid_t H5_ATTR_UNUSED dxpl_id, hbool_t H5
H5FD_subfiling_t *file = (H5FD_subfiling_t *)_file;
herr_t ret_value = SUCCEED; /* Return value */
- HDassert(file);
+ assert(file);
/* Extend the file to make sure it's large enough */
if (!H5_addr_eq(file->eoa, file->last_eoa)) {
@@ -2469,7 +2469,7 @@ H5FD__subfiling_lock(H5FD_t *_file, hbool_t rw)
H5FD_subfiling_t *file = (H5FD_subfiling_t *)_file; /* VFD file struct */
herr_t ret_value = SUCCEED; /* Return value */
- HDassert(file);
+ assert(file);
if (file->fa.require_ioc) {
#ifdef VERBOSE
@@ -2502,7 +2502,7 @@ H5FD__subfiling_unlock(H5FD_t *_file)
H5FD_subfiling_t *file = (H5FD_subfiling_t *)_file; /* VFD file struct */
herr_t ret_value = SUCCEED; /* Return value */
- HDassert(file);
+ assert(file);
if (H5FD_unlock(file->sf_file) < 0)
H5_SUBFILING_SYS_GOTO_ERROR(H5E_FILE, H5E_BADFILE, FAIL, "unable to lock file");
@@ -2581,14 +2581,14 @@ H5FD__subfiling_ctl(H5FD_t *_file, uint64_t op_code, uint64_t flags, const void
herr_t ret_value = SUCCEED; /* Return value */
/* Sanity checks */
- HDassert(file);
- HDassert(H5FD_SUBFILING == file->pub.driver_id);
+ assert(file);
+ assert(H5FD_SUBFILING == file->pub.driver_id);
switch (op_code) {
case H5FD_CTL_GET_MPI_COMMUNICATOR_OPCODE:
- HDassert(output);
- HDassert(*output);
+ assert(output);
+ assert(*output);
/*
* Return a separate MPI communicator to the caller so
@@ -2603,14 +2603,14 @@ H5FD__subfiling_ctl(H5FD_t *_file, uint64_t op_code, uint64_t flags, const void
break;
case H5FD_CTL_GET_MPI_RANK_OPCODE:
- HDassert(output);
- HDassert(*output);
+ assert(output);
+ assert(*output);
**((int **)output) = file->mpi_rank;
break;
case H5FD_CTL_GET_MPI_SIZE_OPCODE:
- HDassert(output);
- HDassert(*output);
+ assert(output);
+ assert(*output);
**((int **)output) = file->mpi_size;
break;
@@ -2740,17 +2740,17 @@ init_indep_io(subfiling_context_t *sf_context, int64_t file_offset, size_t io_ne
int num_subfiles = 0;
herr_t ret_value = SUCCEED;
- HDassert(sf_context);
- HDassert(sf_context->sf_stripe_size > 0);
- HDassert(sf_context->sf_blocksize_per_stripe > 0);
- HDassert(sf_context->sf_num_subfiles > 0);
- HDassert(sf_context->topology);
- HDassert(mem_buf_offset);
- HDassert(target_file_offset);
- HDassert(io_block_len);
- HDassert(first_subfile_index);
- HDassert(n_subfiles_used);
- HDassert(max_io_req_per_subfile);
+ assert(sf_context);
+ assert(sf_context->sf_stripe_size > 0);
+ assert(sf_context->sf_blocksize_per_stripe > 0);
+ assert(sf_context->sf_num_subfiles > 0);
+ assert(sf_context->topology);
+ assert(mem_buf_offset);
+ assert(target_file_offset);
+ assert(io_block_len);
+ assert(first_subfile_index);
+ assert(n_subfiles_used);
+ assert(max_io_req_per_subfile);
*first_subfile_index = 0;
*n_subfiles_used = 0;
@@ -2808,8 +2808,8 @@ init_indep_io(subfiling_context_t *sf_context, int64_t file_offset, size_t io_ne
/* Determine the size of data written to the first and last stripes */
start_length = MIN(data_size, (stripe_size - offset_in_stripe));
final_length = (start_length == data_size ? 0 : final_offset % stripe_size);
- HDassert(start_length <= stripe_size);
- HDassert(final_length <= stripe_size);
+ assert(start_length <= stripe_size);
+ assert(final_length <= stripe_size);
/*
* Determine which subfile the I/O request begins in and which
@@ -2960,8 +2960,8 @@ init_indep_io(subfiling_context_t *sf_context, int64_t file_offset, size_t io_ne
}
if (thin_uniform_section) {
- HDassert(iovec_depth > 1);
- HDassert(num_full_stripes > 1);
+ assert(iovec_depth > 1);
+ assert(num_full_stripes > 1);
iovec_depth--;
num_full_stripes--;
@@ -3033,7 +3033,7 @@ init_indep_io(subfiling_context_t *sf_context, int64_t file_offset, size_t io_ne
row_offset += block_size;
}
- HDassert(offset_in_block <= block_size);
+ assert(offset_in_block <= block_size);
}
if (total_bytes != data_size)
@@ -3077,11 +3077,11 @@ iovec_fill_first(subfiling_context_t *sf_context, int64_t iovec_depth, int64_t t
int64_t total_bytes = 0;
herr_t ret_value = SUCCEED;
- HDassert(sf_context);
- HDassert(mem_offset_out);
- HDassert(target_file_offset_out);
- HDassert(io_block_len_out);
- HDassert(iovec_depth > 0);
+ assert(sf_context);
+ assert(mem_offset_out);
+ assert(target_file_offset_out);
+ assert(io_block_len_out);
+ assert(iovec_depth > 0);
stripe_size = sf_context->sf_stripe_size;
block_size = sf_context->sf_blocksize_per_stripe;
@@ -3176,11 +3176,11 @@ iovec_fill_last(subfiling_context_t *sf_context, int64_t iovec_depth, int64_t ta
int64_t total_bytes = 0;
herr_t ret_value = SUCCEED;
- HDassert(sf_context);
- HDassert(mem_offset_out);
- HDassert(target_file_offset_out);
- HDassert(io_block_len_out);
- HDassert(iovec_depth > 0);
+ assert(sf_context);
+ assert(mem_offset_out);
+ assert(target_file_offset_out);
+ assert(io_block_len_out);
+ assert(iovec_depth > 0);
stripe_size = sf_context->sf_stripe_size;
block_size = sf_context->sf_blocksize_per_stripe;
@@ -3308,11 +3308,11 @@ iovec_fill_first_last(subfiling_context_t *sf_context, int64_t iovec_depth, int6
int64_t total_bytes = 0;
herr_t ret_value = SUCCEED;
- HDassert(sf_context);
- HDassert(mem_offset_out);
- HDassert(target_file_offset_out);
- HDassert(io_block_len_out);
- HDassert(iovec_depth > 0);
+ assert(sf_context);
+ assert(mem_offset_out);
+ assert(target_file_offset_out);
+ assert(io_block_len_out);
+ assert(iovec_depth > 0);
stripe_size = sf_context->sf_stripe_size;
block_size = sf_context->sf_blocksize_per_stripe;
@@ -3418,11 +3418,11 @@ iovec_fill_uniform(subfiling_context_t *sf_context, int64_t iovec_depth, int64_t
int64_t total_bytes = 0;
herr_t ret_value = SUCCEED;
- HDassert(sf_context);
- HDassert(mem_offset_out);
- HDassert(target_file_offset_out);
- HDassert(io_block_len_out);
- HDassert((iovec_depth > 0) || (target_datasize == 0));
+ assert(sf_context);
+ assert(mem_offset_out);
+ assert(target_file_offset_out);
+ assert(io_block_len_out);
+ assert((iovec_depth > 0) || (target_datasize == 0));
stripe_size = sf_context->sf_stripe_size;
block_size = sf_context->sf_blocksize_per_stripe;
diff --git a/src/H5FDsubfiling/H5subfiling_common.c b/src/H5FDsubfiling/H5subfiling_common.c
index 7dac2db..2874e64 100644
--- a/src/H5FDsubfiling/H5subfiling_common.c
+++ b/src/H5FDsubfiling/H5subfiling_common.c
@@ -186,7 +186,7 @@ H5_get_subfiling_object(int64_t object_id)
* If we had to make more space, the given object index
* should always fall within range after a single re-allocation
*/
- HDassert((size_t)obj_index < sf_context_cache_size);
+ assert((size_t)obj_index < sf_context_cache_size);
}
/*
@@ -194,11 +194,11 @@ H5_get_subfiling_object(int64_t object_id)
* application exit, context entry indices should just be
* consecutive
*/
- HDassert((size_t)obj_index <= sf_context_cache_num_entries);
+ assert((size_t)obj_index <= sf_context_cache_num_entries);
if ((size_t)obj_index < sf_context_cache_num_entries)
ret_value = sf_context_cache[obj_index];
else {
- HDassert(!sf_context_cache[sf_context_cache_num_entries]);
+ assert(!sf_context_cache[sf_context_cache_num_entries]);
/* Allocate a new subfiling context object */
if (NULL == (ret_value = HDcalloc(1, sizeof(subfiling_context_t))))
@@ -244,7 +244,7 @@ H5_get_subfiling_object(int64_t object_id)
* If we had to make more space, the given object index
* should always fall within range after a single re-allocation
*/
- HDassert((size_t)obj_index < sf_topology_cache_size);
+ assert((size_t)obj_index < sf_topology_cache_size);
}
/*
@@ -252,11 +252,11 @@ H5_get_subfiling_object(int64_t object_id)
* application exit, topology entry indices should just be
* consecutive
*/
- HDassert((size_t)obj_index <= sf_topology_cache_num_entries);
+ assert((size_t)obj_index <= sf_topology_cache_num_entries);
if ((size_t)obj_index < sf_topology_cache_num_entries)
ret_value = sf_topology_cache[obj_index];
else {
- HDassert(!sf_topology_cache[sf_topology_cache_num_entries]);
+ assert(!sf_topology_cache[sf_topology_cache_num_entries]);
/* Allocate a new subfiling topology object */
if (NULL == (ret_value = HDmalloc(sizeof(sf_topology_t))))
@@ -308,15 +308,15 @@ H5_free_subfiling_object(int64_t object_id)
if (H5_free_subfiling_object_int(sf_context) < 0)
H5_SUBFILING_GOTO_ERROR(H5E_VFL, H5E_CANTFREE, FAIL, "couldn't free subfiling object");
- HDassert(sf_context_cache_num_entries > 0);
- HDassert(sf_context == sf_context_cache[sf_context_cache_num_entries - 1]);
+ assert(sf_context_cache_num_entries > 0);
+ assert(sf_context == sf_context_cache[sf_context_cache_num_entries - 1]);
sf_context_cache[sf_context_cache_num_entries - 1] = NULL;
sf_context_cache_num_entries--;
}
else {
sf_topology_t *sf_topology;
- HDassert(obj_type == SF_TOPOLOGY);
+ assert(obj_type == SF_TOPOLOGY);
if (NULL == (sf_topology = H5_get_subfiling_object(object_id)))
H5_SUBFILING_GOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL,
@@ -325,8 +325,8 @@ H5_free_subfiling_object(int64_t object_id)
if (H5_free_subfiling_topology(sf_topology) < 0)
H5_SUBFILING_GOTO_ERROR(H5E_VFL, H5E_CANTFREE, FAIL, "couldn't free subfiling topology");
- HDassert(sf_topology_cache_num_entries > 0);
- HDassert(sf_topology == sf_topology_cache[sf_topology_cache_num_entries - 1]);
+ assert(sf_topology_cache_num_entries > 0);
+ assert(sf_topology == sf_topology_cache[sf_topology_cache_num_entries - 1]);
sf_topology_cache[sf_topology_cache_num_entries - 1] = NULL;
sf_topology_cache_num_entries--;
}
@@ -342,7 +342,7 @@ H5_free_subfiling_object_int(subfiling_context_t *sf_context)
int mpi_code;
herr_t ret_value = SUCCEED;
- HDassert(sf_context);
+ assert(sf_context);
if (MPI_SUCCESS != (mpi_code = MPI_Finalized(&mpi_finalized))) {
/* Assume MPI is finalized or worse, and try to clean up what we can */
@@ -432,7 +432,7 @@ H5_free_subfiling_topology(sf_topology_t *topology)
int mpi_code;
herr_t ret_value = SUCCEED;
- HDassert(topology);
+ assert(topology);
if (MPI_SUCCESS != (mpi_code = MPI_Finalized(&mpi_finalized))) {
/* Assume MPI is finalized or worse, but clean up what we can */
@@ -448,7 +448,7 @@ H5_free_subfiling_topology(sf_topology_t *topology)
for (size_t i = 0; i < sf_topology_cache_num_entries; i++)
if (topology == sf_topology_cache[i])
topology_cached = TRUE;
- HDassert(topology_cached);
+ assert(topology_cached);
}
#endif
@@ -758,7 +758,7 @@ init_subfiling(const char *base_filename, uint64_t file_id, H5FD_subfiling_param
int mpi_code;
herr_t ret_value = SUCCEED;
- HDassert(context_id_out);
+ assert(context_id_out);
if (MPI_SUCCESS != (mpi_code = MPI_Comm_rank(comm, &mpi_rank)))
H5_SUBFILING_MPI_GOTO_ERROR(FAIL, "MPI_Comm_rank failed", mpi_code);
@@ -960,11 +960,11 @@ init_app_topology(H5FD_subfiling_params_t *subfiling_config, MPI_Comm comm, MPI_
int mpi_code;
herr_t ret_value = SUCCEED;
- HDassert(subfiling_config);
- HDassert(MPI_COMM_NULL != comm);
- HDassert(MPI_COMM_NULL != node_comm);
- HDassert(app_topology_out);
- HDassert(!*app_topology_out);
+ assert(subfiling_config);
+ assert(MPI_COMM_NULL != comm);
+ assert(MPI_COMM_NULL != node_comm);
+ assert(app_topology_out);
+ assert(!*app_topology_out);
if (MPI_SUCCESS != (mpi_code = MPI_Comm_rank(comm, &comm_rank)))
H5_SUBFILING_MPI_GOTO_ERROR(FAIL, "MPI_Comm_rank failed", mpi_code);
@@ -1098,7 +1098,7 @@ init_app_topology(H5FD_subfiling_params_t *subfiling_config, MPI_Comm comm, MPI_
if (find_cached_topology_info(comm, subfiling_config, iocs_per_node, &app_topology) < 0)
H5_SUBFILING_GOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL,
"can't check for cached subfiling topology object");
- HDassert(!app_topology || (app_topology->selection_type == ioc_selection_type));
+ assert(!app_topology || (app_topology->selection_type == ioc_selection_type));
if (!app_topology) {
/* Generate an ID for the application topology object */
@@ -1121,10 +1121,10 @@ init_app_topology(H5FD_subfiling_params_t *subfiling_config, MPI_Comm comm, MPI_
if (init_app_layout(app_topology, comm, node_comm) < 0)
H5_SUBFILING_GOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "couldn't initialize application layout");
- HDassert(app_topology->app_layout);
- HDassert(app_topology->app_layout->layout);
- HDassert(app_topology->app_layout->node_ranks);
- HDassert(app_topology->app_layout->node_count > 0);
+ assert(app_topology->app_layout);
+ assert(app_topology->app_layout->layout);
+ assert(app_topology->app_layout->node_ranks);
+ assert(app_topology->app_layout->node_count > 0);
/*
* Now that the application node count has been determined, adjust the
@@ -1186,8 +1186,8 @@ get_ioc_selection_criteria_from_env(H5FD_subfiling_ioc_select_t *ioc_selection_t
char *env_value = HDgetenv(H5FD_SUBFILING_IOC_SELECTION_CRITERIA);
herr_t ret_value = SUCCEED;
- HDassert(ioc_selection_type);
- HDassert(ioc_sel_info_str);
+ assert(ioc_selection_type);
+ assert(ioc_sel_info_str);
*ioc_sel_info_str = NULL;
@@ -1259,7 +1259,7 @@ find_cached_topology_info(MPI_Comm comm, H5FD_subfiling_params_t *subf_config, l
int32_t stripe_count;
herr_t ret_value = SUCCEED;
- HDassert(subf_config);
+ assert(subf_config);
ioc_selection_type = subf_config->ioc_selection;
stripe_count = subf_config->stripe_count;
@@ -1269,7 +1269,7 @@ find_cached_topology_info(MPI_Comm comm, H5FD_subfiling_params_t *subf_config, l
int result;
int mpi_code;
- HDassert(cached_topology);
+ assert(cached_topology);
/*
* If the selection types differ, just reject the cached topology
@@ -1289,8 +1289,8 @@ find_cached_topology_info(MPI_Comm comm, H5FD_subfiling_params_t *subf_config, l
}
if (cached_topology->selection_type == SELECT_IOC_ONE_PER_NODE) {
- HDassert(iocs_per_node >= 1);
- HDassert(cached_topology->app_layout->node_count > 0);
+ assert(iocs_per_node >= 1);
+ assert(cached_topology->app_layout->node_count > 0);
/*
* If a IOCs-per-node setting was set in the environment and would
@@ -1332,10 +1332,10 @@ init_app_layout(sf_topology_t *app_topology, MPI_Comm comm, MPI_Comm node_comm)
int mpi_code;
herr_t ret_value = SUCCEED;
- HDassert(app_topology);
- HDassert(!app_topology->app_layout);
- HDassert(MPI_COMM_NULL != comm);
- HDassert(MPI_COMM_NULL != node_comm);
+ assert(app_topology);
+ assert(!app_topology->app_layout);
+ assert(MPI_COMM_NULL != comm);
+ assert(MPI_COMM_NULL != node_comm);
if (NULL == (app_layout = HDcalloc(1, sizeof(*app_layout))))
H5_SUBFILING_GOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
@@ -1370,7 +1370,7 @@ init_app_layout(sf_topology_t *app_topology, MPI_Comm comm, MPI_Comm node_comm)
if (app_layout->layout[i].node_local_rank == 0)
app_layout->node_count++;
- HDassert(app_layout->node_count > 0);
+ assert(app_layout->node_count > 0);
if (NULL ==
(app_layout->node_ranks = HDmalloc((size_t)app_layout->node_count * sizeof(*app_layout->node_ranks))))
@@ -1383,7 +1383,7 @@ init_app_layout(sf_topology_t *app_topology, MPI_Comm comm, MPI_Comm node_comm)
*/
for (size_t i = 0, node_rank_index = 0; i < (size_t)app_layout->world_size; i++) {
if (app_layout->layout[i].node_local_rank == 0) {
- HDassert(node_rank_index < (size_t)app_layout->node_count);
+ assert(node_rank_index < (size_t)app_layout->node_count);
app_layout->node_ranks[node_rank_index++] = app_layout->layout[i].rank;
}
}
@@ -1428,9 +1428,9 @@ gather_topology_info(app_layout_t *app_layout, MPI_Comm comm, MPI_Comm intra_com
int mpi_code;
herr_t ret_value = SUCCEED;
- HDassert(app_layout);
- HDassert(app_layout->layout);
- HDassert(MPI_COMM_NULL != comm);
+ assert(app_layout);
+ assert(app_layout->layout);
+ assert(MPI_COMM_NULL != comm);
sf_world_rank = app_layout->world_rank;
sf_world_size = app_layout->world_size;
@@ -1481,7 +1481,7 @@ gather_topology_info(app_layout_t *app_layout, MPI_Comm comm, MPI_Comm intra_com
#else
int aggr_comm_size = 0;
- HDassert(MPI_COMM_NULL != intra_comm);
+ assert(MPI_COMM_NULL != intra_comm);
/* Split the file communicator into a sub-group of one rank per node */
if (MPI_SUCCESS != (mpi_code = MPI_Comm_split(comm, node_local_rank, sf_world_rank, &aggr_comm)))
@@ -1612,12 +1612,12 @@ identify_ioc_ranks(sf_topology_t *app_topology, int rank_stride)
int max_iocs = 0;
herr_t ret_value = SUCCEED;
- HDassert(app_topology);
- HDassert(!app_topology->io_concentrators);
- HDassert(app_topology->n_io_concentrators > 0);
- HDassert(app_topology->app_layout);
- HDassert(app_topology->app_layout->layout);
- HDassert(app_topology->app_layout->node_count > 0);
+ assert(app_topology);
+ assert(!app_topology->io_concentrators);
+ assert(app_topology->n_io_concentrators > 0);
+ assert(app_topology->app_layout);
+ assert(app_topology->app_layout->layout);
+ assert(app_topology->app_layout->node_count > 0);
app_layout = app_topology->app_layout;
@@ -1638,13 +1638,13 @@ identify_ioc_ranks(sf_topology_t *app_topology, int rank_stride)
if (app_topology->n_io_concentrators > app_layout->node_count)
iocs_per_node = app_topology->n_io_concentrators / app_layout->node_count;
- HDassert(app_layout->node_ranks);
+ assert(app_layout->node_ranks);
for (size_t i = 0; i < (size_t)app_layout->node_count; i++) {
int node_index = app_layout->node_ranks[i];
int local_size = app_layout->layout[node_index].node_local_size;
- HDassert(total_ioc_count < app_topology->n_io_concentrators);
+ assert(total_ioc_count < app_topology->n_io_concentrators);
io_concentrators[total_ioc_count] = app_layout->layout[node_index++].rank;
if (app_layout->world_rank == io_concentrators[total_ioc_count]) {
@@ -1660,7 +1660,7 @@ identify_ioc_ranks(sf_topology_t *app_topology, int rank_stride)
if (j >= (size_t)local_size)
break;
- HDassert(total_ioc_count < app_topology->n_io_concentrators);
+ assert(total_ioc_count < app_topology->n_io_concentrators);
io_concentrators[total_ioc_count] = app_layout->layout[node_index++].rank;
if (app_layout->world_rank == io_concentrators[total_ioc_count]) {
@@ -1686,7 +1686,7 @@ identify_ioc_ranks(sf_topology_t *app_topology, int rank_stride)
int num_iocs_assigned = 0;
int world_size = app_layout->world_size;
- HDassert(rank_stride > 0);
+ assert(rank_stride > 0);
for (int i = 0; num_iocs_assigned < max_iocs; num_iocs_assigned++) {
int ioc_index = rank_stride * i++;
@@ -1749,15 +1749,15 @@ init_subfiling_context(subfiling_context_t *sf_context, const char *base_filenam
int mpi_code;
herr_t ret_value = SUCCEED;
- HDassert(sf_context);
- HDassert(sf_context->topology == NULL);
- HDassert(sf_context->sf_context_id >= 0);
- HDassert(base_filename);
- HDassert(file_id != UINT64_MAX);
- HDassert(subfiling_config);
- HDassert(app_topology);
- HDassert(app_topology->n_io_concentrators > 0);
- HDassert(MPI_COMM_NULL != file_comm);
+ assert(sf_context);
+ assert(sf_context->topology == NULL);
+ assert(sf_context->sf_context_id >= 0);
+ assert(base_filename);
+ assert(file_id != UINT64_MAX);
+ assert(subfiling_config);
+ assert(app_topology);
+ assert(app_topology->n_io_concentrators > 0);
+ assert(MPI_COMM_NULL != file_comm);
sf_context->h5_file_id = file_id;
sf_context->sf_fids = NULL;
@@ -1888,7 +1888,7 @@ init_subfiling_context(subfiling_context_t *sf_context, const char *base_filenam
H5_SUBFILING_GOTO_ERROR(H5E_VFL, H5E_BADVALUE, FAIL, "invalid subfiling stripe count (%d)",
sf_context->sf_num_subfiles);
- HDassert(sf_context->sf_num_subfiles >= app_topology->n_io_concentrators);
+ assert(sf_context->sf_num_subfiles >= app_topology->n_io_concentrators);
done:
H5_SUBFILING_FUNC_LEAVE;
@@ -1932,8 +1932,8 @@ open_subfile_with_context(subfiling_context_t *sf_context, int file_acc_flags)
{
herr_t ret_value = SUCCEED;
- HDassert(sf_context);
- HDassert(sf_context->h5_file_id != UINT64_MAX);
+ assert(sf_context);
+ assert(sf_context->h5_file_id != UINT64_MAX);
/*
* Save the HDF5 file ID (e.g., inode) to subfile context mapping.
@@ -2131,13 +2131,13 @@ ioc_open_files(int64_t file_context_id, int file_acc_flags)
H5_SUBFILING_GOTO_ERROR(H5E_VFL, H5E_CANTOPENFILE, FAIL,
"couldn't get subfiling object from context ID");
- HDassert(sf_context->h5_file_id != UINT64_MAX);
- HDassert(sf_context->h5_filename);
- HDassert(sf_context->sf_fids);
- HDassert(sf_context->sf_num_subfiles > 0);
- HDassert(sf_context->sf_num_fids > 0);
- HDassert(sf_context->topology);
- HDassert(sf_context->topology->ioc_idx >= 0); /* Only IOC ranks should be here */
+ assert(sf_context->h5_file_id != UINT64_MAX);
+ assert(sf_context->h5_filename);
+ assert(sf_context->sf_fids);
+ assert(sf_context->sf_num_subfiles > 0);
+ assert(sf_context->sf_num_fids > 0);
+ assert(sf_context->topology);
+ assert(sf_context->topology->ioc_idx >= 0); /* Only IOC ranks should be here */
/* Get the basename of the full HDF5 filename */
if (H5_basename(sf_context->h5_filename, &base) < 0)
@@ -2264,10 +2264,10 @@ create_config_file(subfiling_context_t *sf_context, const char *base_filename, c
int ret = 0;
herr_t ret_value = SUCCEED;
- HDassert(sf_context);
- HDassert(base_filename);
- HDassert(config_dir);
- HDassert(subfile_dir);
+ assert(sf_context);
+ assert(base_filename);
+ assert(config_dir);
+ assert(subfile_dir);
if (sf_context->h5_file_id == UINT64_MAX)
H5_SUBFILING_GOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "invalid HDF5 file ID %" PRIu64,
@@ -2394,11 +2394,11 @@ open_config_file(const char *base_filename, const char *config_dir, uint64_t fil
int ret = 0;
herr_t ret_value = SUCCEED;
- HDassert(base_filename);
- HDassert(config_dir);
- HDassert(file_id != UINT64_MAX);
- HDassert(mode);
- HDassert(config_file_out);
+ assert(base_filename);
+ assert(config_dir);
+ assert(file_id != UINT64_MAX);
+ assert(mode);
+ assert(config_file_out);
*config_file_out = NULL;
@@ -2466,7 +2466,7 @@ H5_get_subfiling_config_from_file(FILE *config_file, int64_t *stripe_size, int64
long config_file_len = 0;
herr_t ret_value = SUCCEED;
- HDassert(config_file);
+ assert(config_file);
if (HDfseek(config_file, 0, SEEK_END) < 0)
H5_SUBFILING_SYS_GOTO_ERROR(H5E_FILE, H5E_SEEKERROR, FAIL,
@@ -2561,8 +2561,8 @@ H5_resolve_pathname(const char *filepath, MPI_Comm comm, char **resolved_filepat
int mpi_code;
herr_t ret_value = SUCCEED;
- HDassert(filepath);
- HDassert(resolved_filepath);
+ assert(filepath);
+ assert(resolved_filepath);
if (MPI_SUCCESS != (mpi_code = MPI_Comm_rank(comm, &mpi_rank)))
H5_SUBFILING_MPI_GOTO_ERROR(FAIL, "MPI_Comm_rank failed", mpi_code);