Diffstat (limited to 'src/H5FDsubfiling/H5FDioc_threads.c')
-rw-r--r--  src/H5FDsubfiling/H5FDioc_threads.c  122
1 file changed, 61 insertions, 61 deletions
diff --git a/src/H5FDsubfiling/H5FDioc_threads.c b/src/H5FDsubfiling/H5FDioc_threads.c
index abf816d..579b686 100644
--- a/src/H5FDsubfiling/H5FDioc_threads.c
+++ b/src/H5FDsubfiling/H5FDioc_threads.c
@@ -119,7 +119,7 @@ initialize_ioc_threads(void *_sf_context)
double t_start = 0.0, t_end = 0.0;
#endif
- HDassert(sf_context);
+ assert(sf_context);
/*
* Allocate and initialize IOC data that will be passed
@@ -213,12 +213,12 @@ finalize_ioc_threads(void *_sf_context)
ioc_data_t *ioc_data = NULL;
int ret_value = 0;
- HDassert(sf_context);
- HDassert(sf_context->topology->rank_is_ioc);
+ assert(sf_context);
+ assert(sf_context->topology->rank_is_ioc);
ioc_data = sf_context->ioc_data;
if (ioc_data) {
- HDassert(0 == atomic_load(&ioc_data->sf_shutdown_flag));
+ assert(0 == atomic_load(&ioc_data->sf_shutdown_flag));
/* Shutdown the main IOC thread */
atomic_store(&ioc_data->sf_shutdown_flag, 1);
@@ -229,7 +229,7 @@ finalize_ioc_threads(void *_sf_context)
} while (0 != atomic_load(&ioc_data->sf_shutdown_flag));
/* Tear down IOC worker thread pool */
- HDassert(0 == atomic_load(&ioc_data->sf_io_ops_pending));
+ assert(0 == atomic_load(&ioc_data->sf_io_ops_pending));
hg_thread_pool_destroy(ioc_data->io_thread_pool);
hg_thread_mutex_destroy(&ioc_data->io_queue.q_mutex);
@@ -345,10 +345,10 @@ ioc_main(ioc_data_t *ioc_data)
int shutdown_requested;
int ret_value = 0;
- HDassert(ioc_data);
+ assert(ioc_data);
context = H5_get_subfiling_object(ioc_data->sf_context_id);
- HDassert(context);
+ assert(context);
/* We can't have opened any files at this point..
* The file open approach has changed so that the normal
@@ -416,7 +416,7 @@ ioc_main(ioc_data_t *ioc_data)
ioc_io_queue_add_entry(ioc_data, &wk_req);
- HDassert(atomic_load(&ioc_data->sf_io_ops_pending) >= 0);
+ assert(atomic_load(&ioc_data->sf_io_ops_pending) >= 0);
}
else {
struct timespec sleep_spec = {0, IOC_MAIN_SLEEP_DELAY};
@@ -501,15 +501,15 @@ handle_work_request(void *arg)
int op_ret;
hg_thread_ret_t ret_value = 0;
- HDassert(q_entry_ptr);
- HDassert(q_entry_ptr->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC);
- HDassert(q_entry_ptr->in_progress);
+ assert(q_entry_ptr);
+ assert(q_entry_ptr->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC);
+ assert(q_entry_ptr->in_progress);
sf_context = H5_get_subfiling_object(file_context_id);
- HDassert(sf_context);
+ assert(sf_context);
ioc_data = sf_context->ioc_data;
- HDassert(ioc_data);
+ assert(ioc_data);
atomic_fetch_add(&ioc_data->sf_work_pending, 1);
@@ -559,14 +559,14 @@ handle_work_request(void *arg)
#ifdef H5FD_IOC_DEBUG
{
int curr_io_ops_pending = atomic_load(&ioc_data->sf_io_ops_pending);
- HDassert(curr_io_ops_pending > 0);
+ assert(curr_io_ops_pending > 0);
}
#endif
/* complete the I/O request */
ioc_io_queue_complete_entry(ioc_data, q_entry_ptr);
- HDassert(atomic_load(&ioc_data->sf_io_ops_pending) >= 0);
+ assert(atomic_load(&ioc_data->sf_io_ops_pending) >= 0);
/* Check the I/O Queue to see if there are any dispatchable entries */
ioc_io_queue_dispatch_eligible_entries(ioc_data, 1);
@@ -617,7 +617,7 @@ send_ack_to_client(int ack_val, int dest_rank, int source_rank, int msg_tag, MPI
int mpi_code;
herr_t ret_value = SUCCEED;
- HDassert(ack_val > 0);
+ assert(ack_val > 0);
(void)source_rank;
@@ -697,7 +697,7 @@ ioc_file_queue_write_indep(sf_work_request_t *msg, int ioc_idx, int source, MPI_
int mpi_code;
int ret_value = 0;
- HDassert(msg);
+ assert(msg);
file_context_id = msg->context_id;
@@ -712,7 +712,7 @@ ioc_file_queue_write_indep(sf_work_request_t *msg, int ioc_idx, int source, MPI_
}
sf_context = H5_get_subfiling_object(file_context_id);
- HDassert(sf_context);
+ assert(sf_context);
stripe_id = file_offset + data_size;
sf_eof = (haddr_t)(stripe_id % sf_context->sf_stripe_size);
@@ -751,7 +751,7 @@ ioc_file_queue_write_indep(sf_work_request_t *msg, int ioc_idx, int source, MPI_
* allows us to distinguish between multiple concurrent
* writes from a single rank.
*/
- HDassert(H5FD_IOC_tag_ub_val_ptr && (*H5FD_IOC_tag_ub_val_ptr >= IO_TAG_BASE));
+ assert(H5FD_IOC_tag_ub_val_ptr && (*H5FD_IOC_tag_ub_val_ptr >= IO_TAG_BASE));
rcv_tag = (int)(counter % (INT_MAX - IO_TAG_BASE));
rcv_tag %= (*H5FD_IOC_tag_ub_val_ptr - IO_TAG_BASE);
rcv_tag += IO_TAG_BASE;
@@ -787,7 +787,7 @@ ioc_file_queue_write_indep(sf_work_request_t *msg, int ioc_idx, int source, MPI_
#endif
- HDassert(subfile_idx < sf_context->sf_num_fids);
+ assert(subfile_idx < sf_context->sf_num_fids);
sf_fid = sf_context->sf_fids[subfile_idx];
#ifdef H5_SUBFILING_DEBUG
@@ -883,12 +883,12 @@ ioc_file_queue_read_indep(sf_work_request_t *msg, int ioc_idx, int source, MPI_C
int mpi_code;
int ret_value = 0;
- HDassert(msg);
+ assert(msg);
file_context_id = msg->context_id;
sf_context = H5_get_subfiling_object(file_context_id);
- HDassert(sf_context);
+ assert(sf_context);
/*
* If we are using 1 subfile per IOC, we can optimize reads
@@ -952,7 +952,7 @@ ioc_file_queue_read_indep(sf_work_request_t *msg, int ioc_idx, int source, MPI_C
* reads from a single rank, which can happen when a rank
* owns multiple subfiles.
*/
- HDassert(H5FD_IOC_tag_ub_val_ptr && (*H5FD_IOC_tag_ub_val_ptr >= IO_TAG_BASE));
+ assert(H5FD_IOC_tag_ub_val_ptr && (*H5FD_IOC_tag_ub_val_ptr >= IO_TAG_BASE));
send_tag = (int)(counter % (INT_MAX - IO_TAG_BASE));
send_tag %= (*H5FD_IOC_tag_ub_val_ptr - IO_TAG_BASE);
send_tag += IO_TAG_BASE;
@@ -964,7 +964,7 @@ ioc_file_queue_read_indep(sf_work_request_t *msg, int ioc_idx, int source, MPI_C
}
/* Read data from the subfile */
- HDassert(subfile_idx < sf_context->sf_num_fids);
+ assert(subfile_idx < sf_context->sf_num_fids);
sf_fid = sf_context->sf_fids[subfile_idx];
if (sf_fid < 0)
H5_SUBFILING_GOTO_ERROR(H5E_IO, H5E_BADVALUE, -1, "subfile file descriptor %d is invalid", sf_fid);
@@ -1107,7 +1107,7 @@ ioc_file_read_data(int fd, int64_t file_offset, void *data_buffer, int64_t data_
file_offset += bytes_read;
}
else if (bytes_read == 0) {
- HDassert(bytes_remaining > 0);
+ assert(bytes_remaining > 0);
/* end of file but not end of format address space */
HDmemset(this_buffer, 0, (size_t)bytes_remaining);
@@ -1145,7 +1145,7 @@ ioc_file_truncate(sf_work_request_t *msg)
int mpi_code;
int ret_value = 0;
- HDassert(msg);
+ assert(msg);
file_context_id = msg->context_id;
ioc_idx = msg->ioc_idx;
@@ -1160,7 +1160,7 @@ ioc_file_truncate(sf_work_request_t *msg)
if (NULL == (sf_context = H5_get_subfiling_object(file_context_id)))
H5_SUBFILING_GOTO_ERROR(H5E_FILE, H5E_CANTGET, -1, "couldn't retrieve subfiling context");
- HDassert(subfile_idx < sf_context->sf_num_fids);
+ assert(subfile_idx < sf_context->sf_num_fids);
fd = sf_context->sf_fids[subfile_idx];
@@ -1219,7 +1219,7 @@ ioc_file_report_eof(sf_work_request_t *msg, MPI_Comm comm)
int mpi_code;
int ret_value = 0;
- HDassert(msg);
+ assert(msg);
file_context_id = msg->context_id;
source = msg->source;
@@ -1230,7 +1230,7 @@ ioc_file_report_eof(sf_work_request_t *msg, MPI_Comm comm)
if (NULL == (sf_context = H5_get_subfiling_object(file_context_id)))
H5_SUBFILING_GOTO_ERROR(H5E_FILE, H5E_CANTGET, -1, "couldn't retrieve subfiling context");
- HDassert(subfile_idx < sf_context->sf_num_fids);
+ assert(subfile_idx < sf_context->sf_num_fids);
fd = sf_context->sf_fids[subfile_idx];
@@ -1326,21 +1326,21 @@ ioc_io_queue_add_entry(ioc_data_t *ioc_data, sf_work_request_t *wk_req_ptr)
{
ioc_io_queue_entry_t *entry_ptr = NULL;
- HDassert(ioc_data);
- HDassert(ioc_data->io_queue.magic == H5FD_IOC__IO_Q_MAGIC);
- HDassert(wk_req_ptr);
+ assert(ioc_data);
+ assert(ioc_data->io_queue.magic == H5FD_IOC__IO_Q_MAGIC);
+ assert(wk_req_ptr);
entry_ptr = ioc_io_queue_alloc_entry();
- HDassert(entry_ptr);
- HDassert(entry_ptr->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC);
+ assert(entry_ptr);
+ assert(entry_ptr->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC);
HDmemcpy((void *)(&(entry_ptr->wk_req)), (const void *)wk_req_ptr, sizeof(sf_work_request_t));
/* must obtain io_queue mutex before appending */
hg_thread_mutex_lock(&ioc_data->io_queue.q_mutex);
- HDassert(ioc_data->io_queue.q_len == atomic_load(&ioc_data->sf_io_ops_pending));
+ assert(ioc_data->io_queue.q_len == atomic_load(&ioc_data->sf_io_ops_pending));
entry_ptr->counter = ioc_data->io_queue.req_counter++;
@@ -1360,7 +1360,7 @@ ioc_io_queue_add_entry(ioc_data_t *ioc_data, sf_work_request_t *wk_req_ptr)
atomic_load(&ioc_data->sf_io_ops_pending));
#endif
- HDassert(ioc_data->io_queue.num_pending + ioc_data->io_queue.num_in_progress == ioc_data->io_queue.q_len);
+ assert(ioc_data->io_queue.num_pending + ioc_data->io_queue.num_in_progress == ioc_data->io_queue.q_len);
#ifdef H5FD_IOC_COLLECT_STATS
entry_ptr->q_time = H5_now_usec();
@@ -1398,7 +1398,7 @@ ioc_io_queue_add_entry(ioc_data_t *ioc_data, sf_work_request_t *wk_req_ptr)
}
#endif
- HDassert(ioc_data->io_queue.q_len == atomic_load(&ioc_data->sf_io_ops_pending));
+ assert(ioc_data->io_queue.q_len == atomic_load(&ioc_data->sf_io_ops_pending));
hg_thread_mutex_unlock(&ioc_data->io_queue.q_mutex);
@@ -1457,8 +1457,8 @@ ioc_io_queue_dispatch_eligible_entries(ioc_data_t *ioc_data, hbool_t try_lock)
ioc_io_queue_entry_t *entry_ptr = NULL;
ioc_io_queue_entry_t *scan_ptr = NULL;
- HDassert(ioc_data);
- HDassert(ioc_data->io_queue.magic == H5FD_IOC__IO_Q_MAGIC);
+ assert(ioc_data);
+ assert(ioc_data->io_queue.magic == H5FD_IOC__IO_Q_MAGIC);
if (try_lock) {
if (hg_thread_mutex_try_lock(&ioc_data->io_queue.q_mutex) < 0)
@@ -1470,11 +1470,11 @@ ioc_io_queue_dispatch_eligible_entries(ioc_data_t *ioc_data, hbool_t try_lock)
entry_ptr = ioc_data->io_queue.q_head;
/* sanity check on first element in the I/O queue */
- HDassert((entry_ptr == NULL) || (entry_ptr->prev == NULL));
+ assert((entry_ptr == NULL) || (entry_ptr->prev == NULL));
while ((entry_ptr) && (ioc_data->io_queue.num_pending > 0)) {
- HDassert(entry_ptr->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC);
+ assert(entry_ptr->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC);
/* Check for a get EOF or truncate operation at head of queue */
if (ioc_data->io_queue.q_head->in_progress) {
@@ -1499,7 +1499,7 @@ ioc_io_queue_dispatch_eligible_entries(ioc_data_t *ioc_data, hbool_t try_lock)
scan_ptr = entry_ptr->prev;
- HDassert((scan_ptr == NULL) || (scan_ptr->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC));
+ assert((scan_ptr == NULL) || (scan_ptr->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC));
if ((entry_ptr->wk_req.tag == TRUNC_OP) || (entry_ptr->wk_req.tag == GET_EOF_OP)) {
@@ -1539,18 +1539,18 @@ ioc_io_queue_dispatch_eligible_entries(ioc_data_t *ioc_data, hbool_t try_lock)
if (!conflict_detected) { /* dispatch I/O request */
- HDassert(scan_ptr == NULL);
- HDassert(!entry_ptr->in_progress);
+ assert(scan_ptr == NULL);
+ assert(!entry_ptr->in_progress);
entry_ptr->in_progress = TRUE;
- HDassert(ioc_data->io_queue.num_pending > 0);
+ assert(ioc_data->io_queue.num_pending > 0);
ioc_data->io_queue.num_pending--;
ioc_data->io_queue.num_in_progress++;
- HDassert(ioc_data->io_queue.num_pending + ioc_data->io_queue.num_in_progress ==
- ioc_data->io_queue.q_len);
+ assert(ioc_data->io_queue.num_pending + ioc_data->io_queue.num_in_progress ==
+ ioc_data->io_queue.q_len);
entry_ptr->thread_wk.func = handle_work_request;
entry_ptr->thread_wk.args = entry_ptr;
@@ -1583,7 +1583,7 @@ ioc_io_queue_dispatch_eligible_entries(ioc_data_t *ioc_data, hbool_t try_lock)
entry_ptr = entry_ptr->next;
}
- HDassert(ioc_data->io_queue.q_len == atomic_load(&ioc_data->sf_io_ops_pending));
+ assert(ioc_data->io_queue.q_len == atomic_load(&ioc_data->sf_io_ops_pending));
hg_thread_mutex_unlock(&ioc_data->io_queue.q_mutex);
} /* ioc_io_queue_dispatch_eligible_entries() */
@@ -1616,16 +1616,16 @@ ioc_io_queue_complete_entry(ioc_data_t *ioc_data, ioc_io_queue_entry_t *entry_pt
uint64_t execution_time;
#endif
- HDassert(ioc_data);
- HDassert(ioc_data->io_queue.magic == H5FD_IOC__IO_Q_MAGIC);
- HDassert(entry_ptr);
- HDassert(entry_ptr->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC);
+ assert(ioc_data);
+ assert(ioc_data->io_queue.magic == H5FD_IOC__IO_Q_MAGIC);
+ assert(entry_ptr);
+ assert(entry_ptr->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC);
/* must obtain io_queue mutex before deleting and updating stats */
hg_thread_mutex_lock(&ioc_data->io_queue.q_mutex);
- HDassert(ioc_data->io_queue.num_pending + ioc_data->io_queue.num_in_progress == ioc_data->io_queue.q_len);
- HDassert(ioc_data->io_queue.num_in_progress > 0);
+ assert(ioc_data->io_queue.num_pending + ioc_data->io_queue.num_in_progress == ioc_data->io_queue.q_len);
+ assert(ioc_data->io_queue.num_in_progress > 0);
if (entry_ptr->wk_ret < 0)
ioc_data->io_queue.num_failed++;
@@ -1634,7 +1634,7 @@ ioc_io_queue_complete_entry(ioc_data_t *ioc_data, ioc_io_queue_entry_t *entry_pt
ioc_data->io_queue.num_in_progress--;
- HDassert(ioc_data->io_queue.num_pending + ioc_data->io_queue.num_in_progress == ioc_data->io_queue.q_len);
+ assert(ioc_data->io_queue.num_pending + ioc_data->io_queue.num_in_progress == ioc_data->io_queue.q_len);
atomic_fetch_sub(&ioc_data->sf_io_ops_pending, 1);
@@ -1652,10 +1652,10 @@ ioc_io_queue_complete_entry(ioc_data_t *ioc_data, ioc_io_queue_entry_t *entry_pt
* there aren't other operations in progress
*/
if ((entry_ptr->wk_req.tag == GET_EOF_OP) || (entry_ptr->wk_req.tag == TRUNC_OP))
- HDassert(ioc_data->io_queue.num_in_progress == 0);
+ assert(ioc_data->io_queue.num_in_progress == 0);
#endif
- HDassert(ioc_data->io_queue.q_len == atomic_load(&ioc_data->sf_io_ops_pending));
+ assert(ioc_data->io_queue.q_len == atomic_load(&ioc_data->sf_io_ops_pending));
#ifdef H5FD_IOC_COLLECT_STATS
/* Compute the queued and execution time */
@@ -1696,10 +1696,10 @@ static void
ioc_io_queue_free_entry(ioc_io_queue_entry_t *q_entry_ptr)
{
/* use assertions for error checking, since the following should never fail. */
- HDassert(q_entry_ptr);
- HDassert(q_entry_ptr->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC);
- HDassert(q_entry_ptr->next == NULL);
- HDassert(q_entry_ptr->prev == NULL);
+ assert(q_entry_ptr);
+ assert(q_entry_ptr->magic == H5FD_IOC__IO_Q_ENTRY_MAGIC);
+ assert(q_entry_ptr->next == NULL);
+ assert(q_entry_ptr->prev == NULL);
q_entry_ptr->magic = 0;
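
For context, HDassert is HDF5's internal spelling of the standard C assert (one of the HD-prefixed wrappers defined in H5private.h), so replacing each call with plain assert, as this diff does, is intended to be behavior-preserving. A minimal sketch of the equivalence, assuming the usual wrapper-style definition:

#include <assert.h>

/* Assumed to mirror the H5private.h-style wrapper: HDassert is a thin
 * macro over the standard C assert, so swapping one spelling for the
 * other does not change what is checked. */
#define HDassert(S) assert(S)

int main(void)
{
    int sf_io_ops_pending = 0;

    HDassert(sf_io_ops_pending >= 0); /* old spelling removed by the diff */
    assert(sf_io_ops_pending >= 0);   /* new spelling used throughout */

    return 0;
}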