summaryrefslogtreecommitdiffstats
path: root/src/H5Cmpio.c
diff options
context:
space:
mode:
authorDana Robinson <43805+derobins@users.noreply.github.com>2023-06-28 14:31:56 (GMT)
committerGitHub <noreply@github.com>2023-06-28 14:31:56 (GMT)
commit7a44581a84778a1346a2fd5b6cca7d9db905a321 (patch)
tree44ea9c2d1b471eb227698abe8499c34cfa6d47d2 /src/H5Cmpio.c
parent622fcbd13881fbc58bbeaed3062583b759f5e864 (diff)
downloadhdf5-7a44581a84778a1346a2fd5b6cca7d9db905a321.zip
hdf5-7a44581a84778a1346a2fd5b6cca7d9db905a321.tar.gz
hdf5-7a44581a84778a1346a2fd5b6cca7d9db905a321.tar.bz2
Rename HDassert() to assert() (#3191)
* Change HDassert to assert
* Fix bin/make_err
Diffstat (limited to 'src/H5Cmpio.c')
-rw-r--r--  src/H5Cmpio.c | 222
1 file changed, 111 insertions(+), 111 deletions(-)
diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c
index 46f1ba7..308a7f3 100644
--- a/src/H5Cmpio.c
+++ b/src/H5Cmpio.c
@@ -183,12 +183,12 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha
FUNC_ENTER_NOAPI(FAIL)
/* Sanity checks */
- HDassert(cache_ptr != NULL);
- HDassert(num_candidates > 0);
- HDassert((!cache_ptr->slist_enabled) || (num_candidates <= cache_ptr->slist_len));
- HDassert(candidates_list_ptr != NULL);
- HDassert(0 <= mpi_rank);
- HDassert(mpi_rank < mpi_size);
+ assert(cache_ptr != NULL);
+ assert(num_candidates > 0);
+ assert((!cache_ptr->slist_enabled) || (num_candidates <= cache_ptr->slist_len));
+ assert(candidates_list_ptr != NULL);
+ assert(0 <= mpi_rank);
+ assert(mpi_rank < mpi_size);
/* Initialize the entries_to_flush and entries_to_clear arrays */
HDmemset(entries_to_flush, 0, sizeof(entries_to_flush));
@@ -209,7 +209,7 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha
if (f->shared->coll_md_write) {
/* Sanity check */
- HDassert(NULL == cache_ptr->coll_write_list);
+ assert(NULL == cache_ptr->coll_write_list);
/* Create skip list of entries for collective write */
if (NULL == (cache_ptr->coll_write_list = H5SL_create(H5SL_TYPE_HADDR, NULL)))
@@ -243,7 +243,7 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha
candidate_assignment_table[u] = candidate_assignment_table[u - 1] + n;
} /* end else */
} /* end else */
- HDassert((candidate_assignment_table[mpi_size - 1] + n) == num_candidates);
+ assert((candidate_assignment_table[mpi_size - 1] + n) == num_candidates);
#ifdef H5C_DO_SANITY_CHECKS
/* Verify that the candidate assignment table has the expected form */
@@ -253,9 +253,9 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha
a = candidate_assignment_table[u] - candidate_assignment_table[u - 1];
b = candidate_assignment_table[u + 1] - candidate_assignment_table[u];
- HDassert(n + 1 >= a);
- HDassert(a >= b);
- HDassert(b >= n);
+ assert(n + 1 >= a);
+ assert(a >= b);
+ assert(b >= n);
}
#endif /* H5C_DO_SANITY_CHECKS */
@@ -279,7 +279,7 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha
for (u = 0; u < num_candidates; u++) {
addr = candidates_list_ptr[u];
- HDassert(H5_addr_defined(addr));
+ assert(H5_addr_defined(addr));
#ifdef H5C_DO_SANITY_CHECKS
if (u > 0) {
@@ -305,10 +305,10 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Listed entry is protected?!?!?")
/* Sanity checks */
- HDassert(entry_ptr->ring >= H5C_RING_USER);
- HDassert(entry_ptr->ring <= H5C_RING_SB);
- HDassert(!entry_ptr->flush_immediately);
- HDassert(!entry_ptr->clear_on_unprotect);
+ assert(entry_ptr->ring >= H5C_RING_USER);
+ assert(entry_ptr->ring <= H5C_RING_SB);
+ assert(!entry_ptr->flush_immediately);
+ assert(!entry_ptr->clear_on_unprotect);
/* Determine whether the entry is to be cleared or flushed,
* and mark it accordingly. We will scan the protected and
@@ -350,8 +350,8 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha
n += entries_to_clear[u];
} /* end if */
- HDassert((unsigned)m == total_entries_to_flush);
- HDassert(n == total_entries_to_clear);
+ assert((unsigned)m == total_entries_to_flush);
+ assert(n == total_entries_to_clear);
#endif /* H5C_DO_SANITY_CHECKS */
#if H5C_APPLY_CANDIDATE_LIST__DEBUG
@@ -375,7 +375,7 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha
/* If we've deferred writing to do it collectively, take care of that now */
if (f->shared->coll_md_write) {
/* Sanity check */
- HDassert(cache_ptr->coll_write_list);
+ assert(cache_ptr->coll_write_list);
/* Write collective list */
if (H5C__collective_write(f) < 0)
@@ -419,7 +419,7 @@ H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr)
FUNC_ENTER_NOAPI(FAIL)
- HDassert(cache_ptr != NULL);
+ assert(cache_ptr != NULL);
/* As a sanity check, set space needed to the dirty_index_size. This
* should be the sum total of the sizes of all the dirty entries
@@ -428,14 +428,14 @@ H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr)
*/
space_needed = cache_ptr->dirty_index_size;
- HDassert((!cache_ptr->slist_enabled) || (space_needed == cache_ptr->slist_size));
+ assert((!cache_ptr->slist_enabled) || (space_needed == cache_ptr->slist_size));
/* We shouldn't have any protected entries at this point, but it is
* possible that some dirty entries may reside on the pinned list.
*/
- HDassert(cache_ptr->dirty_index_size <= (cache_ptr->dLRU_list_size + cache_ptr->pel_size));
- HDassert((!cache_ptr->slist_enabled) ||
- (cache_ptr->slist_len <= (cache_ptr->dLRU_list_len + cache_ptr->pel_len)));
+ assert(cache_ptr->dirty_index_size <= (cache_ptr->dLRU_list_size + cache_ptr->pel_size));
+ assert((!cache_ptr->slist_enabled) ||
+ (cache_ptr->slist_len <= (cache_ptr->dLRU_list_len + cache_ptr->pel_len)));
if (space_needed > 0) {
H5C_cache_entry_t *entry_ptr;
@@ -443,7 +443,7 @@ H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr)
size_t nominated_entries_size = 0;
haddr_t nominated_addr;
- HDassert((!cache_ptr->slist_enabled) || (cache_ptr->slist_len > 0));
+ assert((!cache_ptr->slist_enabled) || (cache_ptr->slist_len > 0));
/* Scan the dirty LRU list from tail forward and nominate sufficient
* entries to free up the necessary space.
@@ -452,11 +452,11 @@ H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr)
while ((nominated_entries_size < space_needed) &&
((!cache_ptr->slist_enabled) || (nominated_entries_count < cache_ptr->slist_len)) &&
(entry_ptr != NULL)) {
- HDassert(!(entry_ptr->is_protected));
- HDassert(!(entry_ptr->is_read_only));
- HDassert(entry_ptr->ro_ref_count == 0);
- HDassert(entry_ptr->is_dirty);
- HDassert((!cache_ptr->slist_enabled) || (entry_ptr->in_slist));
+ assert(!(entry_ptr->is_protected));
+ assert(!(entry_ptr->is_read_only));
+ assert(entry_ptr->ro_ref_count == 0);
+ assert(entry_ptr->is_dirty);
+ assert((!cache_ptr->slist_enabled) || (entry_ptr->in_slist));
nominated_addr = entry_ptr->addr;
if (H5AC_add_candidate((H5AC_t *)cache_ptr, nominated_addr) < 0)
@@ -468,7 +468,7 @@ H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr)
entry_ptr = entry_ptr->aux_prev;
} /* end while */
- HDassert(entry_ptr == NULL);
+ assert(entry_ptr == NULL);
/* it is possible that there are some dirty entries on the
* protected entry list as well -- scan it too if necessary
@@ -478,11 +478,11 @@ H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr)
((!cache_ptr->slist_enabled) || (nominated_entries_count < cache_ptr->slist_len)) &&
(entry_ptr != NULL)) {
if (entry_ptr->is_dirty) {
- HDassert(!(entry_ptr->is_protected));
- HDassert(!(entry_ptr->is_read_only));
- HDassert(entry_ptr->ro_ref_count == 0);
- HDassert(entry_ptr->is_dirty);
- HDassert(entry_ptr->in_slist);
+ assert(!(entry_ptr->is_protected));
+ assert(!(entry_ptr->is_read_only));
+ assert(entry_ptr->ro_ref_count == 0);
+ assert(entry_ptr->is_dirty);
+ assert(entry_ptr->in_slist);
nominated_addr = entry_ptr->addr;
if (H5AC_add_candidate((H5AC_t *)cache_ptr, nominated_addr) < 0)
@@ -495,8 +495,8 @@ H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr)
entry_ptr = entry_ptr->next;
} /* end while */
- HDassert((!cache_ptr->slist_enabled) || (nominated_entries_count == cache_ptr->slist_len));
- HDassert(nominated_entries_size == space_needed);
+ assert((!cache_ptr->slist_enabled) || (nominated_entries_count == cache_ptr->slist_len));
+ assert(nominated_entries_size == space_needed);
} /* end if */
done:
@@ -529,7 +529,7 @@ H5C_construct_candidate_list__min_clean(H5C_t *cache_ptr)
FUNC_ENTER_NOAPI(FAIL)
- HDassert(cache_ptr != NULL);
+ assert(cache_ptr != NULL);
/* compute the number of bytes (if any) that must be flushed to get the
* cache back within its min clean constraints.
@@ -555,7 +555,7 @@ H5C_construct_candidate_list__min_clean(H5C_t *cache_ptr)
unsigned nominated_entries_count = 0;
size_t nominated_entries_size = 0;
- HDassert((!cache_ptr->slist_enabled) || (cache_ptr->slist_len > 0));
+ assert((!cache_ptr->slist_enabled) || (cache_ptr->slist_len > 0));
/* Scan the dirty LRU list from tail forward and nominate sufficient
* entries to free up the necessary space.
@@ -566,11 +566,11 @@ H5C_construct_candidate_list__min_clean(H5C_t *cache_ptr)
(entry_ptr != NULL) && (!entry_ptr->flush_me_last)) {
haddr_t nominated_addr;
- HDassert(!(entry_ptr->is_protected));
- HDassert(!(entry_ptr->is_read_only));
- HDassert(entry_ptr->ro_ref_count == 0);
- HDassert(entry_ptr->is_dirty);
- HDassert((!cache_ptr->slist_enabled) || (entry_ptr->in_slist));
+ assert(!(entry_ptr->is_protected));
+ assert(!(entry_ptr->is_read_only));
+ assert(entry_ptr->ro_ref_count == 0);
+ assert(entry_ptr->is_dirty);
+ assert((!cache_ptr->slist_enabled) || (entry_ptr->in_slist));
nominated_addr = entry_ptr->addr;
if (H5AC_add_candidate((H5AC_t *)cache_ptr, nominated_addr) < 0)
@@ -582,9 +582,9 @@ H5C_construct_candidate_list__min_clean(H5C_t *cache_ptr)
entry_ptr = entry_ptr->aux_prev;
} /* end while */
- HDassert((!cache_ptr->slist_enabled) || (nominated_entries_count <= cache_ptr->slist_len));
- HDassert(nominated_entries_size <= cache_ptr->dirty_index_size);
- HDassert(nominated_entries_size >= space_needed);
+ assert((!cache_ptr->slist_enabled) || (nominated_entries_count <= cache_ptr->slist_len));
+ assert(nominated_entries_size <= cache_ptr->dirty_index_size);
+ assert(nominated_entries_size >= space_needed);
} /* end if */
done:
@@ -644,13 +644,13 @@ H5C_mark_entries_as_clean(H5F_t *f, unsigned ce_array_len, haddr_t *ce_array_ptr
FUNC_ENTER_NOAPI(FAIL)
- HDassert(f);
- HDassert(f->shared);
+ assert(f);
+ assert(f->shared);
cache_ptr = f->shared->cache;
- HDassert(cache_ptr);
+ assert(cache_ptr);
- HDassert(ce_array_len > 0);
- HDassert(ce_array_ptr != NULL);
+ assert(ce_array_len > 0);
+ assert(ce_array_ptr != NULL);
#ifdef H5C_DO_EXTREME_SANITY_CHECKS
if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
@@ -678,7 +678,7 @@ H5C_mark_entries_as_clean(H5F_t *f, unsigned ce_array_len, haddr_t *ce_array_ptr
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
#endif /* H5C_DO_SANITY_CHECKS */
- HDassert(H5_addr_defined(addr));
+ assert(H5_addr_defined(addr));
H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL);
@@ -761,7 +761,7 @@ H5C_mark_entries_as_clean(H5F_t *f, unsigned ce_array_len, haddr_t *ce_array_ptr
} /* end while */
#ifdef H5C_DO_SANITY_CHECKS
- HDassert(entries_cleared == other_entries_marked);
+ assert(entries_cleared == other_entries_marked);
#endif /* H5C_DO_SANITY_CHECKS */
/* It is also possible that some of the cleared entries are on the
@@ -792,11 +792,11 @@ H5C_mark_entries_as_clean(H5F_t *f, unsigned ce_array_len, haddr_t *ce_array_ptr
} /* end while */
#ifdef H5C_DO_SANITY_CHECKS
- HDassert(entries_cleared == pinned_entries_marked + other_entries_marked);
- HDassert(entries_cleared + protected_entries_marked == ce_array_len);
+ assert(entries_cleared == pinned_entries_marked + other_entries_marked);
+ assert(entries_cleared + protected_entries_marked == ce_array_len);
#endif /* H5C_DO_SANITY_CHECKS */
- HDassert((entries_cleared == ce_array_len) || ((ce_array_len - entries_cleared) <= cache_ptr->pl_len));
+ assert((entries_cleared == ce_array_len) || ((ce_array_len - entries_cleared) <= cache_ptr->pl_len));
#ifdef H5C_DO_SANITY_CHECKS
u = 0;
@@ -806,7 +806,7 @@ H5C_mark_entries_as_clean(H5F_t *f, unsigned ce_array_len, haddr_t *ce_array_ptr
u++;
entry_ptr = entry_ptr->next;
}
- HDassert((entries_cleared + u) == ce_array_len);
+ assert((entries_cleared + u) == ce_array_len);
#endif /* H5C_DO_SANITY_CHECKS */
done:
@@ -851,7 +851,7 @@ H5C_clear_coll_entries(H5C_t *cache_ptr, hbool_t partial)
H5C_cache_entry_t *prev_ptr = entry_ptr->coll_prev;
/* Sanity check */
- HDassert(entry_ptr->coll_access);
+ assert(entry_ptr->coll_access);
/* Mark entry as independent */
entry_ptr->coll_access = FALSE;
@@ -898,10 +898,10 @@ H5C__collective_write(H5F_t *f)
FUNC_ENTER_PACKAGE
/* Sanity checks */
- HDassert(f != NULL);
+ assert(f != NULL);
cache_ptr = f->shared->cache;
- HDassert(cache_ptr != NULL);
- HDassert(cache_ptr->coll_write_list != NULL);
+ assert(cache_ptr != NULL);
+ assert(cache_ptr->coll_write_list != NULL);
/* Get original transfer mode */
if (H5CX_get_io_xfer_mode(&orig_xfer_mode) < 0)
@@ -932,12 +932,12 @@ H5C__collective_write(H5F_t *f)
/* Fill arrays */
node = H5SL_first(cache_ptr->coll_write_list);
- HDassert(node);
+ assert(node);
if (NULL == (entry_ptr = (H5C_cache_entry_t *)H5SL_item(node)))
HGOTO_ERROR(H5E_CACHE, H5E_NOTFOUND, FAIL, "can't retrieve skip list item")
/* Set up initial array position & buffer base address */
- HDassert(entry_ptr->type);
+ assert(entry_ptr->type);
base_buf = entry_ptr->image_ptr;
addrs[0] = entry_ptr->addr;
sizes[0] = entry_ptr->size;
@@ -955,7 +955,7 @@ H5C__collective_write(H5F_t *f)
HGOTO_ERROR(H5E_CACHE, H5E_NOTFOUND, FAIL, "can't retrieve skip list item")
/* Set up array position */
- HDassert(entry_ptr->type);
+ assert(entry_ptr->type);
addrs[i] = entry_ptr->addr;
sizes[i] = entry_ptr->size;
bufs[i] = entry_ptr->image_ptr;
@@ -1048,22 +1048,22 @@ H5C__flush_candidate_entries(H5F_t *f, unsigned entries_to_flush[H5C_RING_NTYPES
FUNC_ENTER_PACKAGE
- HDassert(f);
- HDassert(f->shared);
+ assert(f);
+ assert(f->shared);
cache_ptr = f->shared->cache;
- HDassert(cache_ptr);
- HDassert(cache_ptr->slist_ptr);
+ assert(cache_ptr);
+ assert(cache_ptr->slist_ptr);
- HDassert(entries_to_flush[H5C_RING_UNDEFINED] == 0);
- HDassert(entries_to_clear[H5C_RING_UNDEFINED] == 0);
+ assert(entries_to_flush[H5C_RING_UNDEFINED] == 0);
+ assert(entries_to_clear[H5C_RING_UNDEFINED] == 0);
#ifdef H5C_DO_SANITY_CHECKS
- HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
- HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
- HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ assert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
+ assert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ assert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ assert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ assert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
+ assert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
for (i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
index_len += cache_ptr->index_ring_len[i];
@@ -1075,12 +1075,12 @@ H5C__flush_candidate_entries(H5F_t *f, unsigned entries_to_flush[H5C_RING_NTYPES
slist_size += cache_ptr->slist_ring_size[i];
} /* end for */
- HDassert(cache_ptr->index_len == index_len);
- HDassert(cache_ptr->index_size == index_size);
- HDassert(cache_ptr->clean_index_size == clean_index_size);
- HDassert(cache_ptr->dirty_index_size == dirty_index_size);
- HDassert(cache_ptr->slist_len == slist_len);
- HDassert(cache_ptr->slist_size == slist_size);
+ assert(cache_ptr->index_len == index_len);
+ assert(cache_ptr->index_size == index_size);
+ assert(cache_ptr->clean_index_size == clean_index_size);
+ assert(cache_ptr->dirty_index_size == dirty_index_size);
+ assert(cache_ptr->slist_len == slist_len);
+ assert(cache_ptr->slist_size == slist_size);
#endif /* H5C_DO_SANITY_CHECKS */
#ifdef H5C_DO_EXTREME_SANITY_CHECKS
@@ -1166,13 +1166,13 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring, unsigned entries_to_flu
FUNC_ENTER_PACKAGE
/* Sanity checks */
- HDassert(f);
- HDassert(f->shared);
+ assert(f);
+ assert(f->shared);
cache_ptr = f->shared->cache;
- HDassert(cache_ptr);
- HDassert(cache_ptr->slist_ptr);
- HDassert(ring > H5C_RING_UNDEFINED);
- HDassert(ring < H5C_RING_NTYPES);
+ assert(cache_ptr);
+ assert(cache_ptr->slist_ptr);
+ assert(ring > H5C_RING_UNDEFINED);
+ assert(ring < H5C_RING_NTYPES);
#ifdef H5C_DO_EXTREME_SANITY_CHECKS
if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
@@ -1202,7 +1202,7 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring, unsigned entries_to_flu
H5C_cache_entry_t *next_ptr;
/* Entries in the LRU must not have flush dependency children */
- HDassert(entry_ptr->flush_dep_nchildren == 0);
+ assert(entry_ptr->flush_dep_nchildren == 0);
/* Remember dirty state of entry to advance to */
if (entry_ptr->prev != NULL)
@@ -1212,7 +1212,7 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring, unsigned entries_to_flu
if (entry_ptr->ring == ring) {
/* If this process needs to clear this entry. */
if (entry_ptr->clear_on_unprotect) {
- HDassert(entry_ptr->is_dirty);
+ assert(entry_ptr->is_dirty);
/* Set entry and flags for operation */
op_ptr = entry_ptr;
@@ -1226,7 +1226,7 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring, unsigned entries_to_flu
entries_cleared++;
} /* end if */
else if (entry_ptr->flush_immediately) {
- HDassert(entry_ptr->is_dirty);
+ assert(entry_ptr->is_dirty);
/* Set entry and flags for operation */
op_ptr = entry_ptr;
@@ -1299,13 +1299,13 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring, unsigned entries_to_flu
* present. Hence the following assertion which should be
* removed if the above changes.
*/
- HDassert(!restart_scan);
- HDassert(entry_ptr->is_dirty == prev_is_dirty);
- HDassert(entry_ptr->next == next_ptr);
- HDassert(!entry_ptr->is_protected);
- HDassert(!entry_ptr->is_pinned);
+ assert(!restart_scan);
+ assert(entry_ptr->is_dirty == prev_is_dirty);
+ assert(entry_ptr->next == next_ptr);
+ assert(!entry_ptr->is_protected);
+ assert(!entry_ptr->is_pinned);
- HDassert(FALSE); /* see comment above */
+ assert(FALSE); /* see comment above */
restart_scan = FALSE;
entry_ptr = cache_ptr->LRU_tail_ptr;
@@ -1344,7 +1344,7 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring, unsigned entries_to_flu
H5C_cache_entry_t *prev_ptr;
hbool_t next_is_dirty = FALSE;
- HDassert(entry_ptr->is_pinned);
+ assert(entry_ptr->is_pinned);
/* Remember dirty state of entry to advance to */
if (entry_ptr->next != NULL)
@@ -1352,7 +1352,7 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring, unsigned entries_to_flu
if (entry_ptr->ring == ring && entry_ptr->flush_dep_ndirty_children == 0) {
if (entry_ptr->clear_on_unprotect) {
- HDassert(entry_ptr->is_dirty);
+ assert(entry_ptr->is_dirty);
/* Set entry and flags for operation */
op_ptr = entry_ptr;
@@ -1364,7 +1364,7 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring, unsigned entries_to_flu
progress = TRUE;
} /* end if */
else if (entry_ptr->flush_immediately) {
- HDassert(entry_ptr->is_dirty);
+ assert(entry_ptr->is_dirty);
/* Set entry and flags for operation */
op_ptr = entry_ptr;
@@ -1424,11 +1424,11 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring, unsigned entries_to_flu
* over from the head.
*/
- HDassert(!restart_scan);
- HDassert(entry_ptr->is_dirty == next_is_dirty);
- HDassert(entry_ptr->prev == prev_ptr);
- HDassert(!entry_ptr->is_protected);
- HDassert(entry_ptr->is_pinned);
+ assert(!restart_scan);
+ assert(entry_ptr->is_dirty == next_is_dirty);
+ assert(entry_ptr->prev == prev_ptr);
+ assert(!entry_ptr->is_protected);
+ assert(entry_ptr->is_pinned);
/* This code should be un-reachable at present,
* as all the operations by entries on flush that could cause
@@ -1436,7 +1436,7 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring, unsigned entries_to_flu
* present. Hence the following assertion which should be
* removed if the above changes.
*/
- HDassert(FALSE);
+ assert(FALSE);
restart_scan = FALSE;
@@ -1460,14 +1460,14 @@ H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring, unsigned entries_to_flu
*/
#ifdef H5C_DO_SANITY_CHECKS
- HDassert(init_index_len == cache_ptr->index_len);
+ assert(init_index_len == cache_ptr->index_len);
#endif /* H5C_DO_SANITY_CHECKS */
if (entries_flushed != entries_to_flush || entries_cleared != entries_to_clear) {
entry_ptr = cache_ptr->il_head;
while (entry_ptr != NULL) {
- HDassert(!entry_ptr->clear_on_unprotect || (entry_ptr->ring > ring));
- HDassert(!entry_ptr->flush_immediately || (entry_ptr->ring > ring));
+ assert(!entry_ptr->clear_on_unprotect || (entry_ptr->ring > ring));
+ assert(!entry_ptr->flush_immediately || (entry_ptr->ring > ring));
entry_ptr = entry_ptr->il_next;
} /* end while */