summaryrefslogtreecommitdiffstats
path: root/src
diff options
context:
space:
mode:
authorQuincey Koziol <quincey@koziol.cc>2023-05-09 20:19:18 (GMT)
committerGitHub <noreply@github.com>2023-05-09 20:19:18 (GMT)
commit1a89fa23fc017d498c6add662d7c3cb707be511f (patch)
treec24a9e2f67b59da880f66d2e67f5ffba63f2bc62 /src
parent80cf4066473736d4c050a9ac47a43551ac3657a5 (diff)
downloadhdf5-1a89fa23fc017d498c6add662d7c3cb707be511f.zip
hdf5-1a89fa23fc017d498c6add662d7c3cb707be511f.tar.gz
hdf5-1a89fa23fc017d498c6add662d7c3cb707be511f.tar.bz2
More code duplication reduction (#2930)
* Add failure value where it's missing from 1+ macros. Clean up whitespace / continuation characters ('\'). Made hash-table macros generic for use in both the package header and test header. Remove duplicated copy & pasted macros (by hoisting difference into #ifdef'd macro). Updated and re-flowed comments to read better. Also clean up a few compiler warnings in production builds. Signed-off-by: Quincey Koziol <quincey@koziol.cc> * Committing clang-format changes * Remove unused variable warning in H5C.c (#2844) * Remove trailing /* NDEBUG */ comment from #endif's * Committing clang-format changes --------- Signed-off-by: Quincey Koziol <quincey@koziol.cc> Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com>
Diffstat (limited to 'src')
-rw-r--r--src/H5C.c58
-rw-r--r--src/H5Cimage.c110
-rw-r--r--src/H5Cmpio.c14
-rw-r--r--src/H5Cpkg.h3551
4 files changed, 1523 insertions, 2210 deletions
diff --git a/src/H5C.c b/src/H5C.c
index a0f80f1..b80ed98 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -127,7 +127,7 @@ static herr_t H5C__verify_len_eoa(H5F_t *f, const H5C_class_t *type, haddr_t add
#ifndef NDEBUG
static void H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t *entry,
const H5C_cache_entry_t *base_entry);
-#endif /* NDEBUG */
+#endif
/*********************/
/* Package Variables */
@@ -410,7 +410,7 @@ H5C_create(size_t max_cache_size, size_t min_clean_size, int max_type_id,
#ifndef NDEBUG
cache_ptr->get_entry_ptr_from_addr_counter = 0;
-#endif /* NDEBUG */
+#endif
/* Set return value */
ret_value = cache_ptr;
@@ -454,9 +454,9 @@ void
H5C_def_auto_resize_rpt_fcn(H5C_t *cache_ptr,
#ifndef NDEBUG
int32_t version,
-#else /* NDEBUG */
+#else
int32_t H5_ATTR_UNUSED version,
-#endif /* NDEBUG */
+#endif
double hit_rate, enum H5C_resize_status status, size_t old_max_cache_size,
size_t new_max_cache_size, size_t old_min_clean_size, size_t new_min_clean_size)
{
@@ -732,7 +732,7 @@ H5C_dest(H5F_t *f)
#endif /* H5C_DO_SANITY_CHECKS */
cache_ptr->magic = 0;
-#endif /* NDEBUG */
+#endif
cache_ptr = H5FL_FREE(H5C_t, cache_ptr);
@@ -1216,7 +1216,7 @@ H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, u
entry_ptr->prefetched_dirty = FALSE;
#ifndef NDEBUG /* debugging field */
entry_ptr->serialization_count = 0;
-#endif /* NDEBUG */
+#endif
/* initialize tag list fields */
entry_ptr->tl_next = NULL;
@@ -1500,7 +1500,7 @@ H5C_mark_entry_clean(void *_thing)
if (was_dirty)
H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr, FAIL)
if (entry_ptr->in_slist)
- H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE)
+ H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE, FAIL)
/* Update stats for entry being marked clean */
H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr)
@@ -1703,7 +1703,7 @@ H5C_move_entry(H5C_t *cache_ptr, const H5C_class_t *type, haddr_t old_addr, hadd
if (entry_ptr->in_slist) {
HDassert(cache_ptr->slist_ptr);
- H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE)
+ H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE, FAIL)
} /* end if */
} /* end if */
@@ -2747,7 +2747,7 @@ H5C_set_slist_enabled(H5C_t *cache_ptr, hbool_t slist_enabled, hbool_t clear_sli
node_ptr = H5SL_first(cache_ptr->slist_ptr);
while (node_ptr != NULL) {
entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE);
+ H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE, FAIL)
node_ptr = H5SL_first(cache_ptr->slist_ptr);
}
}
@@ -3435,8 +3435,8 @@ H5C_create_flush_dependency(void *parent_thing, void *child_thing)
for (u = 0; u < child_entry->flush_dep_nparents; u++)
HDassert(child_entry->flush_dep_parent[u] != parent_entry);
- } /* end block */
-#endif /* NDEBUG */
+ } /* end block */
+#endif
/* More sanity checks */
if (child_entry == parent_entry)
@@ -3529,7 +3529,7 @@ H5C_create_flush_dependency(void *parent_thing, void *child_thing)
HDassert(child_entry->flush_dep_parent_nalloc > 0);
#ifndef NDEBUG
H5C__assert_flush_dep_nocycle(parent_entry, child_entry);
-#endif /* NDEBUG */
+#endif
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -4859,7 +4859,7 @@ H5C__flush_invalidate_cache(H5F_t *f, unsigned flags)
HDassert(cache_ptr->pl_size == 0);
HDassert(cache_ptr->LRU_list_len == 0);
HDassert(cache_ptr->LRU_list_size == 0);
-#endif /* NDEBUG */
+#endif
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -5884,7 +5884,7 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL)
if (entry_ptr->in_slist && del_from_slist_on_destroy)
- H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush)
+ H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush, FAIL)
#ifdef H5_HAVE_PARALLEL
/* Check for collective read access flag */
@@ -5916,7 +5916,7 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
* Hence no differentiation between them.
*/
H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, FAIL)
- H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush)
+ H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush, FAIL)
/* mark the entry as clean and update the index for
* entry clean. Also, call the clear callback
@@ -6005,7 +6005,7 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
entry_ptr->type->image_len((void *)entry_ptr, &curr_len);
HDassert(curr_len == entry_ptr->size);
}
-#endif /* NDEBUG */
+#endif
/* If the file space free size callback is defined, use
* it to get the size of the block of file space to free.
@@ -6508,7 +6508,7 @@ H5C__load_entry(H5F_t *f,
entry->prefetched_dirty = FALSE;
#ifndef NDEBUG /* debugging field */
entry->serialization_count = 0;
-#endif /* NDEBUG */
+#endif
/* initialize tag list fields */
entry->tl_next = NULL;
@@ -6579,8 +6579,10 @@ H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted)
H5C_cache_entry_t *entry_ptr;
H5C_cache_entry_t *prev_ptr;
H5C_cache_entry_t *next_ptr;
- uint32_t num_corked_entries = 0;
- herr_t ret_value = SUCCEED; /* Return value */
+#ifndef NDEBUG
+ uint32_t num_corked_entries = 0;
+#endif
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -6628,8 +6630,10 @@ H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted)
if (entry_ptr->is_dirty && (entry_ptr->tag_info && entry_ptr->tag_info->corked)) {
/* Skip "dirty" corked entries. */
- num_corked_entries = num_corked_entries + 1;
- didnt_flush_entry = TRUE;
+#ifndef NDEBUG
+ ++num_corked_entries;
+#endif
+ didnt_flush_entry = TRUE;
}
else if ((entry_ptr->type->id != H5AC_EPOCH_MARKER_ID) && !entry_ptr->flush_in_progress &&
!entry_ptr->prefetched_dirty) {
@@ -7475,7 +7479,7 @@ H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t *entry, const H5C_cache_en
FUNC_LEAVE_NOAPI_VOID
} /* H5C__assert_flush_dep_nocycle() */
-#endif /* NDEBUG */
+#endif
/*-------------------------------------------------------------------------
* Function: H5C__serialize_cache
@@ -7587,7 +7591,7 @@ H5C__serialize_cache(H5F_t *f)
scan_ptr = scan_ptr->il_next;
} /* end while */
} /* end block */
-#endif /* NDEBUG */
+#endif
/* set cache_ptr->serialization_in_progress to TRUE, and back
* to FALSE at the end of the function. Must maintain this flag
@@ -7653,7 +7657,7 @@ H5C__serialize_cache(H5F_t *f)
scan_ptr = scan_ptr->il_next;
} /* end while */
} /* end block */
-#endif /* NDEBUG */
+#endif
done:
cache_ptr->serialization_in_progress = FALSE;
@@ -7830,7 +7834,7 @@ H5C__serialize_ring(H5F_t *f, H5C_ring_t ring)
#ifndef NDEBUG
/* Increment serialization counter (to detect multiple serializations) */
entry_ptr->serialization_count++;
-#endif /* NDEBUG */
+#endif
} /* end if */
} /* end if */
@@ -7899,7 +7903,7 @@ H5C__serialize_ring(H5F_t *f, H5C_ring_t ring)
#ifndef NDEBUG
/* Increment serialization counter (to detect multiple serializations) */
entry_ptr->serialization_count++;
-#endif /* NDEBUG */
+#endif
} /* end if */
} /* end if */
else {
@@ -8119,7 +8123,7 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
if (entry_ptr->addr == old_addr) {
/* Delete the entry from the hash table and the slist */
H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL);
- H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE);
+ H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE, FAIL)
/* Update the entry for its new address */
entry_ptr->addr = new_addr;
diff --git a/src/H5Cimage.c b/src/H5Cimage.c
index b8f46f1..6d211a4 100644
--- a/src/H5Cimage.c
+++ b/src/H5Cimage.c
@@ -137,7 +137,7 @@ static herr_t H5C__decode_cache_image_header(const H5F_t *f, H5C_t *cache_ptr, c
#ifndef NDEBUG /* only used in assertions */
static herr_t H5C__decode_cache_image_entry(const H5F_t *f, const H5C_t *cache_ptr, const uint8_t **buf,
unsigned entry_num);
-#endif /* NDEBUG */ /* only used in assertions */
+#endif
static herr_t H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr, H5C_cache_entry_t *pf_entry_ptr,
H5C_cache_entry_t **fd_children);
static herr_t H5C__encode_cache_image_header(const H5F_t *f, const H5C_t *cache_ptr, uint8_t **buf);
@@ -401,8 +401,8 @@ H5C__construct_cache_image_buffer(H5F_t *f, H5C_t *cache_ptr)
fake_cache_ptr->image_entries = (H5C_image_entry_t *)H5MM_xfree(fake_cache_ptr->image_entries);
fake_cache_ptr = (H5C_t *)H5MM_xfree(fake_cache_ptr);
- } /* end block */
-#endif /* NDEBUG */
+ } /* end block */
+#endif
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -702,7 +702,7 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t
ds_entry_ptr->prefetched_dirty = pf_entry_ptr->prefetched_dirty;
#ifndef NDEBUG /* debugging field */
ds_entry_ptr->serialization_count = 0;
-#endif /* NDEBUG */
+#endif
H5C__RESET_CACHE_ENTRY_STATS(ds_entry_ptr);
@@ -746,7 +746,7 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t
H5C__SEARCH_INDEX(cache_ptr, addr, pf_entry_ptr, FAIL);
HDassert(NULL == pf_entry_ptr);
-#endif /* NDEBUG */
+#endif
/* Insert the deserialized entry into the cache. */
H5C__INSERT_IN_INDEX(cache_ptr, ds_entry_ptr, FAIL)
@@ -797,7 +797,7 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t
} /* end while */
HDassert(found);
}
-#endif /* NDEBUG */
+#endif
if (H5C_create_flush_dependency(ds_entry_ptr, fd_children[i]) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Can't restore child flush dependency")
@@ -2007,7 +2007,7 @@ H5C__decode_cache_image_entry(const H5F_t *f, const H5C_t *cache_ptr, const uint
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__decode_cache_image_entry() */
-#endif /* NDEBUG */
+#endif
/*-------------------------------------------------------------------------
* Function: H5C__destroy_pf_entry_child_flush_deps()
@@ -2039,10 +2039,12 @@ H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr, H5C_cache_entry_t *pf_e
H5C_cache_entry_t **fd_children)
{
H5C_cache_entry_t *entry_ptr;
- unsigned entries_visited = 0;
- int fd_children_found = 0;
- hbool_t found;
- herr_t ret_value = SUCCEED; /* Return value */
+#ifndef NDEBUG
+ unsigned entries_visited = 0;
+#endif
+ int fd_children_found = 0;
+ hbool_t found;
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -2119,11 +2121,13 @@ H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr, H5C_cache_entry_t *pf_e
u++;
} /* end while */
HDassert(found);
-#endif /* NDEBUG */
+#endif
} /* end if */
} /* end if */
+#ifndef NDEBUG
entries_visited++;
+#endif
entry_ptr = entry_ptr->il_next;
} /* end while */
@@ -2374,12 +2378,14 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr)
{
H5C_cache_entry_t *entry_ptr;
H5C_cache_entry_t *parent_ptr;
- unsigned entries_removed_from_image = 0;
- unsigned external_parent_fd_refs_removed = 0;
- unsigned external_child_fd_refs_removed = 0;
- hbool_t done = FALSE;
- unsigned u; /* Local index variable */
- herr_t ret_value = SUCCEED;
+#ifndef NDEBUG
+ unsigned entries_removed_from_image = 0;
+ unsigned external_parent_fd_refs_removed = 0;
+ unsigned external_child_fd_refs_removed = 0;
+#endif
+ hbool_t done = FALSE;
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED;
FUNC_ENTER_PACKAGE
@@ -2415,7 +2421,9 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr)
entry_ptr->include_in_image) {
/* Must remove child from image -- only do this once */
+#ifndef NDEBUG
entries_removed_from_image++;
+#endif
entry_ptr->include_in_image = FALSE;
} /* end if */
} /* for */
@@ -2458,7 +2466,9 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr)
parent_ptr->fd_dirty_child_count--;
} /* end if */
+#ifndef NDEBUG
external_child_fd_refs_removed++;
+#endif
} /* end if */
} /* for */
} /* end if */
@@ -2483,7 +2493,9 @@ H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr)
HDassert(parent_ptr->addr == entry_ptr->fd_parent_addrs[u]);
entry_ptr->fd_parent_addrs[u] = HADDR_UNDEF;
+#ifndef NDEBUG
external_parent_fd_refs_removed++;
+#endif
} /* end if */
} /* for */
@@ -2650,10 +2662,12 @@ static herr_t
H5C__prep_for_file_close__setup_image_entries_array(H5C_t *cache_ptr)
{
H5C_cache_entry_t *entry_ptr;
- H5C_image_entry_t *image_entries = NULL;
- uint32_t entries_visited = 0;
- unsigned u; /* Local index variable */
- herr_t ret_value = SUCCEED; /* Return value */
+ H5C_image_entry_t *image_entries = NULL;
+#ifndef NDEBUG
+ uint32_t entries_visited = 0;
+#endif
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -2736,7 +2750,9 @@ H5C__prep_for_file_close__setup_image_entries_array(H5C_t *cache_ptr)
HDassert(u <= cache_ptr->num_entries_in_image);
} /* end if */
+#ifndef NDEBUG
entries_visited++;
+#endif
entry_ptr = entry_ptr->il_next;
} /* end while */
@@ -2789,14 +2805,16 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr)
{
H5C_cache_entry_t *entry_ptr;
hbool_t include_in_image;
- unsigned entries_visited = 0;
- int lru_rank = 1;
- uint32_t num_entries_tentatively_in_image = 0;
- uint32_t num_entries_in_image = 0;
- size_t image_len;
- size_t entry_header_len;
- size_t fd_parents_list_len;
- herr_t ret_value = SUCCEED; /* Return value */
+ int lru_rank = 1;
+#ifndef NDEBUG
+ unsigned entries_visited = 0;
+ uint32_t num_entries_tentatively_in_image = 0;
+#endif
+ uint32_t num_entries_in_image = 0;
+ size_t image_len;
+ size_t entry_header_len;
+ size_t fd_parents_list_len;
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -2897,10 +2915,14 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr)
entry_ptr->fd_dirty_child_count = entry_ptr->flush_dep_ndirty_children;
} /* end if */
+#ifndef NDEBUG
num_entries_tentatively_in_image++;
+#endif
} /* end if */
+#ifndef NDEBUG
entries_visited++;
+#endif
entry_ptr = entry_ptr->il_next;
} /* end while */
HDassert(entries_visited == cache_ptr->index_len);
@@ -2931,12 +2953,14 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr)
if (H5C__prep_for_file_close__compute_fd_heights(cache_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "computation of flush dependency heights failed?!?")
- /* At this point, all entries that will appear in the cache
- * image should be marked correctly. Compute the size of the
- * cache image.
- */
+ /* At this point, all entries that will appear in the cache
+ * image should be marked correctly. Compute the size of the
+ * cache image.
+ */
+#ifndef NDEBUG
entries_visited = 0;
- entry_ptr = cache_ptr->il_head;
+#endif
+ entry_ptr = cache_ptr->il_head;
while (entry_ptr != NULL) {
HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
@@ -2950,7 +2974,9 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr)
num_entries_in_image++;
} /* end if */
+#ifndef NDEBUG
entries_visited++;
+#endif
entry_ptr = entry_ptr->il_next;
} /* end while */
HDassert(entries_visited == cache_ptr->index_len);
@@ -2968,7 +2994,9 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr)
#endif
cache_ptr->num_entries_in_image = num_entries_in_image;
- entries_visited = 0;
+#ifndef NDEBUG
+ entries_visited = 0;
+#endif
/* Now scan the LRU list to set the lru_rank fields of all entries
* on the LRU.
@@ -3001,7 +3029,9 @@ H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr)
lru_rank++;
} /* end else-if */
+#ifndef NDEBUG
entries_visited++;
+#endif
entry_ptr = entry_ptr->next;
} /* end while */
HDassert(entries_visited == cache_ptr->LRU_list_len);
@@ -3185,8 +3215,8 @@ H5C__reconstruct_cache_contents(H5F_t *f, H5C_t *cache_ptr)
* we add code to store and restore adaptive resize status.
*/
HDassert(lru_rank_holes <= H5C__MAX_EPOCH_MARKERS);
- } /* end block */
-#endif /* NDEBUG */
+ } /* end block */
+#endif
/* Check to see if the cache is oversize, and evict entries as
* necessary to remain within limits.
@@ -3237,7 +3267,7 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **b
hbool_t in_lru = FALSE;
hbool_t is_fd_parent = FALSE;
hbool_t is_fd_child = FALSE;
-#endif /* NDEBUG */ /* only used in assertions */
+#endif
const uint8_t *p;
hbool_t file_is_rw;
H5C_cache_entry_t *ret_value = NULL; /* Return value */
@@ -3274,7 +3304,7 @@ H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr, const uint8_t **b
is_fd_parent = TRUE;
if (flags & H5C__MDCI_ENTRY_IS_FD_CHILD_FLAG)
is_fd_child = TRUE;
-#endif /* NDEBUG */ /* only used in assertions */
+#endif
/* Force dirty entries to clean if the file read only -- must do
* this as otherwise the cache will attempt to write them on file
diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c
index a92ac10..5822746 100644
--- a/src/H5Cmpio.c
+++ b/src/H5Cmpio.c
@@ -159,10 +159,12 @@ herr_t
H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, haddr_t *candidates_list_ptr,
int mpi_rank, int mpi_size)
{
- unsigned first_entry_to_flush;
- unsigned last_entry_to_flush;
- unsigned total_entries_to_clear = 0;
- unsigned total_entries_to_flush = 0;
+ unsigned first_entry_to_flush;
+ unsigned last_entry_to_flush;
+#ifndef NDEBUG
+ unsigned total_entries_to_clear = 0;
+ unsigned total_entries_to_flush = 0;
+#endif
unsigned *candidate_assignment_table = NULL;
unsigned entries_to_flush[H5C_RING_NTYPES];
unsigned entries_to_clear[H5C_RING_NTYPES];
@@ -316,12 +318,16 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha
* markings.
*/
if (u >= first_entry_to_flush && u <= last_entry_to_flush) {
+#ifndef NDEBUG
total_entries_to_flush++;
+#endif
entries_to_flush[entry_ptr->ring]++;
entry_ptr->flush_immediately = TRUE;
} /* end if */
else {
+#ifndef NDEBUG
total_entries_to_clear++;
+#endif
entries_to_clear[entry_ptr->ring]++;
entry_ptr->clear_on_unprotect = TRUE;
} /* end else */
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index cfc7bcf..2cad464 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -76,41 +76,37 @@
#ifdef H5C_DO_SANITY_CHECKS
#define H5C__GEN_DLL_PRE_REMOVE_SC(entry_ptr, list_next, list_prev, head_ptr, tail_ptr, len, list_size, fail_val) \
-if (((head_ptr) == NULL) || ((tail_ptr) == NULL) || \
- ((entry_ptr) == NULL) || ((len) <= 0) || \
- ((list_size) < (entry_ptr)->size) || \
- ((entry_ptr)->list_prev == NULL && (head_ptr) != (entry_ptr)) || \
- ((entry_ptr)->list_next == NULL && (tail_ptr) != (entry_ptr)) || \
+if ((head_ptr) == NULL || (tail_ptr) == NULL || \
+ (entry_ptr) == NULL || (len) <= 0 || \
+ (list_size) < (entry_ptr)->size || \
+ ((entry_ptr)->list_prev == NULL && (head_ptr) != (entry_ptr)) || \
+ ((entry_ptr)->list_next == NULL && (tail_ptr) != (entry_ptr)) || \
((len) == 1 && \
!((head_ptr) == (entry_ptr) && (tail_ptr) == (entry_ptr) && \
- (entry_ptr)->list_next == NULL && (entry_ptr)->list_prev == NULL && \
+ (entry_ptr)->list_next == NULL && (entry_ptr)->list_prev == NULL && \
(list_size) == (entry_ptr)->size \
) \
) \
) { \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "DLL pre remove SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "DLL pre remove SC failed") \
}
#define H5C__GEN_DLL_PRE_INSERT_SC(entry_ptr, list_next, list_prev, head_ptr, tail_ptr, len, list_size, fail_val) \
if ((entry_ptr) == NULL || (entry_ptr)->list_next != NULL || (entry_ptr)->list_prev != NULL || \
- (((head_ptr) == NULL || (tail_ptr) == NULL) && (head_ptr) != (tail_ptr)) || \
- ( (len) == 0 && \
- ((list_size) > 0 || \
- (head_ptr) != NULL || (tail_ptr) != NULL \
- ) \
- ) || \
- ((len) == 1 && \
- ((head_ptr) != (tail_ptr) || \
- (head_ptr) == NULL || (head_ptr)->size != (list_size) \
- ) \
- ) || \
- ((len) >= 1 && \
- ((head_ptr) == NULL || (head_ptr)->list_prev != NULL || \
- (tail_ptr) == NULL || (tail_ptr)->list_next != NULL \
- ) \
- ) \
- ) { \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "DLL pre insert SC failed") \
+ (((head_ptr) == NULL || (tail_ptr) == NULL) && (head_ptr) != (tail_ptr)) || \
+ ((len) == 0 && \
+ ((list_size) > 0 || (head_ptr) != NULL || (tail_ptr) != NULL) \
+ ) || \
+ ((len) == 1 && \
+ ((head_ptr) != (tail_ptr) || (head_ptr) == NULL || \
+ (head_ptr)->size != (list_size)) \
+ ) || \
+ ((len) >= 1 && \
+ ((head_ptr) == NULL || (head_ptr)->list_prev != NULL || \
+ (tail_ptr) == NULL || (tail_ptr)->list_next != NULL) \
+ ) \
+ ) { \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "DLL pre insert SC failed") \
}
#define H5C__GEN_DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) \
@@ -122,8 +118,7 @@ if ((dll_len) <= 0 || (dll_size) <= 0 || (old_size) <= 0 || \
}
#define H5C__GEN_DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) \
-if ((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size)) \
- ) { \
+if ((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size))) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "DLL post size update SC failed") \
}
#else /* H5C_DO_SANITY_CHECKS */
@@ -146,7 +141,7 @@ if ((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size)) \
(tail_ptr) = (entry_ptr); \
} \
(len)++; \
- (list_size) += (entry_ptr)->size; \
+ (list_size) += (entry_ptr)->size; \
} /* H5C__GEN_DLL_APPEND() */
#define H5C__GEN_DLL_PREPEND(entry_ptr, list_next, list_prev, head_ptr, tail_ptr, len, list_size, fail_val) \
@@ -157,43 +152,43 @@ if ((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size)) \
(tail_ptr) = (entry_ptr); \
} \
else { \
- (head_ptr)->list_prev = (entry_ptr); \
- (entry_ptr)->list_next = (head_ptr); \
+ (head_ptr)->list_prev = (entry_ptr); \
+ (entry_ptr)->list_next = (head_ptr); \
(head_ptr) = (entry_ptr); \
} \
(len)++; \
- (list_size) += (entry_ptr)->size; \
+ (list_size) += (entry_ptr)->size; \
} /* H5C__GEN_DLL_PREPEND() */
#define H5C__GEN_DLL_REMOVE(entry_ptr, list_next, list_prev, head_ptr, tail_ptr, len, list_size, fail_val) \
{ \
H5C__GEN_DLL_PRE_REMOVE_SC(entry_ptr, list_next, list_prev, head_ptr, tail_ptr, len, list_size, fail_val) \
if ((head_ptr) == (entry_ptr)) { \
- (head_ptr) = (entry_ptr)->list_next; \
+ (head_ptr) = (entry_ptr)->list_next; \
if ((head_ptr) != NULL) \
- (head_ptr)->list_prev = NULL; \
+ (head_ptr)->list_prev = NULL; \
} \
else \
- (entry_ptr)->list_prev->list_next = (entry_ptr)->list_next; \
+ (entry_ptr)->list_prev->list_next = (entry_ptr)->list_next; \
if ((tail_ptr) == (entry_ptr)) { \
- (tail_ptr) = (entry_ptr)->list_prev; \
+ (tail_ptr) = (entry_ptr)->list_prev; \
if ((tail_ptr) != NULL) \
- (tail_ptr)->list_next = NULL; \
+ (tail_ptr)->list_next = NULL; \
} \
else \
- (entry_ptr)->list_next->list_prev = (entry_ptr)->list_prev; \
- (entry_ptr)->list_next = NULL; \
- (entry_ptr)->list_prev = NULL; \
+ (entry_ptr)->list_next->list_prev = (entry_ptr)->list_prev; \
+ (entry_ptr)->list_next = NULL; \
+ (entry_ptr)->list_prev = NULL; \
(len)--; \
- (list_size) -= (entry_ptr)->size; \
+ (list_size) -= (entry_ptr)->size; \
} /* H5C__GEN_DLL_REMOVE() */
#define H5C__GEN_DLL_UPDATE_FOR_SIZE_CHANGE(dll_len, dll_size, old_size, new_size, fail_val) \
{ \
- H5C__GEN_DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) \
+ H5C__GEN_DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) \
(dll_size) -= (old_size); \
(dll_size) += (new_size); \
- H5C__GEN_DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) \
+ H5C__GEN_DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size, fail_val) \
} /* H5C__GEN_DLL_UPDATE_FOR_SIZE_CHANGE() */
@@ -236,7 +231,7 @@ if ((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size)) \
***********************************************************************/
#define H5C__UPDATE_CACHE_HIT_RATE_STATS(cache_ptr, hit) \
- (cache_ptr)->cache_accesses++; \
+ (cache_ptr)->cache_accesses++; \
if (hit) \
(cache_ptr)->cache_hits++;
@@ -263,27 +258,27 @@ if ((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size)) \
if ((cache_ptr)->pel_size > (cache_ptr)->max_pel_size) \
(cache_ptr)->max_pel_size = (cache_ptr)->pel_size;
-#define H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr) \
- if ((cache_ptr)->flush_in_progress) \
+#define H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr) \
+ if ((cache_ptr)->flush_in_progress) \
(cache_ptr)->cache_flush_moves[(entry_ptr)->type->id]++; \
- if ((entry_ptr)->flush_in_progress) \
+ if ((entry_ptr)->flush_in_progress) \
(cache_ptr)->entry_flush_moves[(entry_ptr)->type->id]++; \
(cache_ptr)->moves[(entry_ptr)->type->id]++; \
(cache_ptr)->entries_relocated_counter++;
-#define H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size)\
- if ((cache_ptr)->flush_in_progress) \
+#define H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size) \
+ if ((cache_ptr)->flush_in_progress) \
(cache_ptr)->cache_flush_size_changes[(entry_ptr)->type->id]++; \
- if ((entry_ptr)->flush_in_progress) \
+ if ((entry_ptr)->flush_in_progress) \
(cache_ptr)->entry_flush_size_changes[(entry_ptr)->type->id]++; \
if ((entry_ptr)->size < (new_size)) { \
(cache_ptr)->size_increases[(entry_ptr)->type->id]++; \
- H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \
+ H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \
if ((cache_ptr)->slist_size > (cache_ptr)->max_slist_size) \
- (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \
+ (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \
if ((cache_ptr)->pl_size > (cache_ptr)->max_pl_size) \
- (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \
- } else if ((entry_ptr)->size > (new_size)) \
+ (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \
+ } else if ((entry_ptr)->size > (new_size)) \
(cache_ptr)->size_decreases[(entry_ptr)->type->id]++;
#define H5C__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr) \
@@ -292,13 +287,13 @@ if ((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size)) \
#define H5C__UPDATE_STATS_FOR_HT_DELETION(cache_ptr) \
(cache_ptr)->total_ht_deletions++;
-#define H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, success, depth) \
- if (success) { \
- (cache_ptr)->successful_ht_searches++; \
- (cache_ptr)->total_successful_ht_search_depth += depth; \
- } else { \
- (cache_ptr)->failed_ht_searches++; \
- (cache_ptr)->total_failed_ht_search_depth += depth; \
+#define H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, success, depth) \
+ if (success) { \
+ (cache_ptr)->successful_ht_searches++; \
+ (cache_ptr)->total_successful_ht_search_depth += depth; \
+ } else { \
+ (cache_ptr)->failed_ht_searches++; \
+ (cache_ptr)->total_failed_ht_search_depth += depth; \
}
#define H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr) \
@@ -316,35 +311,35 @@ if ((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size)) \
#if H5C_COLLECT_CACHE_ENTRY_STATS
#define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr) \
-{ \
- (entry_ptr)->accesses = 0; \
- (entry_ptr)->clears = 0; \
- (entry_ptr)->flushes = 0; \
- (entry_ptr)->pins = 0; \
+{ \
+ (entry_ptr)->accesses = 0; \
+ (entry_ptr)->clears = 0; \
+ (entry_ptr)->flushes = 0; \
+ (entry_ptr)->pins = 0; \
}
-#define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) \
-{ \
+#define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) \
+{ \
(cache_ptr)->clears[(entry_ptr)->type->id]++; \
- if((entry_ptr)->is_pinned) \
+ if((entry_ptr)->is_pinned) \
(cache_ptr)->pinned_clears[(entry_ptr)->type->id]++; \
- (entry_ptr)->clears++; \
+ (entry_ptr)->clears++; \
}
-#define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) \
-{ \
+#define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) \
+{ \
(cache_ptr)->flushes[(entry_ptr)->type->id]++; \
- if((entry_ptr)->is_pinned) \
+ if((entry_ptr)->is_pinned) \
(cache_ptr)->pinned_flushes[(entry_ptr)->type->id]++; \
- (entry_ptr)->flushes++; \
+ (entry_ptr)->flushes++; \
}
#define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership) \
{ \
- if (take_ownership) \
- (cache_ptr)->take_ownerships[(entry_ptr)->type->id]++; \
+ if (take_ownership) \
+ (cache_ptr)->take_ownerships[(entry_ptr)->type->id]++; \
else \
- (cache_ptr)->evictions[(entry_ptr)->type->id]++; \
+ (cache_ptr)->evictions[(entry_ptr)->type->id]++; \
if ((entry_ptr)->accesses > (cache_ptr)->max_accesses[(entry_ptr)->type->id]) \
(cache_ptr)->max_accesses[(entry_ptr)->type->id] = (entry_ptr)->accesses; \
if ((entry_ptr)->accesses < (cache_ptr)->min_accesses[(entry_ptr)->type->id]) \
@@ -353,68 +348,68 @@ if ((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size)) \
(cache_ptr)->max_clears[(entry_ptr)->type->id] = (entry_ptr)->clears; \
if ((entry_ptr)->flushes > (cache_ptr)->max_flushes[(entry_ptr)->type->id]) \
(cache_ptr)->max_flushes[(entry_ptr)->type->id] = (entry_ptr)->flushes; \
- if ((entry_ptr)->size > (cache_ptr)->max_size[(entry_ptr)->type->id]) \
- (cache_ptr)->max_size[(entry_ptr)->type->id] = (entry_ptr)->size; \
- if ((entry_ptr)->pins > (cache_ptr)->max_pins[(entry_ptr)->type->id]) \
- (cache_ptr)->max_pins[(entry_ptr)->type->id] = (entry_ptr)->pins; \
+ if ((entry_ptr)->size > (cache_ptr)->max_size[(entry_ptr)->type->id]) \
+ (cache_ptr)->max_size[(entry_ptr)->type->id] = (entry_ptr)->size; \
+ if ((entry_ptr)->pins > (cache_ptr)->max_pins[(entry_ptr)->type->id]) \
+ (cache_ptr)->max_pins[(entry_ptr)->type->id] = (entry_ptr)->pins; \
}
-#define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) \
-{ \
- (cache_ptr)->insertions[(entry_ptr)->type->id]++; \
- if ((entry_ptr)->is_pinned) { \
- (cache_ptr)->pinned_insertions[(entry_ptr)->type->id]++; \
- (cache_ptr)->pins[(entry_ptr)->type->id]++; \
- (entry_ptr)->pins++; \
- if ((cache_ptr)->pel_len > (cache_ptr)->max_pel_len) \
- (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \
- if ((cache_ptr)->pel_size > (cache_ptr)->max_pel_size) \
- (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \
- } \
- if ((cache_ptr)->index_len > (cache_ptr)->max_index_len) \
- (cache_ptr)->max_index_len = (cache_ptr)->index_len; \
- H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \
- if ((cache_ptr)->slist_len > (cache_ptr)->max_slist_len) \
- (cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \
- if ((cache_ptr)->slist_size > (cache_ptr)->max_slist_size) \
- (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \
+#define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) \
+{ \
+ (cache_ptr)->insertions[(entry_ptr)->type->id]++; \
+ if ((entry_ptr)->is_pinned) { \
+ (cache_ptr)->pinned_insertions[(entry_ptr)->type->id]++; \
+ (cache_ptr)->pins[(entry_ptr)->type->id]++; \
+ (entry_ptr)->pins++; \
+ if ((cache_ptr)->pel_len > (cache_ptr)->max_pel_len) \
+ (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \
+ if ((cache_ptr)->pel_size > (cache_ptr)->max_pel_size) \
+ (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \
+ } \
+ if ((cache_ptr)->index_len > (cache_ptr)->max_index_len) \
+ (cache_ptr)->max_index_len = (cache_ptr)->index_len; \
+ H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \
+ if ((cache_ptr)->slist_len > (cache_ptr)->max_slist_len) \
+ (cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \
+ if ((cache_ptr)->slist_size > (cache_ptr)->max_slist_size) \
+ (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \
if ((entry_ptr)->size > (cache_ptr)->max_size[(entry_ptr)->type->id]) \
(cache_ptr)->max_size[(entry_ptr)->type->id] = (entry_ptr)->size; \
- (cache_ptr)->entries_inserted_counter++; \
+ (cache_ptr)->entries_inserted_counter++; \
}
-#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \
-{ \
+#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \
+{ \
if (hit) \
(cache_ptr)->hits[(entry_ptr)->type->id]++; \
- else \
+ else \
(cache_ptr)->misses[(entry_ptr)->type->id]++; \
- if (!(entry_ptr)->is_read_only) \
+ if (!(entry_ptr)->is_read_only) \
(cache_ptr)->write_protects[(entry_ptr)->type->id]++; \
else { \
(cache_ptr)->read_protects[(entry_ptr)->type->id]++; \
- if ((entry_ptr)->ro_ref_count > (cache_ptr)->max_read_protects[(entry_ptr)->type->id]) \
- (cache_ptr)->max_read_protects[(entry_ptr)->type->id] = (entry_ptr)->ro_ref_count; \
- } \
+ if ((entry_ptr)->ro_ref_count > (cache_ptr)->max_read_protects[(entry_ptr)->type->id]) \
+ (cache_ptr)->max_read_protects[(entry_ptr)->type->id] = (entry_ptr)->ro_ref_count; \
+ } \
if ((cache_ptr)->index_len > (cache_ptr)->max_index_len) \
- (cache_ptr)->max_index_len = (cache_ptr)->index_len; \
- H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \
+ (cache_ptr)->max_index_len = (cache_ptr)->index_len; \
+ H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \
if ((cache_ptr)->pl_len > (cache_ptr)->max_pl_len) \
- (cache_ptr)->max_pl_len = (cache_ptr)->pl_len; \
+ (cache_ptr)->max_pl_len = (cache_ptr)->pl_len; \
if ((cache_ptr)->pl_size > (cache_ptr)->max_pl_size) \
- (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \
- if ((entry_ptr)->size > (cache_ptr)->max_size[(entry_ptr)->type->id]) \
+ (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \
+ if ((entry_ptr)->size > (cache_ptr)->max_size[(entry_ptr)->type->id]) \
(cache_ptr)->max_size[(entry_ptr)->type->id] = (entry_ptr)->size; \
(entry_ptr)->accesses++; \
}
#define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) \
{ \
- (cache_ptr)->pins[(entry_ptr)->type->id]++; \
+ (cache_ptr)->pins[(entry_ptr)->type->id]++; \
(entry_ptr)->pins++; \
- if ((cache_ptr)->pel_len > (cache_ptr)->max_pel_len) \
+ if ((cache_ptr)->pel_len > (cache_ptr)->max_pel_len) \
(cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \
- if ((cache_ptr)->pel_size > (cache_ptr)->max_pel_size) \
+ if ((cache_ptr)->pel_size > (cache_ptr)->max_pel_size) \
(cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \
}
@@ -422,80 +417,80 @@ if ((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size)) \
#define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr)
-#define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) \
-{ \
+#define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) \
+{ \
(cache_ptr)->clears[(entry_ptr)->type->id]++; \
- if((entry_ptr)->is_pinned) \
+ if((entry_ptr)->is_pinned) \
(cache_ptr)->pinned_clears[(entry_ptr)->type->id]++; \
}
-#define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) \
-{ \
+#define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) \
+{ \
(cache_ptr)->flushes[(entry_ptr)->type->id]++; \
- if ((entry_ptr)->is_pinned) \
+ if ((entry_ptr)->is_pinned) \
(cache_ptr)->pinned_flushes[(entry_ptr)->type->id]++; \
}
#define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership) \
-{ \
- if (take_ownership) \
- (cache_ptr)->take_ownerships[(entry_ptr)->type->id]++; \
- else \
- (cache_ptr)->evictions[(entry_ptr)->type->id]++; \
+{ \
+ if (take_ownership) \
+ (cache_ptr)->take_ownerships[(entry_ptr)->type->id]++; \
+ else \
+ (cache_ptr)->evictions[(entry_ptr)->type->id]++; \
}
-#define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) \
-{ \
- (((cache_ptr)->insertions)[(entry_ptr)->type->id])++; \
- if ( (entry_ptr)->is_pinned ) { \
- (((cache_ptr)->pinned_insertions)[(entry_ptr)->type->id])++; \
- ((cache_ptr)->pins)[(entry_ptr)->type->id]++; \
- if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \
- (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \
- if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \
- (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \
- } \
- if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \
- (cache_ptr)->max_index_len = (cache_ptr)->index_len; \
- H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \
- if ( (cache_ptr)->slist_len > (cache_ptr)->max_slist_len ) \
- (cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \
- if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \
- (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \
- (cache_ptr)->entries_inserted_counter++; \
+#define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) \
+{ \
+ (cache_ptr)->insertions[(entry_ptr)->type->id]++; \
+ if ((entry_ptr)->is_pinned) { \
+ (cache_ptr)->pinned_insertions[(entry_ptr)->type->id]++; \
+ (cache_ptr)->pins[(entry_ptr)->type->id]++; \
+ if ((cache_ptr)->pel_len > (cache_ptr)->max_pel_len) \
+ (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \
+ if ((cache_ptr)->pel_size > (cache_ptr)->max_pel_size) \
+ (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \
+ } \
+ if ((cache_ptr)->index_len > (cache_ptr)->max_index_len) \
+ (cache_ptr)->max_index_len = (cache_ptr)->index_len; \
+ H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \
+ if ((cache_ptr)->slist_len > (cache_ptr)->max_slist_len) \
+ (cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \
+ if ((cache_ptr)->slist_size > (cache_ptr)->max_slist_size) \
+ (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \
+ (cache_ptr)->entries_inserted_counter++; \
}
-#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \
-{ \
- if ( hit ) \
- ((cache_ptr)->hits)[(entry_ptr)->type->id]++; \
- else \
- ((cache_ptr)->misses)[(entry_ptr)->type->id]++; \
- if ( ! ((entry_ptr)->is_read_only) ) \
- ((cache_ptr)->write_protects)[(entry_ptr)->type->id]++; \
- else { \
- ((cache_ptr)->read_protects)[(entry_ptr)->type->id]++; \
- if ( ((entry_ptr)->ro_ref_count) > \
- ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] ) \
- ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] = \
- ((entry_ptr)->ro_ref_count); \
- } \
- if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \
- (cache_ptr)->max_index_len = (cache_ptr)->index_len; \
- H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \
- if ( (cache_ptr)->pl_len > (cache_ptr)->max_pl_len ) \
- (cache_ptr)->max_pl_len = (cache_ptr)->pl_len; \
- if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size ) \
- (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \
+#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \
+{ \
+ if (hit) \
+ (cache_ptr)->hits[(entry_ptr)->type->id]++; \
+ else \
+ (cache_ptr)->misses[(entry_ptr)->type->id]++; \
+ if (!(entry_ptr)->is_read_only) \
+ (cache_ptr)->write_protects[(entry_ptr)->type->id]++; \
+ else { \
+ (cache_ptr)->read_protects[(entry_ptr)->type->id]++; \
+ if ((entry_ptr)->ro_ref_count > \
+ (cache_ptr)->max_read_protects[(entry_ptr)->type->id]) \
+ (cache_ptr)->max_read_protects[(entry_ptr)->type->id] = \
+ (entry_ptr)->ro_ref_count; \
+ } \
+ if ((cache_ptr)->index_len > (cache_ptr)->max_index_len) \
+ (cache_ptr)->max_index_len = (cache_ptr)->index_len; \
+ H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \
+ if ((cache_ptr)->pl_len > (cache_ptr)->max_pl_len) \
+ (cache_ptr)->max_pl_len = (cache_ptr)->pl_len; \
+ if ((cache_ptr)->pl_size > (cache_ptr)->max_pl_size) \
+ (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \
}
-#define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) \
-{ \
- ((cache_ptr)->pins)[(entry_ptr)->type->id]++; \
- if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \
- (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \
- if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \
- (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \
+#define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) \
+{ \
+ (cache_ptr)->pins[(entry_ptr)->type->id]++; \
+ if ((cache_ptr)->pel_len > (cache_ptr)->max_pel_len) \
+ (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \
+ if ((cache_ptr)->pel_size > (cache_ptr)->max_pel_size) \
+ (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \
}
#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
@@ -531,318 +526,267 @@ if ((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size)) \
* The following macros handle searches, insertions, and deletion in
* the hash table.
*
- * When modifying these macros, remember to modify the similar macros
- * in tst/cache.c
- *
***********************************************************************/
-/* H5C__HASH_TABLE_LEN is defined in H5Cpkg.h. It mut be a power of two. */
-
-#define H5C__HASH_MASK ((size_t)(H5C__HASH_TABLE_LEN - 1) << 3)
-
+#define H5C__HASH_MASK ((size_t)(H5C__HASH_TABLE_LEN - 1) << 3)
#define H5C__HASH_FCN(x) (int)((unsigned)((x) & H5C__HASH_MASK) >> 3)
+#define H5C__POST_HT_SHIFT_TO_FRONT_SC_CMP(cache_ptr, entry_ptr, k) \
+((cache_ptr) == NULL || (cache_ptr)->index[k] != (entry_ptr) || \
+ (entry_ptr)->ht_prev != NULL \
+)
+#define H5C__PRE_HT_SEARCH_SC_CMP(cache_ptr, entry_addr) \
+((cache_ptr) == NULL || (cache_ptr)->magic != H5C__H5C_T_MAGIC || \
+ (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) || \
+ !H5F_addr_defined(entry_addr) || \
+ H5C__HASH_FCN(entry_addr) < 0 || \
+ H5C__HASH_FCN(entry_addr) >= H5C__HASH_TABLE_LEN \
+)
+#define H5C__POST_SUC_HT_SEARCH_SC_CMP(cache_ptr, entry_ptr, k) \
+((cache_ptr) == NULL || (cache_ptr)->magic != H5C__H5C_T_MAGIC || \
+ (cache_ptr)->index_len < 1 || \
+ (entry_ptr) == NULL || \
+ (cache_ptr)->index_size < (entry_ptr)->size || \
+ (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) || \
+ (entry_ptr)->size <= 0 || \
+ (cache_ptr)->index[k] == NULL || \
+ ((cache_ptr)->index[k] != (entry_ptr) && (entry_ptr)->ht_prev == NULL) || \
+ ((cache_ptr)->index[k] == (entry_ptr) && (entry_ptr)->ht_prev != NULL) || \
+ ((entry_ptr)->ht_prev != NULL && (entry_ptr)->ht_prev->ht_next != (entry_ptr)) || \
+ ((entry_ptr)->ht_next != NULL && (entry_ptr)->ht_next->ht_prev != (entry_ptr)) \
+)
+
#ifdef H5C_DO_SANITY_CHECKS
#define H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
-if ( ( (cache_ptr) == NULL ) || \
- ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
- ( (entry_ptr) == NULL ) || \
- ( ! H5F_addr_defined((entry_ptr)->addr) ) || \
- ( (entry_ptr)->ht_next != NULL ) || \
- ( (entry_ptr)->ht_prev != NULL ) || \
- ( (entry_ptr)->size <= 0 ) || \
- ( H5C__HASH_FCN((entry_ptr)->addr) < 0 ) || \
- ( H5C__HASH_FCN((entry_ptr)->addr) >= H5C__HASH_TABLE_LEN ) || \
- ( (cache_ptr)->index_size != \
- ((cache_ptr)->clean_index_size + \
- (cache_ptr)->dirty_index_size) ) || \
- ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
- ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
- ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) || \
- ( (entry_ptr)->ring >= H5C_RING_NTYPES ) || \
- ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
- (cache_ptr)->index_len ) || \
- ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
- (cache_ptr)->index_size ) || \
- ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+if ((cache_ptr) == NULL || (cache_ptr)->magic != H5C__H5C_T_MAGIC || \
+ (entry_ptr) == NULL || !H5F_addr_defined((entry_ptr)->addr) || \
+ (entry_ptr)->ht_next != NULL || (entry_ptr)->ht_prev != NULL || \
+ (entry_ptr)->size <= 0 || \
+ H5C__HASH_FCN((entry_ptr)->addr) < 0 || \
+ H5C__HASH_FCN((entry_ptr)->addr) >= H5C__HASH_TABLE_LEN || \
+ (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) || \
+ (cache_ptr)->index_size < (cache_ptr)->clean_index_size || \
+ (cache_ptr)->index_size < (cache_ptr)->dirty_index_size || \
+ (entry_ptr)->ring <= H5C_RING_UNDEFINED || \
+ (entry_ptr)->ring >= H5C_RING_NTYPES || \
+ (cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len || \
+ (cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size || \
+ (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
- (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \
- ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
- ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) || \
+ (cache_ptr)->index_len != (cache_ptr)->il_len || \
+ (cache_ptr)->index_size != (cache_ptr)->il_size \
+ ) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "pre HT insert SC failed") \
}
#define H5C__POST_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
-if ( ( (cache_ptr) == NULL ) || \
- ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
- ( (cache_ptr)->index_size != \
- ((cache_ptr)->clean_index_size + \
- (cache_ptr)->dirty_index_size) ) || \
- ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
- ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
- ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] == 0 ) || \
- ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
- (cache_ptr)->index_len ) || \
- ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
- (cache_ptr)->index_size ) || \
- ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+if ((cache_ptr) == NULL || (cache_ptr)->magic != H5C__H5C_T_MAGIC || \
+ (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) || \
+ (cache_ptr)->index_size < (cache_ptr)->clean_index_size || \
+ (cache_ptr)->index_size < (cache_ptr)->dirty_index_size || \
+ (cache_ptr)->index_ring_len[(entry_ptr)->ring] == 0 || \
+ (cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len || \
+ (cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size || \
+ (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
- (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \
- ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
- ( (cache_ptr)->index_size != (cache_ptr)->il_size) ) { \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) || \
+ (cache_ptr)->index_len != (cache_ptr)->il_len || \
+ (cache_ptr)->index_size != (cache_ptr)->il_size \
+ ) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "post HT insert SC failed") \
}
-#define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr, fail_val) \
-if ( ( (cache_ptr) == NULL ) || \
- ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
- ( (cache_ptr)->index_len < 1 ) || \
- ( (entry_ptr) == NULL ) || \
- ( (cache_ptr)->index_size < (entry_ptr)->size ) || \
- ( ! H5F_addr_defined((entry_ptr)->addr) ) || \
- ( (entry_ptr)->size <= 0 ) || \
- ( H5C__HASH_FCN((entry_ptr)->addr) < 0 ) || \
- ( H5C__HASH_FCN((entry_ptr)->addr) >= H5C__HASH_TABLE_LEN ) || \
- ( ((cache_ptr)->index)[(H5C__HASH_FCN((entry_ptr)->addr))] \
- == NULL ) || \
- ( ( ((cache_ptr)->index)[(H5C__HASH_FCN((entry_ptr)->addr))] \
- != (entry_ptr) ) && \
- ( (entry_ptr)->ht_prev == NULL ) ) || \
- ( ( ((cache_ptr)->index)[(H5C__HASH_FCN((entry_ptr)->addr))] == \
- (entry_ptr) ) && \
- ( (entry_ptr)->ht_prev != NULL ) ) || \
- ( (cache_ptr)->index_size != \
- ((cache_ptr)->clean_index_size + \
- (cache_ptr)->dirty_index_size) ) || \
- ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
- ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
- ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) || \
- ( (entry_ptr)->ring >= H5C_RING_NTYPES ) || \
- ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 ) || \
- ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
- (cache_ptr)->index_len ) || \
- ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] < \
- (entry_ptr)->size ) || \
- ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
- (cache_ptr)->index_size ) || \
- ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+#define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr, fail_val) \
+if ( (cache_ptr) == NULL || (cache_ptr)->magic != H5C__H5C_T_MAGIC || \
+ (cache_ptr)->index_len < 1 || \
+ (entry_ptr) == NULL || \
+ (cache_ptr)->index_size < (entry_ptr)->size || \
+ !H5F_addr_defined((entry_ptr)->addr) || \
+ (entry_ptr)->size <= 0 || \
+ H5C__HASH_FCN((entry_ptr)->addr) < 0 || \
+ H5C__HASH_FCN((entry_ptr)->addr) >= H5C__HASH_TABLE_LEN || \
+ (cache_ptr)->index[H5C__HASH_FCN((entry_ptr)->addr)] == NULL || \
+ ((cache_ptr)->index[H5C__HASH_FCN((entry_ptr)->addr)] != (entry_ptr) && \
+ (entry_ptr)->ht_prev == NULL) || \
+ ((cache_ptr)->index[H5C__HASH_FCN((entry_ptr)->addr)] == (entry_ptr) && \
+ (entry_ptr)->ht_prev != NULL) || \
+ (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) || \
+ (cache_ptr)->index_size < (cache_ptr)->clean_index_size || \
+ (cache_ptr)->index_size < (cache_ptr)->dirty_index_size || \
+ (entry_ptr)->ring <= H5C_RING_UNDEFINED || \
+ (entry_ptr)->ring >= H5C_RING_NTYPES || \
+ (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 || \
+ (cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len || \
+ (cache_ptr)->index_ring_size[(entry_ptr)->ring] < (entry_ptr)->size || \
+ (cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size || \
+ (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
- (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \
- ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
- ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) || \
+ (cache_ptr)->index_len != (cache_ptr)->il_len || \
+ (cache_ptr)->index_size != (cache_ptr)->il_size \
+ ) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "pre HT remove SC failed") \
}
-#define H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr, fail_val) \
-if ( ( (cache_ptr) == NULL ) || \
- ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
- ( (entry_ptr) == NULL ) || \
- ( ! H5F_addr_defined((entry_ptr)->addr) ) || \
- ( (entry_ptr)->size <= 0 ) || \
- ( (entry_ptr)->ht_prev != NULL ) || \
- ( (entry_ptr)->ht_prev != NULL ) || \
- ( (cache_ptr)->index_size != \
- ((cache_ptr)->clean_index_size + \
- (cache_ptr)->dirty_index_size) ) || \
- ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
- ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
- ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
- (cache_ptr)->index_len ) || \
- ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
- (cache_ptr)->index_size ) || \
- ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
- ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
- (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \
- ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
- ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
+#define H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr, fail_val) \
+if ((cache_ptr) == NULL || (cache_ptr)->magic != H5C__H5C_T_MAGIC || \
+ (entry_ptr) == NULL || !H5F_addr_defined((entry_ptr)->addr) || \
+ (entry_ptr)->size <= 0 || \
+ (entry_ptr)->ht_next != NULL || \
+ (entry_ptr)->ht_prev != NULL || \
+ (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) || \
+ (cache_ptr)->index_size < (cache_ptr)->clean_index_size || \
+ (cache_ptr)->index_size < (cache_ptr)->dirty_index_size || \
+ (cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len || \
+ (cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size || \
+ (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) || \
+ (cache_ptr)->index_len != (cache_ptr)->il_len || \
+ (cache_ptr)->index_size != (cache_ptr)->il_size \
+ ) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "post HT remove SC failed") \
}
-/* (Keep in sync w/H5C_TEST__PRE_HT_SEARCH_SC macro in test/cache_common.h -QAK) */
-#define H5C__PRE_HT_SEARCH_SC(cache_ptr, entry_addr, fail_val) \
-if ( ( (cache_ptr) == NULL ) || \
- ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
- ( (cache_ptr)->index_size != \
- ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
- ( ! H5F_addr_defined(entry_addr) ) || \
- ( H5C__HASH_FCN(entry_addr) < 0 ) || \
- ( H5C__HASH_FCN(entry_addr) >= H5C__HASH_TABLE_LEN ) ) { \
+#define H5C__PRE_HT_SEARCH_SC(cache_ptr, entry_addr, fail_val) \
+if (H5C__PRE_HT_SEARCH_SC_CMP(cache_ptr, entry_addr)) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "pre HT search SC failed") \
}
-/* (Keep in sync w/H5C_TEST__POST_SUC_HT_SEARCH_SC macro in test/cache_common.h -QAK) */
#define H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val) \
-if ( ( (cache_ptr) == NULL ) || \
- ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
- ( (cache_ptr)->index_len < 1 ) || \
- ( (entry_ptr) == NULL ) || \
- ( (cache_ptr)->index_size < (entry_ptr)->size ) || \
- ( (cache_ptr)->index_size != \
- ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
- ( (entry_ptr)->size <= 0 ) || \
- ( ((cache_ptr)->index)[k] == NULL ) || \
- ( ( ((cache_ptr)->index)[k] != (entry_ptr) ) && \
- ( (entry_ptr)->ht_prev == NULL ) ) || \
- ( ( ((cache_ptr)->index)[k] == (entry_ptr) ) && \
- ( (entry_ptr)->ht_prev != NULL ) ) || \
- ( ( (entry_ptr)->ht_prev != NULL ) && \
- ( (entry_ptr)->ht_prev->ht_next != (entry_ptr) ) ) || \
- ( ( (entry_ptr)->ht_next != NULL ) && \
- ( (entry_ptr)->ht_next->ht_prev != (entry_ptr) ) ) ) { \
+if (H5C__POST_SUC_HT_SEARCH_SC_CMP(cache_ptr, entry_ptr, k)) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "post successful HT search SC failed") \
}
-/* (Keep in sync w/H5C_TEST__POST_HT_SHIFT_TO_FRONT macro in test/cache_common.h -QAK) */
-#define H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \
-if ( ( (cache_ptr) == NULL ) || \
- ( ((cache_ptr)->index)[k] != (entry_ptr) ) || \
- ( (entry_ptr)->ht_prev != NULL ) ) { \
+#define H5C__POST_HT_SHIFT_TO_FRONT_SC(cache_ptr, entry_ptr, k, fail_val) \
+if (H5C__POST_HT_SHIFT_TO_FRONT_SC_CMP(cache_ptr, entry_ptr, k)) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "post HT shift to front SC failed") \
}
-#define H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
- entry_ptr, was_clean, fail_val) \
-if ( ( (cache_ptr) == NULL ) || \
- ( (cache_ptr)->index_len <= 0 ) || \
- ( (cache_ptr)->index_size <= 0 ) || \
- ( (new_size) <= 0 ) || \
- ( (old_size) > (cache_ptr)->index_size ) || \
- ( ( (cache_ptr)->index_len == 1 ) && \
- ( (cache_ptr)->index_size != (old_size) ) ) || \
- ( (cache_ptr)->index_size != \
- ((cache_ptr)->clean_index_size + \
- (cache_ptr)->dirty_index_size) ) || \
- ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
- ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
- ( ( !( was_clean ) || \
- ( (cache_ptr)->clean_index_size < (old_size) ) ) && \
- ( ( (was_clean) ) || \
- ( (cache_ptr)->dirty_index_size < (old_size) ) ) ) || \
- ( (entry_ptr) == NULL ) || \
- ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) || \
- ( (entry_ptr)->ring >= H5C_RING_NTYPES ) || \
- ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 ) || \
- ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
- (cache_ptr)->index_len ) || \
- ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
- (cache_ptr)->index_size ) || \
- ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
- ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
- (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \
- ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
- ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
+#define H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
+ entry_ptr, was_clean, fail_val) \
+if ((cache_ptr) == NULL || \
+ (cache_ptr)->index_len <= 0 || (cache_ptr)->index_size <= 0 || \
+ (new_size) <= 0 || (old_size) > (cache_ptr)->index_size || \
+ ((cache_ptr)->index_len == 1 && (cache_ptr)->index_size != (old_size)) || \
+ (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) || \
+ (cache_ptr)->index_size < (cache_ptr)->clean_index_size || \
+ (cache_ptr)->index_size < (cache_ptr)->dirty_index_size || \
+ ((!(was_clean) || (cache_ptr)->clean_index_size < (old_size)) && \
+ ((was_clean) || (cache_ptr)->dirty_index_size < (old_size))) || \
+ (entry_ptr) == NULL || \
+ (entry_ptr)->ring <= H5C_RING_UNDEFINED || \
+ (entry_ptr)->ring >= H5C_RING_NTYPES || \
+ (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 || \
+ (cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len || \
+ (cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size || \
+ (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) || \
+ (cache_ptr)->index_len != (cache_ptr)->il_len || \
+ (cache_ptr)->index_size != (cache_ptr)->il_size \
+ ) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "pre HT entry size change SC failed") \
}
-#define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
- entry_ptr, fail_val) \
-if ( ( (cache_ptr) == NULL ) || \
- ( (cache_ptr)->index_len <= 0 ) || \
- ( (cache_ptr)->index_size <= 0 ) || \
- ( (new_size) > (cache_ptr)->index_size ) || \
- ( (cache_ptr)->index_size != \
- ((cache_ptr)->clean_index_size + \
- (cache_ptr)->dirty_index_size) ) || \
- ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
- ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
- ( ( !((entry_ptr)->is_dirty ) || \
- ( (cache_ptr)->dirty_index_size < (new_size) ) ) && \
- ( ( ((entry_ptr)->is_dirty) ) || \
- ( (cache_ptr)->clean_index_size < (new_size) ) ) ) || \
- ( ( (cache_ptr)->index_len == 1 ) && \
- ( (cache_ptr)->index_size != (new_size) ) ) || \
- ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
- (cache_ptr)->index_len ) || \
- ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
- (cache_ptr)->index_size ) || \
- ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
- ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
- (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \
- ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
- ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
+#define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
+ entry_ptr, fail_val) \
+if ((cache_ptr) == NULL || \
+ (cache_ptr)->index_len <= 0 || (cache_ptr)->index_size <= 0 || \
+ (new_size) > (cache_ptr)->index_size || \
+ (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) || \
+ (cache_ptr)->index_size < (cache_ptr)->clean_index_size || \
+ (cache_ptr)->index_size < (cache_ptr)->dirty_index_size || \
+ ((!((entry_ptr)->is_dirty) || (cache_ptr)->dirty_index_size < (new_size)) && \
+ ((entry_ptr)->is_dirty || (cache_ptr)->clean_index_size < (new_size)) \
+ ) || \
+ ((cache_ptr)->index_len == 1 && (cache_ptr)->index_size != (new_size)) || \
+ (cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len || \
+ (cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size || \
+ (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) || \
+ (cache_ptr)->index_len != (cache_ptr)->il_len || \
+ (cache_ptr)->index_size != (cache_ptr)->il_size \
+ ) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "post HT entry size change SC failed") \
}
-#define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr, fail_val) \
-if ( \
- ( (cache_ptr) == NULL ) || \
- ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
- ( (cache_ptr)->index_len <= 0 ) || \
- ( (entry_ptr) == NULL ) || \
- ( (entry_ptr)->is_dirty != FALSE ) || \
- ( (cache_ptr)->index_size < (entry_ptr)->size ) || \
- ( (cache_ptr)->dirty_index_size < (entry_ptr)->size ) || \
- ( (cache_ptr)->index_size != \
- ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
- ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
- ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
- ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) || \
- ( (entry_ptr)->ring >= H5C_RING_NTYPES ) || \
- ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 ) || \
- ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
- (cache_ptr)->index_len ) || \
- ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
- (cache_ptr)->index_size ) || \
- ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+#define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr, fail_val) \
+if ((cache_ptr) == NULL || (cache_ptr)->magic != H5C__H5C_T_MAGIC || \
+ (cache_ptr)->index_len <= 0 || \
+ (entry_ptr) == NULL || (entry_ptr)->is_dirty != FALSE || \
+ (cache_ptr)->index_size < (entry_ptr)->size || \
+ (cache_ptr)->dirty_index_size < (entry_ptr)->size || \
+ (cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) || \
+ (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) || \
+ (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) || \
+ (entry_ptr)->ring <= H5C_RING_UNDEFINED || \
+ (entry_ptr)->ring >= H5C_RING_NTYPES || \
+ (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 || \
+ (cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len || \
+ (cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size || \
+ (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
- (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) \
+ ) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "pre HT update for entry clean SC failed") \
}
-#define H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr, fail_val) \
-if ( \
- ( (cache_ptr) == NULL ) || \
- ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
- ( (cache_ptr)->index_len <= 0 ) || \
- ( (entry_ptr) == NULL ) || \
- ( (entry_ptr)->is_dirty != TRUE ) || \
- ( (cache_ptr)->index_size < (entry_ptr)->size ) || \
- ( (cache_ptr)->clean_index_size < (entry_ptr)->size ) || \
- ( (cache_ptr)->index_size != \
- ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
- ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
- ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
- ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) || \
- ( (entry_ptr)->ring >= H5C_RING_NTYPES ) || \
- ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 ) || \
- ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
- (cache_ptr)->index_len ) || \
- ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
- (cache_ptr)->index_size ) || \
- ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+#define H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr, fail_val) \
+if ((cache_ptr) == NULL || (cache_ptr)->magic != H5C__H5C_T_MAGIC || \
+ (cache_ptr)->index_len <= 0 || \
+ (entry_ptr) == NULL || (entry_ptr)->is_dirty != TRUE || \
+ (cache_ptr)->index_size < (entry_ptr)->size || \
+ (cache_ptr)->clean_index_size < (entry_ptr)->size || \
+ (cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) || \
+ (cache_ptr)->index_size < (cache_ptr)->clean_index_size || \
+ (cache_ptr)->index_size < (cache_ptr)->dirty_index_size || \
+ (entry_ptr)->ring <= H5C_RING_UNDEFINED || \
+ (entry_ptr)->ring >= H5C_RING_NTYPES || \
+ (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 || \
+ (cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len || \
+ (cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size || \
+ (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
- (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) \
+ ) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "pre HT update for entry dirty SC failed") \
}
-#define H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr, fail_val) \
-if ( ( (cache_ptr)->index_size != \
- ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
- ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
- ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
- ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
- (cache_ptr)->index_len ) || \
- ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
- (cache_ptr)->index_size ) || \
- ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
- ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
- (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
+#define H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr, fail_val) \
+if ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) || \
+ (cache_ptr)->index_size < (cache_ptr)->clean_index_size || \
+ (cache_ptr)->index_size < (cache_ptr)->dirty_index_size || \
+ (cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len || \
+ (cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size || \
+ (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) \
+ ) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "post HT update for entry clean SC failed") \
}
-#define H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr, fail_val) \
-if ( ( (cache_ptr)->index_size != \
- ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
- ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
- ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
- ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
- (cache_ptr)->index_len ) || \
- ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
- (cache_ptr)->index_size ) || \
- ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
- ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
- (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
+#define H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr, fail_val) \
+if ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) || \
+ (cache_ptr)->index_size < (cache_ptr)->clean_index_size || \
+ (cache_ptr)->index_size < (cache_ptr)->dirty_index_size || \
+ (cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len || \
+ (cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size || \
+ (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) \
+ ) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fail_val), "post HT update for entry dirty SC failed") \
}
@@ -854,13 +798,11 @@ if ( ( (cache_ptr)->index_size != \
#define H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr, fail_val)
#define H5C__PRE_HT_SEARCH_SC(cache_ptr, entry_addr, fail_val)
#define H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val)
-#define H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val)
+#define H5C__POST_HT_SHIFT_TO_FRONT_SC(cache_ptr, entry_ptr, k, fail_val)
#define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr, fail_val)
#define H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr, fail_val)
-#define H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
- entry_ptr, was_clean, fail_val)
-#define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
- entry_ptr, fail_val)
+#define H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, entry_ptr, was_clean, fail_val)
+#define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, entry_ptr, fail_val)
#define H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr, fail_val)
#define H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr, fail_val)
@@ -872,24 +814,21 @@ if ( ( (cache_ptr)->index_size != \
int k; \
H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
k = H5C__HASH_FCN((entry_ptr)->addr); \
- if(((cache_ptr)->index)[k] != NULL) { \
- (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \
+ if((cache_ptr)->index[k] != NULL) { \
+ (entry_ptr)->ht_next = (cache_ptr)->index[k]; \
(entry_ptr)->ht_next->ht_prev = (entry_ptr); \
} \
- ((cache_ptr)->index)[k] = (entry_ptr); \
+ (cache_ptr)->index[k] = (entry_ptr); \
(cache_ptr)->index_len++; \
(cache_ptr)->index_size += (entry_ptr)->size; \
- ((cache_ptr)->index_ring_len[(entry_ptr)->ring])++; \
- ((cache_ptr)->index_ring_size[(entry_ptr)->ring]) \
- += (entry_ptr)->size; \
+ (cache_ptr)->index_ring_len[(entry_ptr)->ring]++; \
+ (cache_ptr)->index_ring_size[(entry_ptr)->ring] += (entry_ptr)->size; \
if((entry_ptr)->is_dirty) { \
(cache_ptr)->dirty_index_size += (entry_ptr)->size; \
- ((cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) \
- += (entry_ptr)->size; \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring] += (entry_ptr)->size; \
} else { \
(cache_ptr)->clean_index_size += (entry_ptr)->size; \
- ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring]) \
- += (entry_ptr)->size; \
+ (cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] += (entry_ptr)->size; \
} \
if((entry_ptr)->flush_me_last) { \
(cache_ptr)->num_last_entries++; \
@@ -905,29 +844,26 @@ if ( ( (cache_ptr)->index_size != \
#define H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, fail_val) \
{ \
int k; \
- H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr, fail_val) \
+ H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr, fail_val) \
k = H5C__HASH_FCN((entry_ptr)->addr); \
if((entry_ptr)->ht_next) \
(entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
if((entry_ptr)->ht_prev) \
(entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
- if(((cache_ptr)->index)[k] == (entry_ptr)) \
- ((cache_ptr)->index)[k] = (entry_ptr)->ht_next; \
+ if((cache_ptr)->index[k] == (entry_ptr)) \
+ (cache_ptr)->index[k] = (entry_ptr)->ht_next; \
(entry_ptr)->ht_next = NULL; \
(entry_ptr)->ht_prev = NULL; \
(cache_ptr)->index_len--; \
(cache_ptr)->index_size -= (entry_ptr)->size; \
- ((cache_ptr)->index_ring_len[(entry_ptr)->ring])--; \
- ((cache_ptr)->index_ring_size[(entry_ptr)->ring]) \
- -= (entry_ptr)->size; \
+ (cache_ptr)->index_ring_len[(entry_ptr)->ring]--; \
+ (cache_ptr)->index_ring_size[(entry_ptr)->ring] -= (entry_ptr)->size; \
if((entry_ptr)->is_dirty) { \
(cache_ptr)->dirty_index_size -= (entry_ptr)->size; \
- ((cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) \
- -= (entry_ptr)->size; \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring] -= (entry_ptr)->size; \
} else { \
(cache_ptr)->clean_index_size -= (entry_ptr)->size; \
- ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring]) \
- -= (entry_ptr)->size; \
+ (cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] -= (entry_ptr)->size; \
} \
if((entry_ptr)->flush_me_last) { \
(cache_ptr)->num_last_entries--; \
@@ -937,209 +873,170 @@ if ( ( (cache_ptr)->index_size != \
(cache_ptr)->il_tail, (cache_ptr)->il_len, \
(cache_ptr)->il_size, fail_val) \
H5C__UPDATE_STATS_FOR_HT_DELETION(cache_ptr) \
- H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr, fail_val) \
+ H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr, fail_val) \
}
-#define H5C__SEARCH_INDEX(cache_ptr, entry_addr, entry_ptr, fail_val) \
-{ \
- int k; \
- int depth = 0; \
- H5C__PRE_HT_SEARCH_SC(cache_ptr, entry_addr, fail_val) \
- k = H5C__HASH_FCN(entry_addr); \
- entry_ptr = ((cache_ptr)->index)[k]; \
- while(entry_ptr) { \
- if(H5F_addr_eq(entry_addr, (entry_ptr)->addr)) { \
- H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val) \
- if(entry_ptr != ((cache_ptr)->index)[k]) { \
- if((entry_ptr)->ht_next) \
- (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
- HDassert((entry_ptr)->ht_prev != NULL); \
- (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
- ((cache_ptr)->index)[k]->ht_prev = (entry_ptr); \
- (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \
- (entry_ptr)->ht_prev = NULL; \
- ((cache_ptr)->index)[k] = (entry_ptr); \
- H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \
- } \
- break; \
- } \
- (entry_ptr) = (entry_ptr)->ht_next; \
- (depth)++; \
- } \
- H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, (entry_ptr != NULL), depth) \
+#define H5C__SEARCH_INDEX(cache_ptr, entry_addr, entry_ptr, fail_val) \
+{ \
+ int k; \
+ int depth = 0; \
+ H5C__PRE_HT_SEARCH_SC(cache_ptr, entry_addr, fail_val) \
+ k = H5C__HASH_FCN(entry_addr); \
+ (entry_ptr) = (cache_ptr)->index[k]; \
+ while(entry_ptr) { \
+ if(H5F_addr_eq(entry_addr, (entry_ptr)->addr)) { \
+ H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val) \
+ if((entry_ptr) != (cache_ptr)->index[k]) { \
+ if((entry_ptr)->ht_next) \
+ (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
+ HDassert((entry_ptr)->ht_prev != NULL); \
+ (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
+ (cache_ptr)->index[k]->ht_prev = (entry_ptr); \
+ (entry_ptr)->ht_next = (cache_ptr)->index[k]; \
+ (entry_ptr)->ht_prev = NULL; \
+ (cache_ptr)->index[k] = (entry_ptr); \
+ H5C__POST_HT_SHIFT_TO_FRONT_SC(cache_ptr, entry_ptr, k, fail_val) \
+ } \
+ break; \
+ } \
+ (entry_ptr) = (entry_ptr)->ht_next; \
+ (depth)++; \
+ } \
+ H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, ((entry_ptr) != NULL), depth) \
}
-#define H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr, fail_val) \
-{ \
+#define H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr, fail_val) \
+{ \
H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr, fail_val) \
- (cache_ptr)->dirty_index_size -= (entry_ptr)->size; \
- ((cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) \
- -= (entry_ptr)->size; \
- (cache_ptr)->clean_index_size += (entry_ptr)->size; \
- ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring]) \
- += (entry_ptr)->size; \
+ (cache_ptr)->dirty_index_size -= (entry_ptr)->size; \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring] -= (entry_ptr)->size; \
+ (cache_ptr)->clean_index_size += (entry_ptr)->size; \
+ (cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] += (entry_ptr)->size; \
H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr, fail_val) \
}
-#define H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr, fail_val) \
-{ \
+#define H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr, fail_val) \
+{ \
H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr, fail_val) \
- (cache_ptr)->clean_index_size -= (entry_ptr)->size; \
- ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring]) \
- -= (entry_ptr)->size; \
- (cache_ptr)->dirty_index_size += (entry_ptr)->size; \
- ((cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) \
- += (entry_ptr)->size; \
+ (cache_ptr)->clean_index_size -= (entry_ptr)->size; \
+ (cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] -= (entry_ptr)->size; \
+ (cache_ptr)->dirty_index_size += (entry_ptr)->size; \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring] += (entry_ptr)->size; \
H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr, fail_val) \
}
#define H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size, \
- entry_ptr, was_clean, fail_val) \
+ entry_ptr, was_clean, fail_val) \
{ \
H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
- entry_ptr, was_clean, fail_val) \
+ entry_ptr, was_clean, fail_val) \
(cache_ptr)->index_size -= (old_size); \
(cache_ptr)->index_size += (new_size); \
- ((cache_ptr)->index_ring_size[(entry_ptr)->ring]) -= (old_size); \
- ((cache_ptr)->index_ring_size[(entry_ptr)->ring]) += (new_size); \
+ (cache_ptr)->index_ring_size[(entry_ptr)->ring] -= (old_size); \
+ (cache_ptr)->index_ring_size[(entry_ptr)->ring] += (new_size); \
if(was_clean) { \
(cache_ptr)->clean_index_size -= (old_size); \
- ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring])-= (old_size); \
+ (cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] -= (old_size); \
} else { \
- (cache_ptr)->dirty_index_size -= (old_size); \
- ((cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring])-= (old_size); \
+ (cache_ptr)->dirty_index_size -= (old_size); \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring] -= (old_size); \
} \
if((entry_ptr)->is_dirty) { \
(cache_ptr)->dirty_index_size += (new_size); \
- ((cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring])+= (new_size); \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring] += (new_size); \
} else { \
- (cache_ptr)->clean_index_size += (new_size); \
- ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring])+= (new_size); \
+ (cache_ptr)->clean_index_size += (new_size); \
+ (cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] += (new_size); \
} \
H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->il_len, \
(cache_ptr)->il_size, \
- (old_size), (new_size), (fail_val)) \
+ (old_size), (new_size), (fail_val)) \
H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
- entry_ptr, fail_val) \
+ entry_ptr, fail_val) \
}
/**************************************************************************
*
- * Skip list insertion and deletion macros:
+ * Skip list modification macros
*
**************************************************************************/
-/*-------------------------------------------------------------------------
- *
- * Macro: H5C__INSERT_ENTRY_IN_SLIST
- *
- * Purpose: Insert the specified instance of H5C_cache_entry_t into
- * the skip list in the specified instance of H5C_t. Update
- * the associated length and size fields.
- *
- * Return: N/A
- *
- * Programmer: John Mainzer, 5/10/04
- *
- *-------------------------------------------------------------------------
- */
-
-/* NOTE: The H5C__INSERT_ENTRY_IN_SLIST() macro is set up so that
- *
- * H5C_DO_SANITY_CHECKS
- *
- * and
- *
- * H5C_DO_SLIST_SANITY_CHECKS
- *
- * can be selected independently. This is easy to miss as the
- * two #defines are easy to confuse.
- */
-
#ifdef H5C_DO_SLIST_SANITY_CHECKS
-#define H5C_ENTRY_IN_SLIST(cache_ptr, entry_ptr) \
+#define H5C__ENTRY_IN_SLIST(cache_ptr, entry_ptr) \
H5C__entry_in_skip_list((cache_ptr), (entry_ptr))
#else /* H5C_DO_SLIST_SANITY_CHECKS */
-#define H5C_ENTRY_IN_SLIST(cache_ptr, entry_ptr) FALSE
+#define H5C__ENTRY_IN_SLIST(cache_ptr, entry_ptr) FALSE
#endif /* H5C_DO_SLIST_SANITY_CHECKS */
#ifdef H5C_DO_SANITY_CHECKS
-#define H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, fail_val) \
-{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- \
- if ( (cache_ptr)->slist_enabled ) { \
- \
- HDassert( (entry_ptr) ); \
- HDassert( (entry_ptr)->size > 0 ); \
- HDassert( H5F_addr_defined((entry_ptr)->addr) ); \
- HDassert( !((entry_ptr)->in_slist) ); \
- HDassert( ! H5C_ENTRY_IN_SLIST((cache_ptr), (entry_ptr)) ); \
- HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
- HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
- HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
- (cache_ptr)->slist_len ); \
- HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
- (cache_ptr)->slist_size ); \
- \
- if ( H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, \
- &((entry_ptr)->addr)) < 0) \
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), \
- "can't insert entry in skip list") \
- \
- (entry_ptr)->in_slist = TRUE; \
- (cache_ptr)->slist_changed = TRUE; \
- (cache_ptr)->slist_len++; \
- (cache_ptr)->slist_size += (entry_ptr)->size; \
- ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])++; \
- ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (entry_ptr)->size;\
- (cache_ptr)->slist_len_increase++; \
- (cache_ptr)->slist_size_increase += (int64_t)((entry_ptr)->size); \
- \
- HDassert( (cache_ptr)->slist_len > 0 ); \
- HDassert( (cache_ptr)->slist_size > 0 ); \
- \
- } else { /* slist disabled */ \
- \
- HDassert( (cache_ptr)->slist_len == 0 ); \
- HDassert( (cache_ptr)->slist_size == 0 ); \
- } \
-} /* H5C__INSERT_ENTRY_IN_SLIST */
+#define H5C__SLIST_INSERT_ENTRY_SC(cache_ptr, entry_ptr) \
+{ \
+ (cache_ptr)->slist_len_increase++; \
+ (cache_ptr)->slist_size_increase += (int64_t)((entry_ptr)->size); \
+} /* H5C__SLIST_INSERT_ENTRY_SC() */
+#define H5C__SLIST_REMOVE_ENTRY_SC(cache_ptr, entry_ptr) \
+{ \
+ (cache_ptr)->slist_len_increase--; \
+ (cache_ptr)->slist_size_increase -= (int64_t)((entry_ptr)->size); \
+} /* H5C__SLIST_REMOVE_ENTRY_SC() */
+#define H5C__SLIST_UPDATE_FOR_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size) \
+{ \
+ (cache_ptr)->slist_size_increase -= (int64_t)(old_size); \
+ (cache_ptr)->slist_size_increase += (int64_t)(new_size); \
+} /* H5C__SLIST_UPDATE_FOR_ENTRY_SIZE_CHANGE_SC() */
#else /* H5C_DO_SANITY_CHECKS */
+#define H5C__SLIST_INSERT_ENTRY_SC(cache_ptr, entry_ptr)
+#define H5C__SLIST_REMOVE_ENTRY_SC(cache_ptr, entry_ptr)
+#define H5C__SLIST_UPDATE_FOR_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size)
+
+#endif /* H5C_DO_SANITY_CHECKS */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Macro: H5C__INSERT_ENTRY_IN_SLIST
+ *
+ * Purpose: Insert a cache entry into a cache's skip list. Updates
+ * the associated length and size fields.
+ *
+ * Note: This macro is set up so that H5C_DO_SANITY_CHECKS and
+ * H5C_DO_SLIST_SANITY_CHECKS can be selected independently.
+ *
+ * Programmer: John Mainzer, 5/10/04
+ *
+ *-------------------------------------------------------------------------
+ */
+
#define H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, fail_val) \
{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- \
- if ( (cache_ptr)->slist_enabled ) { \
+ HDassert(cache_ptr); \
+ HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \
\
- HDassert( (entry_ptr) ); \
- HDassert( (entry_ptr)->size > 0 ); \
- HDassert( ! H5C_ENTRY_IN_SLIST((cache_ptr), (entry_ptr)) ); \
- HDassert( H5F_addr_defined((entry_ptr)->addr) ); \
- HDassert( !((entry_ptr)->in_slist) ); \
- HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
- HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
- HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
- (cache_ptr)->slist_len ); \
- HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
- (cache_ptr)->slist_size ); \
- HDassert( (cache_ptr)->slist_ptr ); \
+ if((cache_ptr)->slist_enabled) { \
+ HDassert(entry_ptr); \
+ HDassert((entry_ptr)->size > 0); \
+ HDassert(H5F_addr_defined((entry_ptr)->addr)); \
+ HDassert(!(entry_ptr)->in_slist); \
+ HDassert(!H5C__ENTRY_IN_SLIST((cache_ptr), (entry_ptr))); \
+ HDassert((entry_ptr)->ring > H5C_RING_UNDEFINED); \
+ HDassert((entry_ptr)->ring < H5C_RING_NTYPES); \
+ HDassert((cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_len); \
+ HDassert((cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_size); \
+ HDassert((cache_ptr)->slist_ptr); \
\
- if ( H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, \
- &((entry_ptr)->addr)) < 0) \
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), \
- "can't insert entry in skip list") \
+ if(H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &((entry_ptr)->addr)) < 0) \
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), "can't insert entry in skip list") \
\
(entry_ptr)->in_slist = TRUE; \
(cache_ptr)->slist_changed = TRUE; \
@@ -1147,288 +1044,160 @@ if ( ( (cache_ptr)->index_size != \
(cache_ptr)->slist_size += (entry_ptr)->size; \
((cache_ptr)->slist_ring_len[(entry_ptr)->ring])++; \
((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (entry_ptr)->size;\
+ H5C__SLIST_INSERT_ENTRY_SC(cache_ptr, entry_ptr) \
\
- HDassert( (cache_ptr)->slist_len > 0 ); \
- HDassert( (cache_ptr)->slist_size > 0 ); \
- \
+ HDassert((cache_ptr)->slist_len > 0); \
+ HDassert((cache_ptr)->slist_size > 0); \
} else { /* slist disabled */ \
- \
- HDassert( (cache_ptr)->slist_len == 0 ); \
- HDassert( (cache_ptr)->slist_size == 0 ); \
+ HDassert((cache_ptr)->slist_len == 0); \
+ HDassert((cache_ptr)->slist_size == 0); \
} \
} /* H5C__INSERT_ENTRY_IN_SLIST */
-#endif /* H5C_DO_SANITY_CHECKS */
-
/*-------------------------------------------------------------------------
*
- * Function: H5C__REMOVE_ENTRY_FROM_SLIST
+ * Macro: H5C__REMOVE_ENTRY_FROM_SLIST
*
- * Purpose: Remove the specified instance of H5C_cache_entry_t from the
- * index skip list in the specified instance of H5C_t. Update
+ * Purpose: Remove a cache entry from a cache's skip list. Updates
* the associated length and size fields.
*
- * Return: N/A
- *
* Programmer: John Mainzer, 5/10/04
*
*-------------------------------------------------------------------------
*/
-#ifdef H5C_DO_SANITY_CHECKS
-#define H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush) \
-{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- \
- if ( (cache_ptr)->slist_enabled ) { \
- \
- HDassert( (entry_ptr) ); \
- HDassert( !((entry_ptr)->is_read_only) ); \
- HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
- HDassert( (entry_ptr)->size > 0 ); \
- HDassert( (entry_ptr)->in_slist ); \
- HDassert( (cache_ptr)->slist_ptr ); \
- HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
- HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
- HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
- (cache_ptr)->slist_len ); \
- HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
- (cache_ptr)->slist_size ); \
- HDassert( (cache_ptr)->slist_size >= (entry_ptr)->size ); \
- \
- if ( H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr) \
- != (entry_ptr) ) \
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, \
- "can't delete entry from skip list") \
- \
- HDassert( (cache_ptr)->slist_len > 0 ); \
- if(!(during_flush)) \
- (cache_ptr)->slist_changed = TRUE; \
- (cache_ptr)->slist_len--; \
- HDassert( (cache_ptr)->slist_size >= (entry_ptr)->size ); \
- (cache_ptr)->slist_size -= (entry_ptr)->size; \
- ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])--; \
- HDassert( (cache_ptr)->slist_ring_size[((entry_ptr)->ring)] >= \
- (entry_ptr)->size ); \
- ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (entry_ptr)->size;\
- (cache_ptr)->slist_len_increase--; \
- (cache_ptr)->slist_size_increase -= (int64_t)((entry_ptr)->size); \
- (entry_ptr)->in_slist = FALSE; \
- \
- } else { /* slist disabled */ \
- \
- HDassert( (cache_ptr)->slist_len == 0 ); \
- HDassert( (cache_ptr)->slist_size == 0 ); \
- } \
-} /* H5C__REMOVE_ENTRY_FROM_SLIST */
-
-#else /* H5C_DO_SANITY_CHECKS */
-
-#define H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush) \
+#define H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush, fail_val) \
{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
+ HDassert(cache_ptr); \
+ HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \
\
- if ( (cache_ptr)->slist_enabled ) { \
+ if((cache_ptr)->slist_enabled) { \
+ HDassert(entry_ptr); \
+ HDassert(!(entry_ptr)->is_read_only); \
+ HDassert((entry_ptr)->ro_ref_count == 0); \
+ HDassert((entry_ptr)->size > 0); \
+ HDassert((entry_ptr)->in_slist); \
+ HDassert((cache_ptr)->slist_ptr); \
+ HDassert((entry_ptr)->ring > H5C_RING_UNDEFINED); \
+ HDassert((entry_ptr)->ring < H5C_RING_NTYPES); \
+ HDassert((cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_len); \
+ HDassert((cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_size); \
+ HDassert((cache_ptr)->slist_size >= (entry_ptr)->size); \
\
- HDassert( (entry_ptr) ); \
- HDassert( !((entry_ptr)->is_read_only) ); \
- HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
- HDassert( (entry_ptr)->in_slist ); \
- HDassert( (cache_ptr)->slist_ptr ); \
- HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
- HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
- HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
- (cache_ptr)->slist_len ); \
- HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
- (cache_ptr)->slist_size ); \
+ if(H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr) != (entry_ptr) ) \
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), "can't delete entry from skip list") \
\
- if ( H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr) \
- != (entry_ptr) ) \
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, \
- "can't delete entry from skip list") \
- \
- HDassert( (cache_ptr)->slist_len > 0 ); \
+ HDassert((cache_ptr)->slist_len > 0); \
if(!(during_flush)) \
(cache_ptr)->slist_changed = TRUE; \
(cache_ptr)->slist_len--; \
- HDassert( (cache_ptr)->slist_size >= (entry_ptr)->size ); \
+ HDassert((cache_ptr)->slist_size >= (entry_ptr)->size); \
(cache_ptr)->slist_size -= (entry_ptr)->size; \
- ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])--; \
- HDassert( (cache_ptr)->slist_ring_size[((entry_ptr)->ring)] >= \
- (entry_ptr)->size ); \
+ (cache_ptr)->slist_ring_len[(entry_ptr)->ring]--; \
+ HDassert((cache_ptr)->slist_ring_size[(entry_ptr)->ring] >= (entry_ptr)->size); \
((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (entry_ptr)->size;\
+ H5C__SLIST_REMOVE_ENTRY_SC(cache_ptr, entry_ptr) \
(entry_ptr)->in_slist = FALSE; \
- \
} else { /* slist disabled */ \
- \
- HDassert( (cache_ptr)->slist_len == 0 ); \
- HDassert( (cache_ptr)->slist_size == 0 ); \
+ HDassert((cache_ptr)->slist_len == 0); \
+ HDassert((cache_ptr)->slist_size == 0); \
} \
} /* H5C__REMOVE_ENTRY_FROM_SLIST */
-#endif /* H5C_DO_SANITY_CHECKS */
-
/*-------------------------------------------------------------------------
*
- * Function: H5C__UPDATE_SLIST_FOR_SIZE_CHANGE
+ * Macro: H5C__UPDATE_SLIST_FOR_SIZE_CHANGE
*
* Purpose: Update cache_ptr->slist_size for a change in the size of
 * an entry in the slist.
*
- * Return: N/A
- *
* Programmer: John Mainzer, 9/07/05
*
*-------------------------------------------------------------------------
*/
-#ifdef H5C_DO_SANITY_CHECKS
-
#define H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size) \
{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
+ HDassert(cache_ptr); \
+ HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \
\
- if ( (cache_ptr)->slist_enabled ) { \
- \
- HDassert( (old_size) > 0 ); \
- HDassert( (new_size) > 0 ); \
- HDassert( (old_size) <= (cache_ptr)->slist_size ); \
- HDassert( (cache_ptr)->slist_len > 0 ); \
- HDassert( ((cache_ptr)->slist_len > 1) || \
- ( (cache_ptr)->slist_size == (old_size) ) ); \
- HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
- HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
- HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
- (cache_ptr)->slist_len ); \
- HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
- (cache_ptr)->slist_size ); \
+ if((cache_ptr)->slist_enabled) { \
+ HDassert((old_size) > 0); \
+ HDassert((new_size) > 0); \
+ HDassert((old_size) <= (cache_ptr)->slist_size); \
+ HDassert((cache_ptr)->slist_len > 0); \
+ HDassert((cache_ptr)->slist_len > 1 || \
+ (cache_ptr)->slist_size == (old_size)); \
+ HDassert((entry_ptr)->ring > H5C_RING_UNDEFINED); \
+ HDassert((entry_ptr)->ring < H5C_RING_NTYPES); \
+ HDassert((cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_len); \
+ HDassert((cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_size); \
\
(cache_ptr)->slist_size -= (old_size); \
(cache_ptr)->slist_size += (new_size); \
\
- HDassert( (cache_ptr)->slist_ring_size[((entry_ptr)->ring)] \
- >= (old_size) ); \
- \
- ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (old_size); \
- ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (new_size); \
+ HDassert((cache_ptr)->slist_ring_size[(entry_ptr)->ring] >= (old_size)); \
\
- (cache_ptr)->slist_size_increase -= (int64_t)(old_size); \
- (cache_ptr)->slist_size_increase += (int64_t)(new_size); \
+ (cache_ptr)->slist_ring_size[(entry_ptr)->ring] -= (old_size); \
+ (cache_ptr)->slist_ring_size[(entry_ptr)->ring] += (new_size); \
\
- HDassert( (new_size) <= (cache_ptr)->slist_size ); \
- HDassert( ( (cache_ptr)->slist_len > 1 ) || \
- ( (cache_ptr)->slist_size == (new_size) ) ); \
+ H5C__SLIST_UPDATE_FOR_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size) \
\
+ HDassert((new_size) <= (cache_ptr)->slist_size); \
+ HDassert((cache_ptr)->slist_len > 1 || \
+ (cache_ptr)->slist_size == (new_size)); \
} else { /* slist disabled */ \
- \
- HDassert( (cache_ptr)->slist_len == 0 ); \
- HDassert( (cache_ptr)->slist_size == 0 ); \
- } \
-} /* H5C__UPDATE_SLIST_FOR_SIZE_CHANGE */
-
-#else /* H5C_DO_SANITY_CHECKS */
-
-#define H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size) \
-{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- \
- if ( (cache_ptr)->slist_enabled ) { \
- \
- HDassert( (old_size) > 0 ); \
- HDassert( (new_size) > 0 ); \
- HDassert( (old_size) <= (cache_ptr)->slist_size ); \
- HDassert( (cache_ptr)->slist_len > 0 ); \
- HDassert( ((cache_ptr)->slist_len > 1) || \
- ( (cache_ptr)->slist_size == (old_size) ) ); \
- HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
- HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
- HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
- (cache_ptr)->slist_len ); \
- HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
- (cache_ptr)->slist_size ); \
- \
- (cache_ptr)->slist_size -= (old_size); \
- (cache_ptr)->slist_size += (new_size); \
- \
- HDassert( (cache_ptr)->slist_ring_size[((entry_ptr)->ring)] >= \
- (old_size) ); \
- ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (old_size); \
- ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (new_size); \
- \
- HDassert( (new_size) <= (cache_ptr)->slist_size ); \
- HDassert( ( (cache_ptr)->slist_len > 1 ) || \
- ( (cache_ptr)->slist_size == (new_size) ) ); \
- \
- } else { /* slist disabled */ \
- \
- HDassert( (cache_ptr)->slist_len == 0 ); \
- HDassert( (cache_ptr)->slist_size == 0 ); \
+ HDassert((cache_ptr)->slist_len == 0); \
+ HDassert((cache_ptr)->slist_size == 0); \
} \
} /* H5C__UPDATE_SLIST_FOR_SIZE_CHANGE */
-#endif /* H5C_DO_SANITY_CHECKS */
-
/**************************************************************************
*
- * Replacement policy update macros:
+ * Replacement policy update macros
*
**************************************************************************/
-/*-------------------------------------------------------------------------
- *
- * Macro: H5C__UPDATE_RP_FOR_EVICTION
- *
- * Purpose: Update the replacement policy data structures for an
- * eviction of the specified cache entry.
- *
- * At present, we only support the modified LRU policy, so
- * this function deals with that case unconditionally. If
- * we ever support other replacement policies, the function
- * should switch on the current policy and act accordingly.
- *
- * Return: Non-negative on success/Negative on failure.
- *
- * Programmer: John Mainzer, 5/10/04
- *
- *-------------------------------------------------------------------------
- */
-
#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
-#define H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, fail_val) \
+#define H5C__UPDATE_RP_FOR_EVICTION_CD_LRU(cache_ptr, entry_ptr, fail_val) \
{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (entry_ptr) ); \
- HDassert( !((entry_ptr)->is_protected) ); \
- HDassert( !((entry_ptr)->is_read_only) ); \
- HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
- HDassert( !((entry_ptr)->is_pinned) ); \
- HDassert( (entry_ptr)->size > 0 ); \
- \
- /* modified LRU specific code */ \
- \
- /* remove the entry from the LRU list. */ \
- \
- H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
- (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, \
- (cache_ptr)->LRU_list_size, (fail_val)) \
- \
/* If the entry is clean when it is evicted, it should be on the \
* clean LRU list, if it was dirty, it should be on the dirty LRU list. \
* Remove it from the appropriate list according to the value of the \
* dirty flag. \
*/ \
- \
- if ( (entry_ptr)->is_dirty ) { \
- \
+ if((entry_ptr)->is_dirty) { \
+ H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
+ (cache_ptr)->dLRU_tail_ptr, \
+ (cache_ptr)->dLRU_list_len, \
+ (cache_ptr)->dLRU_list_size, (fail_val)) \
+ } else { \
+ H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
+ (cache_ptr)->cLRU_tail_ptr, \
+ (cache_ptr)->cLRU_list_len, \
+ (cache_ptr)->cLRU_list_size, (fail_val)) \
+ } \
+} /* H5C__UPDATE_RP_FOR_EVICTION_CD_LRU() */
+#define H5C__UPDATE_RP_FOR_FLUSH_CD_LRU(cache_ptr, entry_ptr, fail_val) \
+{ \
+    /* An entry being flushed or cleared may not be dirty. Use the          \
+ * dirty flag to infer whether the entry is on the clean or dirty \
+ * LRU list, and remove it. Then insert it at the head of the \
+ * clean LRU list. \
+ * \
+ * The function presumes that a dirty entry will be either cleared \
+ * or flushed shortly, so it is OK if we put a dirty entry on the \
+ * clean LRU list. \
+ */ \
+ if((entry_ptr)->is_dirty) { \
H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
(cache_ptr)->dLRU_tail_ptr, \
(cache_ptr)->dLRU_list_len, \
@@ -1440,73 +1209,225 @@ if ( ( (cache_ptr)->index_size != \
(cache_ptr)->cLRU_list_size, (fail_val)) \
} \
\
-} /* H5C__UPDATE_RP_FOR_EVICTION */
+ H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
+ (cache_ptr)->cLRU_tail_ptr, \
+ (cache_ptr)->cLRU_list_len, \
+ (cache_ptr)->cLRU_list_size, (fail_val)) \
+} /* H5C__UPDATE_RP_FOR_FLUSH_CD_LRU() */
+#define H5C__UPDATE_RP_FOR_INSERT_APPEND_CD_LRU(cache_ptr, entry_ptr, fail_val) \
+{ \
+ /* Insert the entry at the _tail_ of the clean or dirty LRU list as \
+ * appropriate. \
+ */ \
+ if((entry_ptr)->is_dirty) { \
+ H5C__AUX_DLL_APPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
+ (cache_ptr)->dLRU_tail_ptr, \
+ (cache_ptr)->dLRU_list_len, \
+ (cache_ptr)->dLRU_list_size, (fail_val)) \
+ } else { \
+ H5C__AUX_DLL_APPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
+ (cache_ptr)->cLRU_tail_ptr, \
+ (cache_ptr)->cLRU_list_len, \
+ (cache_ptr)->cLRU_list_size, (fail_val)) \
+ } \
+} /* H5C__UPDATE_RP_FOR_INSERT_APPEND_CD_LRU() */
+#define H5C__UPDATE_RP_FOR_INSERTION_CD_LRU(cache_ptr, entry_ptr, fail_val) \
+{ \
+ /* Insert the entry at the head of the clean or dirty LRU list as \
+ * appropriate. \
+ */ \
+ if((entry_ptr)->is_dirty) { \
+ H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
+ (cache_ptr)->dLRU_tail_ptr, \
+ (cache_ptr)->dLRU_list_len, \
+ (cache_ptr)->dLRU_list_size, (fail_val)) \
+ } else { \
+ H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
+ (cache_ptr)->cLRU_tail_ptr, \
+ (cache_ptr)->cLRU_list_len, \
+ (cache_ptr)->cLRU_list_size, (fail_val)) \
+ } \
+} /* H5C__UPDATE_RP_FOR_INSERTION_CD_LRU() */
+#define H5C__UPDATE_RP_FOR_PROTECT_CD_LRU(cache_ptr, entry_ptr, fail_val) \
+{ \
+ /* Remove the entry from the clean or dirty LRU list as appropriate */ \
+ if((entry_ptr)->is_dirty) { \
+ H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
+ (cache_ptr)->dLRU_tail_ptr, \
+ (cache_ptr)->dLRU_list_len, \
+ (cache_ptr)->dLRU_list_size, (fail_val)) \
+ } else { \
+ H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
+ (cache_ptr)->cLRU_tail_ptr, \
+ (cache_ptr)->cLRU_list_len, \
+ (cache_ptr)->cLRU_list_size, (fail_val)) \
+ } \
+} /* H5C__UPDATE_RP_FOR_PROTECT_CD_LRU() */
+#define H5C__UPDATE_RP_FOR_MOVE_CD_LRU(cache_ptr, entry_ptr, was_dirty, fail_val) \
+{ \
+ /* Remove the entry from either the clean or dirty LRU list as \
+ * indicated by the was_dirty parameter \
+ */ \
+ if(was_dirty) { \
+ H5C__AUX_DLL_REMOVE((entry_ptr), \
+ (cache_ptr)->dLRU_head_ptr, \
+ (cache_ptr)->dLRU_tail_ptr, \
+ (cache_ptr)->dLRU_list_len, \
+ (cache_ptr)->dLRU_list_size, (fail_val)) \
+ \
+ } else { \
+ H5C__AUX_DLL_REMOVE((entry_ptr), \
+ (cache_ptr)->cLRU_head_ptr, \
+ (cache_ptr)->cLRU_tail_ptr, \
+ (cache_ptr)->cLRU_list_len, \
+ (cache_ptr)->cLRU_list_size, (fail_val)) \
+ } \
+ \
+ /* Insert the entry at the head of either the clean or dirty \
+ * LRU list as appropriate. \
+ */ \
+ if((entry_ptr)->is_dirty) { \
+ H5C__AUX_DLL_PREPEND((entry_ptr), \
+ (cache_ptr)->dLRU_head_ptr, \
+ (cache_ptr)->dLRU_tail_ptr, \
+ (cache_ptr)->dLRU_list_len, \
+ (cache_ptr)->dLRU_list_size, (fail_val)) \
+ } else { \
+ H5C__AUX_DLL_PREPEND((entry_ptr), \
+ (cache_ptr)->cLRU_head_ptr, \
+ (cache_ptr)->cLRU_tail_ptr, \
+ (cache_ptr)->cLRU_list_len, \
+ (cache_ptr)->cLRU_list_size, (fail_val)) \
+ } \
+} /* H5C__UPDATE_RP_FOR_MOVE_CD_LRU() */
+#define H5C__UPDATE_RP_FOR_SIZE_CHANGE_CD_LRU(cache_ptr, entry_ptr, new_size, fail_val) \
+{ \
+ /* Update the size of the clean or dirty LRU list as \
+ * appropriate. \
+ */ \
+ if((entry_ptr)->is_dirty) { \
+ H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->dLRU_list_len, \
+ (cache_ptr)->dLRU_list_size, \
+ (entry_ptr)->size, \
+ (new_size), (fail_val)) \
+ \
+ } else { \
+ H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->cLRU_list_len, \
+ (cache_ptr)->cLRU_list_size, \
+ (entry_ptr)->size, \
+ (new_size), (fail_val)) \
+ } \
+} /* H5C__UPDATE_RP_FOR_SIZE_CHANGE_CD_LRU() */
+#define H5C__UPDATE_RP_FOR_UNPIN_CD_LRU(cache_ptr, entry_ptr, fail_val) \
+{ \
+ /* Insert the entry at the head of either the clean \
+ * or dirty LRU list as appropriate. \
+ */ \
+ if((entry_ptr)->is_dirty) { \
+ H5C__AUX_DLL_PREPEND((entry_ptr), \
+ (cache_ptr)->dLRU_head_ptr, \
+ (cache_ptr)->dLRU_tail_ptr, \
+ (cache_ptr)->dLRU_list_len, \
+ (cache_ptr)->dLRU_list_size, (fail_val)) \
+ } else { \
+ H5C__AUX_DLL_PREPEND((entry_ptr), \
+ (cache_ptr)->cLRU_head_ptr, \
+ (cache_ptr)->cLRU_tail_ptr, \
+ (cache_ptr)->cLRU_list_len, \
+ (cache_ptr)->cLRU_list_size, (fail_val)) \
+ } \
+} /* H5C__UPDATE_RP_FOR_UNPIN_CD_LRU() */
+#define H5C__UPDATE_RP_FOR_UNPROTECT_CD_LRU(cache_ptr, entry_ptr, fail_val) \
+{ \
+ /* Insert the entry at the head of either the clean or \
+ * dirty LRU list as appropriate. \
+ */ \
+ if((entry_ptr)->is_dirty) { \
+ H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
+ (cache_ptr)->dLRU_tail_ptr, \
+ (cache_ptr)->dLRU_list_len, \
+ (cache_ptr)->dLRU_list_size, (fail_val)) \
+ } else { \
+ H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
+ (cache_ptr)->cLRU_tail_ptr, \
+ (cache_ptr)->cLRU_list_len, \
+ (cache_ptr)->cLRU_list_size, (fail_val)) \
+ } \
+} /* H5C__UPDATE_RP_FOR_UNPROTECT_CD_LRU() */
#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+#define H5C__UPDATE_RP_FOR_EVICTION_CD_LRU(cache_ptr, entry_ptr, fail_val)
+#define H5C__UPDATE_RP_FOR_FLUSH_CD_LRU(cache_ptr, entry_ptr, fail_val)
+#define H5C__UPDATE_RP_FOR_INSERT_APPEND_CD_LRU(cache_ptr, entry_ptr, fail_val)
+#define H5C__UPDATE_RP_FOR_INSERTION_CD_LRU(cache_ptr, entry_ptr, fail_val)
+#define H5C__UPDATE_RP_FOR_PROTECT_CD_LRU(cache_ptr, entry_ptr, fail_val)
+#define H5C__UPDATE_RP_FOR_MOVE_CD_LRU(cache_ptr, entry_ptr, was_dirty, fail_val)
+#define H5C__UPDATE_RP_FOR_SIZE_CHANGE_CD_LRU(cache_ptr, entry_ptr, new_size, fail_val)
+#define H5C__UPDATE_RP_FOR_UNPIN_CD_LRU(cache_ptr, entry_ptr, fail_val)
+#define H5C__UPDATE_RP_FOR_UNPROTECT_CD_LRU(cache_ptr, entry_ptr, fail_val)
+
+#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+
+/*-------------------------------------------------------------------------
+ *
+ * Macro: H5C__UPDATE_RP_FOR_EVICTION
+ *
+ * Purpose: Update the replacement policy data structures for an
+ * eviction of the specified cache entry.
+ *
+ * Programmer: John Mainzer, 5/10/04
+ *
+ *-------------------------------------------------------------------------
+ */
+
#define H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, fail_val) \
{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (entry_ptr) ); \
- HDassert( !((entry_ptr)->is_protected) ); \
- HDassert( !((entry_ptr)->is_read_only) ); \
- HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
- HDassert( !((entry_ptr)->is_pinned) ); \
- HDassert( (entry_ptr)->size > 0 ); \
- \
- /* modified LRU specific code */ \
- \
- /* remove the entry from the LRU list. */ \
+ HDassert(cache_ptr); \
+ HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \
+ HDassert(entry_ptr); \
+ HDassert(!(entry_ptr)->is_protected); \
+ HDassert(!(entry_ptr)->is_read_only); \
+ HDassert((entry_ptr)->ro_ref_count == 0); \
+ HDassert(!(entry_ptr)->is_pinned); \
+ HDassert((entry_ptr)->size > 0); \
\
+ /* Remove the entry from the LRU list */ \
H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
(cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, \
(cache_ptr)->LRU_list_size, (fail_val)) \
\
+ /* Remove the entry from the clean & dirty LRU lists, if enabled */ \
+ H5C__UPDATE_RP_FOR_EVICTION_CD_LRU(cache_ptr, entry_ptr, fail_val) \
} /* H5C__UPDATE_RP_FOR_EVICTION */
-#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
-
/*-------------------------------------------------------------------------
*
- * Macro: H5C__UPDATE_RP_FOR_FLUSH
+ * Macro: H5C__UPDATE_RP_FOR_FLUSH
*
* Purpose: Update the replacement policy data structures for a flush
- * of the specified cache entry.
- *
- * At present, we only support the modified LRU policy, so
- * this function deals with that case unconditionally. If
- * we ever support other replacement policies, the function
- * should switch on the current policy and act accordingly.
- *
- * Return: N/A
+ * of the specified cache entry.
*
* Programmer: John Mainzer, 5/6/04
*
*-------------------------------------------------------------------------
*/
-#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
-
#define H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, fail_val) \
{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (entry_ptr) ); \
- HDassert( !((entry_ptr)->is_protected) ); \
- HDassert( !((entry_ptr)->is_read_only) ); \
- HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
- HDassert( (entry_ptr)->size > 0 ); \
+ HDassert(cache_ptr); \
+ HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \
+ HDassert(entry_ptr); \
+ HDassert(!(entry_ptr)->is_protected); \
+ HDassert(!(entry_ptr)->is_read_only); \
+ HDassert((entry_ptr)->ro_ref_count == 0); \
+ HDassert((entry_ptr)->size > 0); \
\
- if ( ! ((entry_ptr)->is_pinned) ) { \
- \
- /* modified LRU specific code */ \
- \
- /* remove the entry from the LRU list, and re-insert it at the \
- * head. \
+ if(!(entry_ptr)->is_pinned) { \
+ /* Remove the entry from its location in the LRU list \
+ * and re-insert it at the head of the list. \
*/ \
- \
H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
(cache_ptr)->LRU_tail_ptr, \
(cache_ptr)->LRU_list_len, \
@@ -1517,722 +1438,289 @@ if ( ( (cache_ptr)->index_size != \
(cache_ptr)->LRU_list_len, \
(cache_ptr)->LRU_list_size, (fail_val)) \
\
- /* since the entry is being flushed or cleared, one would think \
- * that it must be dirty -- but that need not be the case. Use the \
- * dirty flag to infer whether the entry is on the clean or dirty \
- * LRU list, and remove it. Then insert it at the head of the \
- * clean LRU list. \
- * \
- * The function presumes that a dirty entry will be either cleared \
- * or flushed shortly, so it is OK if we put a dirty entry on the \
- * clean LRU list. \
- */ \
- \
- if ( (entry_ptr)->is_dirty ) { \
- H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
- (cache_ptr)->dLRU_tail_ptr, \
- (cache_ptr)->dLRU_list_len, \
- (cache_ptr)->dLRU_list_size, (fail_val)) \
- } else { \
- H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
- (cache_ptr)->cLRU_tail_ptr, \
- (cache_ptr)->cLRU_list_len, \
- (cache_ptr)->cLRU_list_size, (fail_val)) \
- } \
- \
- H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
- (cache_ptr)->cLRU_tail_ptr, \
- (cache_ptr)->cLRU_list_len, \
- (cache_ptr)->cLRU_list_size, (fail_val)) \
- \
- /* End modified LRU specific code. */ \
+ /* Maintain the clean & dirty LRU lists, if enabled */ \
+ H5C__UPDATE_RP_FOR_FLUSH_CD_LRU(cache_ptr, entry_ptr, fail_val) \
} \
} /* H5C__UPDATE_RP_FOR_FLUSH */
-#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
-
-#define H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, fail_val) \
-{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (entry_ptr) ); \
- HDassert( !((entry_ptr)->is_protected) ); \
- HDassert( !((entry_ptr)->is_read_only) ); \
- HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
- HDassert( (entry_ptr)->size > 0 ); \
- \
- if ( ! ((entry_ptr)->is_pinned) ) { \
- \
- /* modified LRU specific code */ \
- \
- /* remove the entry from the LRU list, and re-insert it at the \
- * head. \
- */ \
- \
- H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
- (cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
- (cache_ptr)->LRU_list_size, (fail_val)) \
- \
- H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
- (cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
- (cache_ptr)->LRU_list_size, (fail_val)) \
- \
- /* End modified LRU specific code. */ \
- } \
-} /* H5C__UPDATE_RP_FOR_FLUSH */
-
-#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
-
/*-------------------------------------------------------------------------
*
- * Macro: H5C__UPDATE_RP_FOR_INSERT_APPEND
+ * Macro: H5C__UPDATE_RP_FOR_INSERT_APPEND
*
* Purpose: Update the replacement policy data structures for an
- * insertion of the specified cache entry.
+ * insertion of the specified cache entry.
*
- * Unlike H5C__UPDATE_RP_FOR_INSERTION below, mark the
- * new entry as the LEAST recently used entry, not the
- * most recently used.
- *
- * For now at least, this macro should only be used in
- * the reconstruction of the metadata cache from a cache
- * image block.
- *
- * At present, we only support the modified LRU policy, so
- * this function deals with that case unconditionally. If
- * we ever support other replacement policies, the function
- * should switch on the current policy and act accordingly.
- *
- * Return: N/A
+ * Unlike H5C__UPDATE_RP_FOR_INSERTION below, insert a non-pinned
+ * new entry as the LEAST recently used entry, not the
+ * most recently used.
*
* Programmer: John Mainzer, 8/15/15
*
*-------------------------------------------------------------------------
*/
-#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
-
-#define H5C__UPDATE_RP_FOR_INSERT_APPEND(cache_ptr, entry_ptr, fail_val) \
-{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (entry_ptr) ); \
- HDassert( !((entry_ptr)->is_protected) ); \
- HDassert( !((entry_ptr)->is_read_only) ); \
- HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
- HDassert( (entry_ptr)->size > 0 ); \
- \
- if ( (entry_ptr)->is_pinned ) { \
- \
- H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \
- (cache_ptr)->pel_tail_ptr, \
- (cache_ptr)->pel_len, \
- (cache_ptr)->pel_size, (fail_val)) \
- \
- } else { \
- \
- /* modified LRU specific code */ \
- \
- /* insert the entry at the tail of the LRU list. */ \
- \
- H5C__DLL_APPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
- (cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
- (cache_ptr)->LRU_list_size, (fail_val)) \
- \
- /* insert the entry at the tail of the clean or dirty LRU list as \
- * appropriate. \
- */ \
- \
- if ( (entry_ptr)->is_dirty ) { \
- H5C__AUX_DLL_APPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
- (cache_ptr)->dLRU_tail_ptr, \
- (cache_ptr)->dLRU_list_len, \
- (cache_ptr)->dLRU_list_size, (fail_val)) \
- } else { \
- H5C__AUX_DLL_APPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
- (cache_ptr)->cLRU_tail_ptr, \
- (cache_ptr)->cLRU_list_len, \
- (cache_ptr)->cLRU_list_size, (fail_val)) \
- } \
- \
- /* End modified LRU specific code. */ \
- } \
-}
-
-#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
-
#define H5C__UPDATE_RP_FOR_INSERT_APPEND(cache_ptr, entry_ptr, fail_val) \
{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (entry_ptr) ); \
- HDassert( !((entry_ptr)->is_protected) ); \
- HDassert( !((entry_ptr)->is_read_only) ); \
- HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
- HDassert( (entry_ptr)->size > 0 ); \
- \
- if ( (entry_ptr)->is_pinned ) { \
+ HDassert(cache_ptr); \
+ HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \
+ HDassert(entry_ptr); \
+ HDassert(!(entry_ptr)->is_protected); \
+ HDassert(!(entry_ptr)->is_read_only); \
+ HDassert((entry_ptr)->ro_ref_count == 0 ); \
+ HDassert((entry_ptr)->size > 0 ); \
\
+ if((entry_ptr)->is_pinned) { \
H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \
(cache_ptr)->pel_tail_ptr, \
(cache_ptr)->pel_len, \
(cache_ptr)->pel_size, (fail_val)) \
- \
} else { \
- \
- /* modified LRU specific code */ \
- \
- /* insert the entry at the tail of the LRU list. */ \
- \
+ /* Insert the entry at the tail of the LRU list. */ \
H5C__DLL_APPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
(cache_ptr)->LRU_tail_ptr, \
(cache_ptr)->LRU_list_len, \
(cache_ptr)->LRU_list_size, (fail_val)) \
\
- /* End modified LRU specific code. */ \
+ /* Maintain the clean & dirty LRU lists, if enabled */ \
+ H5C__UPDATE_RP_FOR_INSERT_APPEND_CD_LRU(cache_ptr, entry_ptr, fail_val) \
} \
}
-#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
-
/*-------------------------------------------------------------------------
*
- * Macro: H5C__UPDATE_RP_FOR_INSERTION
+ * Macro: H5C__UPDATE_RP_FOR_INSERTION
*
* Purpose: Update the replacement policy data structures for an
- * insertion of the specified cache entry.
- *
- * At present, we only support the modified LRU policy, so
- * this function deals with that case unconditionally. If
- * we ever support other replacement policies, the function
- * should switch on the current policy and act accordingly.
- *
- * Return: N/A
+ * insertion of the specified cache entry.
*
* Programmer: John Mainzer, 5/17/04
*
*-------------------------------------------------------------------------
*/
-#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
-
-#define H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, fail_val) \
-{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (entry_ptr) ); \
- HDassert( !((entry_ptr)->is_protected) ); \
- HDassert( !((entry_ptr)->is_read_only) ); \
- HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
- HDassert( (entry_ptr)->size > 0 ); \
- \
- if ( (entry_ptr)->is_pinned ) { \
- \
- H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \
- (cache_ptr)->pel_tail_ptr, \
- (cache_ptr)->pel_len, \
- (cache_ptr)->pel_size, (fail_val)) \
- \
- } else { \
- \
- /* modified LRU specific code */ \
- \
- /* insert the entry at the head of the LRU list. */ \
- \
- H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
- (cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
- (cache_ptr)->LRU_list_size, (fail_val)) \
- \
- /* insert the entry at the head of the clean or dirty LRU list as \
- * appropriate. \
- */ \
- \
- if ( (entry_ptr)->is_dirty ) { \
- H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
- (cache_ptr)->dLRU_tail_ptr, \
- (cache_ptr)->dLRU_list_len, \
- (cache_ptr)->dLRU_list_size, (fail_val)) \
- } else { \
- H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
- (cache_ptr)->cLRU_tail_ptr, \
- (cache_ptr)->cLRU_list_len, \
- (cache_ptr)->cLRU_list_size, (fail_val)) \
- } \
- \
- /* End modified LRU specific code. */ \
- } \
-}
-
-#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
-
-#define H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, fail_val) \
-{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (entry_ptr) ); \
- HDassert( !((entry_ptr)->is_protected) ); \
- HDassert( !((entry_ptr)->is_read_only) ); \
- HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
- HDassert( (entry_ptr)->size > 0 ); \
- \
- if ( (entry_ptr)->is_pinned ) { \
- \
- H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \
- (cache_ptr)->pel_tail_ptr, \
- (cache_ptr)->pel_len, \
- (cache_ptr)->pel_size, (fail_val)) \
- \
- } else { \
- \
- /* modified LRU specific code */ \
- \
- /* insert the entry at the head of the LRU list. */ \
- \
- H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
- (cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
- (cache_ptr)->LRU_list_size, (fail_val)) \
- \
- /* End modified LRU specific code. */ \
- } \
+#define H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, fail_val) \
+{ \
+ HDassert(cache_ptr); \
+ HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \
+ HDassert(entry_ptr); \
+ HDassert(!(entry_ptr)->is_protected); \
+ HDassert(!(entry_ptr)->is_read_only); \
+ HDassert((entry_ptr)->ro_ref_count == 0); \
+ HDassert((entry_ptr)->size > 0); \
+ \
+ if((entry_ptr)->is_pinned) { \
+ H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \
+ (cache_ptr)->pel_tail_ptr, \
+ (cache_ptr)->pel_len, \
+ (cache_ptr)->pel_size, (fail_val)) \
+ \
+ } else { \
+ /* Insert the entry at the head of the LRU list. */ \
+ H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
+ (cache_ptr)->LRU_tail_ptr, \
+ (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, (fail_val)) \
+ \
+ /* Maintain the clean & dirty LRU lists, if enabled */ \
+ H5C__UPDATE_RP_FOR_INSERTION_CD_LRU(cache_ptr, entry_ptr, fail_val) \
+ } \
}
-#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
-
/*-------------------------------------------------------------------------
*
- * Macro: H5C__UPDATE_RP_FOR_PROTECT
+ * Macro: H5C__UPDATE_RP_FOR_PROTECT
*
* Purpose: Update the replacement policy data structures for a
- * protect of the specified cache entry.
- *
- * To do this, unlink the specified entry from any data
- * structures used by the replacement policy, and add the
- * entry to the protected list.
- *
- * At present, we only support the modified LRU policy, so
- * this function deals with that case unconditionally. If
- * we ever support other replacement policies, the function
- * should switch on the current policy and act accordingly.
+ * protect of the specified cache entry.
*
- * Return: N/A
+ * To do this, unlink the specified entry from any data
+ * structures used by the replacement policy (or the pinned list,
+ * which is outside of the replacement policy), and add the
+ * entry to the protected list.
*
* Programmer: John Mainzer, 5/17/04
*
*-------------------------------------------------------------------------
*/
-#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
-
-#define H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, fail_val) \
-{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (entry_ptr) ); \
- HDassert( !((entry_ptr)->is_protected) ); \
- HDassert( !((entry_ptr)->is_read_only) ); \
- HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
- HDassert( (entry_ptr)->size > 0 ); \
- \
- if ( (entry_ptr)->is_pinned ) { \
- \
- H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \
- (cache_ptr)->pel_tail_ptr, \
- (cache_ptr)->pel_len, \
- (cache_ptr)->pel_size, (fail_val)) \
- \
- } else { \
- \
- /* modified LRU specific code */ \
- \
- /* remove the entry from the LRU list. */ \
- \
- H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
- (cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
- (cache_ptr)->LRU_list_size, (fail_val)) \
- \
- /* Similarly, remove the entry from the clean or dirty LRU list \
- * as appropriate. \
- */ \
- \
- if ( (entry_ptr)->is_dirty ) { \
- \
- H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
- (cache_ptr)->dLRU_tail_ptr, \
- (cache_ptr)->dLRU_list_len, \
- (cache_ptr)->dLRU_list_size, (fail_val)) \
- \
- } else { \
- \
- H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
- (cache_ptr)->cLRU_tail_ptr, \
- (cache_ptr)->cLRU_list_len, \
- (cache_ptr)->cLRU_list_size, (fail_val)) \
- } \
- \
- /* End modified LRU specific code. */ \
- } \
- \
- /* Regardless of the replacement policy, or whether the entry is \
- * pinned, now add the entry to the protected list. \
- */ \
- \
- H5C__DLL_APPEND((entry_ptr), (cache_ptr)->pl_head_ptr, \
- (cache_ptr)->pl_tail_ptr, \
- (cache_ptr)->pl_len, \
- (cache_ptr)->pl_size, (fail_val)) \
-} /* H5C__UPDATE_RP_FOR_PROTECT */
-
-#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
-
#define H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, fail_val) \
{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (entry_ptr) ); \
- HDassert( !((entry_ptr)->is_protected) ); \
- HDassert( !((entry_ptr)->is_read_only) ); \
- HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
- HDassert( (entry_ptr)->size > 0 ); \
- \
- if ( (entry_ptr)->is_pinned ) { \
+ HDassert(cache_ptr); \
+ HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \
+ HDassert(entry_ptr); \
+ HDassert(!(entry_ptr)->is_protected); \
+ HDassert(!(entry_ptr)->is_read_only); \
+ HDassert((entry_ptr)->ro_ref_count == 0); \
+ HDassert((entry_ptr)->size > 0); \
\
+ if((entry_ptr)->is_pinned) { \
H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \
(cache_ptr)->pel_tail_ptr, \
(cache_ptr)->pel_len, \
(cache_ptr)->pel_size, (fail_val)) \
- \
} else { \
- \
- /* modified LRU specific code */ \
- \
- /* remove the entry from the LRU list. */ \
- \
+ /* Remove the entry from the LRU list. */ \
H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
(cache_ptr)->LRU_tail_ptr, \
(cache_ptr)->LRU_list_len, \
(cache_ptr)->LRU_list_size, (fail_val)) \
\
- /* End modified LRU specific code. */ \
+ /* Maintain the clean & dirty LRU lists, if enabled */ \
+ H5C__UPDATE_RP_FOR_PROTECT_CD_LRU(cache_ptr, entry_ptr, fail_val) \
} \
\
- /* Regardless of the replacement policy, or whether the entry is \
- * pinned, now add the entry to the protected list. \
+ /* Regardless of whether the entry is pinned, add it to the protected \
+ * list. \
*/ \
- \
H5C__DLL_APPEND((entry_ptr), (cache_ptr)->pl_head_ptr, \
(cache_ptr)->pl_tail_ptr, \
(cache_ptr)->pl_len, \
(cache_ptr)->pl_size, (fail_val)) \
} /* H5C__UPDATE_RP_FOR_PROTECT */
-#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
-
/*-------------------------------------------------------------------------
*
- * Macro: H5C__UPDATE_RP_FOR_MOVE
+ * Macro: H5C__UPDATE_RP_FOR_MOVE
*
* Purpose: Update the replacement policy data structures for a
- * move of the specified cache entry.
- *
- * At present, we only support the modified LRU policy, so
- * this function deals with that case unconditionally. If
- * we ever support other replacement policies, the function
- * should switch on the current policy and act accordingly.
- *
- * Return: N/A
+ * move of the specified cache entry.
*
* Programmer: John Mainzer, 5/17/04
*
*-------------------------------------------------------------------------
*/
-#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
-
-#define H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, fail_val) \
+#define H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, fail_val) \
{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (entry_ptr) ); \
- HDassert( !((entry_ptr)->is_read_only) ); \
- HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
- HDassert( (entry_ptr)->size > 0 ); \
- \
- if ( ! ( (entry_ptr)->is_pinned ) && ! ( ((entry_ptr)->is_protected ) ) ) {\
- \
- /* modified LRU specific code */ \
+ HDassert(cache_ptr); \
+ HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \
+ HDassert(entry_ptr); \
+ HDassert(!(entry_ptr)->is_read_only); \
+ HDassert((entry_ptr)->ro_ref_count == 0); \
+ HDassert((entry_ptr)->size > 0); \
\
- /* remove the entry from the LRU list, and re-insert it at the head. \
+ if(!(entry_ptr)->is_pinned && !(entry_ptr)->is_protected) { \
+ /* Remove the entry from the LRU list, and re-insert it at the head. \
*/ \
- \
- H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
- (cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
- (cache_ptr)->LRU_list_size, (fail_val)) \
+ H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
+ (cache_ptr)->LRU_tail_ptr, \
+ (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, (fail_val)) \
\
H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
(cache_ptr)->LRU_tail_ptr, \
(cache_ptr)->LRU_list_len, \
(cache_ptr)->LRU_list_size, (fail_val)) \
\
- /* remove the entry from either the clean or dirty LUR list as \
- * indicated by the was_dirty parameter \
- */ \
- if ( was_dirty ) { \
- \
- H5C__AUX_DLL_REMOVE((entry_ptr), \
- (cache_ptr)->dLRU_head_ptr, \
- (cache_ptr)->dLRU_tail_ptr, \
- (cache_ptr)->dLRU_list_len, \
- (cache_ptr)->dLRU_list_size, \
- (fail_val)) \
- \
- } else { \
- \
- H5C__AUX_DLL_REMOVE((entry_ptr), \
- (cache_ptr)->cLRU_head_ptr, \
- (cache_ptr)->cLRU_tail_ptr, \
- (cache_ptr)->cLRU_list_len, \
- (cache_ptr)->cLRU_list_size, \
- (fail_val)) \
- } \
- \
- /* insert the entry at the head of either the clean or dirty \
- * LRU list as appropriate. \
- */ \
- \
- if ( (entry_ptr)->is_dirty ) { \
- \
- H5C__AUX_DLL_PREPEND((entry_ptr), \
- (cache_ptr)->dLRU_head_ptr, \
- (cache_ptr)->dLRU_tail_ptr, \
- (cache_ptr)->dLRU_list_len, \
- (cache_ptr)->dLRU_list_size, \
- (fail_val)) \
- \
- } else { \
- \
- H5C__AUX_DLL_PREPEND((entry_ptr), \
- (cache_ptr)->cLRU_head_ptr, \
- (cache_ptr)->cLRU_tail_ptr, \
- (cache_ptr)->cLRU_list_len, \
- (cache_ptr)->cLRU_list_size, \
- (fail_val)) \
- } \
- \
- /* End modified LRU specific code. */ \
- } \
-} /* H5C__UPDATE_RP_FOR_MOVE */
-
-#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
-
-#define H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, fail_val) \
-{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (entry_ptr) ); \
- HDassert( !((entry_ptr)->is_read_only) ); \
- HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
- HDassert( (entry_ptr)->size > 0 ); \
- \
- if ( ! ( (entry_ptr)->is_pinned ) && ! ( ((entry_ptr)->is_protected ) ) ) {\
- \
- /* modified LRU specific code */ \
- \
- /* remove the entry from the LRU list, and re-insert it at the head. \
- */ \
- \
- H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
- (cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
- (cache_ptr)->LRU_list_size, (fail_val)) \
- \
- H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
- (cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
- (cache_ptr)->LRU_list_size, (fail_val)) \
- \
- /* End modified LRU specific code. */ \
- } \
+ /* Maintain the clean & dirty LRU lists, if enabled */ \
+ H5C__UPDATE_RP_FOR_MOVE_CD_LRU(cache_ptr, entry_ptr, was_dirty, fail_val) \
+ } \
} /* H5C__UPDATE_RP_FOR_MOVE */
-#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
-
/*-------------------------------------------------------------------------
*
- * Macro: H5C__UPDATE_RP_FOR_SIZE_CHANGE
+ * Macro: H5C__UPDATE_RP_FOR_SIZE_CHANGE
*
* Purpose: Update the replacement policy data structures for a
- * size change of the specified cache entry.
+ * size change of the specified cache entry.
*
- * To do this, determine if the entry is pinned. If it is,
- * update the size of the pinned entry list.
+ * To do this, determine if the entry is pinned. If it is,
+ * update the size of the pinned entry list.
*
- * If it isn't pinned, the entry must handled by the
- * replacement policy. Update the appropriate replacement
- * policy data structures.
+ *           If it isn't pinned, the entry must be handled by the
+ * replacement policy. Update the appropriate replacement
+ * policy data structures.
*
- * At present, we only support the modified LRU policy, so
- * this function deals with that case unconditionally. If
- * we ever support other replacement policies, the function
- * should switch on the current policy and act accordingly.
- *
- * Return: N/A
+ * If the entry is accessed with collective operations for
+ * parallel I/O, update that list.
*
* Programmer: John Mainzer, 8/23/06
*
*-------------------------------------------------------------------------
*/
-#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
+#ifdef H5_HAVE_PARALLEL
+
+#define H5C__UPDATE_RP_FOR_SIZE_CHANGE_COLL(cache_ptr, entry_ptr, new_size, fail_val) \
+{ \
+ if((entry_ptr)->coll_access) { \
+ H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->coll_list_len, \
+ (cache_ptr)->coll_list_size, \
+ (entry_ptr)->size, \
+ (new_size), (fail_val)); \
+ \
+ } \
+} /* H5C__UPDATE_RP_FOR_SIZE_CHANGE_COLL() */
+
+#else /* H5_HAVE_PARALLEL */
+
+#define H5C__UPDATE_RP_FOR_SIZE_CHANGE_COLL(cache_ptr, entry_ptr, new_size, fail_val)
+
+#endif /* H5_HAVE_PARALLEL */
#define H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_size, fail_val) \
{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (entry_ptr) ); \
- HDassert( !((entry_ptr)->is_protected) ); \
- HDassert( !((entry_ptr)->is_read_only) ); \
- HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
- HDassert( (entry_ptr)->size > 0 ); \
- HDassert( new_size > 0 ); \
- \
- if ( (entry_ptr)->coll_access ) { \
- \
- H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->coll_list_len, \
- (cache_ptr)->coll_list_size, \
- (entry_ptr)->size, \
- (new_size), (fail_val)); \
- \
- } \
+ HDassert(cache_ptr); \
+ HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \
+ HDassert(entry_ptr); \
+ HDassert(!(entry_ptr)->is_protected); \
+ HDassert(!(entry_ptr)->is_read_only); \
+ HDassert((entry_ptr)->ro_ref_count == 0); \
+ HDassert((entry_ptr)->size > 0 ); \
+ HDassert(new_size > 0 ); \
\
- if ( (entry_ptr)->is_pinned ) { \
+ /* Maintain the collective access list, if enabled */ \
+ H5C__UPDATE_RP_FOR_SIZE_CHANGE_COLL(cache_ptr, entry_ptr, new_size, fail_val) \
\
+ if((entry_ptr)->is_pinned) { \
H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->pel_len, \
(cache_ptr)->pel_size, \
(entry_ptr)->size, \
- (new_size), (fail_val)); \
- \
- } else { \
- \
- /* modified LRU specific code */ \
- \
- /* Update the size of the LRU list */ \
- \
- H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->LRU_list_len, \
- (cache_ptr)->LRU_list_size, \
- (entry_ptr)->size, \
- (new_size), (fail_val)); \
- \
- /* Similarly, update the size of the clean or dirty LRU list as \
- * appropriate. At present, the entry must be clean, but that \
- * could change. \
- */ \
- \
- if ( (entry_ptr)->is_dirty ) { \
- \
- H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->dLRU_list_len, \
- (cache_ptr)->dLRU_list_size, \
- (entry_ptr)->size, \
- (new_size), (fail_val)); \
- \
- } else { \
- \
- H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->cLRU_list_len, \
- (cache_ptr)->cLRU_list_size, \
- (entry_ptr)->size, \
- (new_size), (fail_val)); \
- } \
- \
- /* End modified LRU specific code. */ \
- } \
- \
-} /* H5C__UPDATE_RP_FOR_SIZE_CHANGE */
-
-#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
-
-#define H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_size, fail_val) \
-{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (entry_ptr) ); \
- HDassert( !((entry_ptr)->is_protected) ); \
- HDassert( !((entry_ptr)->is_read_only) ); \
- HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
- HDassert( (entry_ptr)->size > 0 ); \
- HDassert( new_size > 0 ); \
- \
- if ( (entry_ptr)->is_pinned ) { \
- \
- H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->pel_len, \
- (cache_ptr)->pel_size, \
- (entry_ptr)->size, \
- (new_size), (fail_val)); \
- \
+ (new_size), (fail_val)) \
} else { \
- \
- /* modified LRU specific code */ \
- \
/* Update the size of the LRU list */ \
- \
H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->LRU_list_len, \
(cache_ptr)->LRU_list_size, \
(entry_ptr)->size, \
- (new_size), (fail_val)); \
+ (new_size), (fail_val)) \
\
- /* End modified LRU specific code. */ \
+ /* Maintain the clean & dirty LRU lists, if enabled */ \
+ H5C__UPDATE_RP_FOR_SIZE_CHANGE_CD_LRU(cache_ptr, entry_ptr, new_size, fail_val) \
} \
- \
} /* H5C__UPDATE_RP_FOR_SIZE_CHANGE */
-#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
-
/*-------------------------------------------------------------------------
*
- * Macro: H5C__UPDATE_RP_FOR_UNPIN
+ * Macro: H5C__UPDATE_RP_FOR_UNPIN
*
* Purpose: Update the replacement policy data structures for an
- * unpin of the specified cache entry.
- *
- * To do this, unlink the specified entry from the protected
- * entry list, and re-insert it in the data structures used
- * by the current replacement policy.
- *
- * At present, we only support the modified LRU policy, so
- * this function deals with that case unconditionally. If
- * we ever support other replacement policies, the macro
- * should switch on the current policy and act accordingly.
+ * unpin of the specified cache entry.
*
- * Return: N/A
+ * To do this, unlink the specified entry from the pinned
+ * entry list, and re-insert it in the data structures used
+ * by the current replacement policy.
*
* Programmer: John Mainzer, 3/22/06
*
*-------------------------------------------------------------------------
*/
-#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
-
#define H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, fail_val) \
{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (entry_ptr) ); \
- HDassert( !((entry_ptr)->is_protected) ); \
- HDassert( !((entry_ptr)->is_read_only) ); \
- HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
- HDassert( (entry_ptr)->is_pinned); \
- HDassert( (entry_ptr)->size > 0 ); \
+ HDassert(cache_ptr); \
+ HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \
+ HDassert(entry_ptr); \
+ HDassert(!(entry_ptr)->is_protected); \
+ HDassert(!(entry_ptr)->is_read_only); \
+ HDassert((entry_ptr)->ro_ref_count == 0 ); \
+ HDassert((entry_ptr)->is_pinned); \
+ HDassert((entry_ptr)->size > 0); \
\
/* Regardless of the replacement policy, remove the entry from the \
* pinned entry list. \
@@ -2241,110 +1729,40 @@ if ( ( (cache_ptr)->index_size != \
(cache_ptr)->pel_tail_ptr, (cache_ptr)->pel_len, \
(cache_ptr)->pel_size, (fail_val)) \
\
- /* modified LRU specific code */ \
- \
- /* insert the entry at the head of the LRU list. */ \
- \
+ /* Insert the entry at the head of the LRU list. */ \
H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
(cache_ptr)->LRU_tail_ptr, \
(cache_ptr)->LRU_list_len, \
(cache_ptr)->LRU_list_size, (fail_val)) \
\
- /* Similarly, insert the entry at the head of either the clean \
- * or dirty LRU list as appropriate. \
- */ \
- \
- if ( (entry_ptr)->is_dirty ) { \
- \
- H5C__AUX_DLL_PREPEND((entry_ptr), \
- (cache_ptr)->dLRU_head_ptr, \
- (cache_ptr)->dLRU_tail_ptr, \
- (cache_ptr)->dLRU_list_len, \
- (cache_ptr)->dLRU_list_size, \
- (fail_val)) \
- \
- } else { \
- \
- H5C__AUX_DLL_PREPEND((entry_ptr), \
- (cache_ptr)->cLRU_head_ptr, \
- (cache_ptr)->cLRU_tail_ptr, \
- (cache_ptr)->cLRU_list_len, \
- (cache_ptr)->cLRU_list_size, \
- (fail_val)) \
- } \
- \
- /* End modified LRU specific code. */ \
- \
+ /* Maintain the clean & dirty LRU lists, if enabled */ \
+ H5C__UPDATE_RP_FOR_UNPIN_CD_LRU(cache_ptr, entry_ptr, fail_val) \
} /* H5C__UPDATE_RP_FOR_UNPIN */
-#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
-
-#define H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, fail_val) \
-{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (entry_ptr) ); \
- HDassert( !((entry_ptr)->is_protected) ); \
- HDassert( !((entry_ptr)->is_read_only) ); \
- HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
- HDassert( (entry_ptr)->is_pinned); \
- HDassert( (entry_ptr)->size > 0 ); \
- \
- /* Regardless of the replacement policy, remove the entry from the \
- * pinned entry list. \
- */ \
- H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \
- (cache_ptr)->pel_tail_ptr, (cache_ptr)->pel_len, \
- (cache_ptr)->pel_size, (fail_val)) \
- \
- /* modified LRU specific code */ \
- \
- /* insert the entry at the head of the LRU list. */ \
- \
- H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
- (cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
- (cache_ptr)->LRU_list_size, (fail_val)) \
- \
- /* End modified LRU specific code. */ \
- \
-} /* H5C__UPDATE_RP_FOR_UNPIN */
-
-#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
-
/*-------------------------------------------------------------------------
*
- * Macro: H5C__UPDATE_RP_FOR_UNPROTECT
+ * Macro: H5C__UPDATE_RP_FOR_UNPROTECT
*
* Purpose: Update the replacement policy data structures for an
- * unprotect of the specified cache entry.
- *
- * To do this, unlink the specified entry from the protected
- * list, and re-insert it in the data structures used by the
- * current replacement policy.
- *
- * At present, we only support the modified LRU policy, so
- * this function deals with that case unconditionally. If
- * we ever support other replacement policies, the function
- * should switch on the current policy and act accordingly.
+ * unprotect of the specified cache entry.
*
- * Return: N/A
+ * To do this, unlink the specified entry from the protected
+ * list, and re-insert it in the data structures used by the
+ * current replacement policy.
*
* Programmer: John Mainzer, 5/19/04
*
*-------------------------------------------------------------------------
*/
-#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
-
#define H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, fail_val) \
{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (entry_ptr) ); \
- HDassert( (entry_ptr)->is_protected); \
- HDassert( (entry_ptr)->size > 0 ); \
+ HDassert(cache_ptr); \
+ HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \
+ HDassert(entry_ptr); \
+ HDassert((entry_ptr)->is_protected); \
+ HDassert((entry_ptr)->size > 0); \
\
/* Regardless of the replacement policy, remove the entry from the \
* protected list. \
@@ -2353,88 +1771,23 @@ if ( ( (cache_ptr)->index_size != \
(cache_ptr)->pl_tail_ptr, (cache_ptr)->pl_len, \
(cache_ptr)->pl_size, (fail_val)) \
\
- if ( (entry_ptr)->is_pinned ) { \
- \
+ if((entry_ptr)->is_pinned) { \
H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \
(cache_ptr)->pel_tail_ptr, \
(cache_ptr)->pel_len, \
(cache_ptr)->pel_size, (fail_val)) \
- \
} else { \
- \
- /* modified LRU specific code */ \
- \
- /* insert the entry at the head of the LRU list. */ \
- \
+ /* Insert the entry at the head of the LRU list. */ \
H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
(cache_ptr)->LRU_tail_ptr, \
(cache_ptr)->LRU_list_len, \
(cache_ptr)->LRU_list_size, (fail_val)) \
\
- /* Similarly, insert the entry at the head of either the clean or \
- * dirty LRU list as appropriate. \
- */ \
- \
- if ( (entry_ptr)->is_dirty ) { \
- \
- H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
- (cache_ptr)->dLRU_tail_ptr, \
- (cache_ptr)->dLRU_list_len, \
- (cache_ptr)->dLRU_list_size, (fail_val)) \
- \
- } else { \
- \
- H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
- (cache_ptr)->cLRU_tail_ptr, \
- (cache_ptr)->cLRU_list_len, \
- (cache_ptr)->cLRU_list_size, (fail_val)) \
- } \
- \
- /* End modified LRU specific code. */ \
+ /* Maintain the clean & dirty LRU lists, if enabled */ \
+ H5C__UPDATE_RP_FOR_UNPROTECT_CD_LRU(cache_ptr, entry_ptr, fail_val) \
} \
- \
} /* H5C__UPDATE_RP_FOR_UNPROTECT */
-#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
-
-#define H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, fail_val) \
-{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (entry_ptr) ); \
- HDassert( (entry_ptr)->is_protected); \
- HDassert( (entry_ptr)->size > 0 ); \
- \
- /* Regardless of the replacement policy, remove the entry from the \
- * protected list. \
- */ \
- H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pl_head_ptr, \
- (cache_ptr)->pl_tail_ptr, (cache_ptr)->pl_len, \
- (cache_ptr)->pl_size, (fail_val)) \
- \
- if ( (entry_ptr)->is_pinned ) { \
- \
- H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \
- (cache_ptr)->pel_tail_ptr, \
- (cache_ptr)->pel_len, \
- (cache_ptr)->pel_size, (fail_val)) \
- \
- } else { \
- \
- /* modified LRU specific code */ \
- \
- /* insert the entry at the head of the LRU list. */ \
- \
- H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
- (cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
- (cache_ptr)->LRU_list_size, (fail_val)) \
- \
- /* End modified LRU specific code. */ \
- } \
-} /* H5C__UPDATE_RP_FOR_UNPROTECT */
-
-#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
#ifdef H5_HAVE_PARALLEL
@@ -2447,12 +1800,10 @@ if ( ( (cache_ptr)->index_size != \
/*-------------------------------------------------------------------------
*
- * Macro: H5C__INSERT_IN_COLL_LIST
+ * Macro: H5C__INSERT_IN_COLL_LIST
*
* Purpose: Insert entry into collective entries list
*
- * Return: N/A
- *
* Programmer: Mohamad Chaarawi
*
*-------------------------------------------------------------------------
@@ -2460,29 +1811,24 @@ if ( ( (cache_ptr)->index_size != \
#define H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, fail_val) \
{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (entry_ptr) ); \
- \
- /* insert the entry at the head of the list. */ \
+ HDassert(cache_ptr); \
+ HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
+ HDassert(entry_ptr); \
\
+ /* Insert the entry at the head of the list. */ \
H5C__COLL_DLL_PREPEND((entry_ptr), (cache_ptr)->coll_head_ptr, \
(cache_ptr)->coll_tail_ptr, \
(cache_ptr)->coll_list_len, \
- (cache_ptr)->coll_list_size, \
- (fail_val)) \
- \
+ (cache_ptr)->coll_list_size, (fail_val)) \
} /* H5C__INSERT_IN_COLL_LIST */
/*-------------------------------------------------------------------------
*
- * Macro: H5C__REMOVE_FROM_COLL_LIST
+ * Macro: H5C__REMOVE_FROM_COLL_LIST
*
* Purpose: Remove entry from collective entries list
*
- * Return: N/A
- *
* Programmer: Mohamad Chaarawi
*
*-------------------------------------------------------------------------
@@ -2490,29 +1836,24 @@ if ( ( (cache_ptr)->index_size != \
#define H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, fail_val) \
{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (entry_ptr) ); \
- \
- /* remove the entry from the list. */ \
+ HDassert(cache_ptr); \
+ HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \
+ HDassert(entry_ptr); \
\
+ /* Remove the entry from the list. */ \
H5C__COLL_DLL_REMOVE((entry_ptr), (cache_ptr)->coll_head_ptr, \
(cache_ptr)->coll_tail_ptr, \
(cache_ptr)->coll_list_len, \
- (cache_ptr)->coll_list_size, \
- (fail_val)) \
- \
+ (cache_ptr)->coll_list_size, (fail_val)) \
} /* H5C__REMOVE_FROM_COLL_LIST */
/*-------------------------------------------------------------------------
*
- * Macro: H5C__MOVE_TO_TOP_IN_COLL_LIST
+ * Macro: H5C__MOVE_TO_TOP_IN_COLL_LIST
*
* Purpose: Update entry position in collective entries list
*
- * Return: N/A
- *
* Programmer: Mohamad Chaarawi
*
*-------------------------------------------------------------------------
@@ -2520,22 +1861,20 @@ if ( ( (cache_ptr)->index_size != \
#define H5C__MOVE_TO_TOP_IN_COLL_LIST(cache_ptr, entry_ptr, fail_val) \
{ \
- HDassert((cache_ptr)); \
+ HDassert(cache_ptr); \
HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \
- HDassert((entry_ptr)); \
+ HDassert(entry_ptr); \
\
/* Remove entry and insert at the head of the list. */ \
H5C__COLL_DLL_REMOVE((entry_ptr), (cache_ptr)->coll_head_ptr, \
(cache_ptr)->coll_tail_ptr, \
(cache_ptr)->coll_list_len, \
- (cache_ptr)->coll_list_size, \
- (fail_val)) \
+ (cache_ptr)->coll_list_size, (fail_val)) \
\
H5C__COLL_DLL_PREPEND((entry_ptr), (cache_ptr)->coll_head_ptr, \
(cache_ptr)->coll_tail_ptr, \
(cache_ptr)->coll_list_len, \
- (cache_ptr)->coll_list_size, \
- (fail_val)) \
+ (cache_ptr)->coll_list_size, (fail_val)) \
\
} /* H5C__MOVE_TO_TOP_IN_COLL_LIST */
#endif /* H5_HAVE_PARALLEL */
@@ -2580,7 +1919,7 @@ typedef struct H5C_tag_info_t {
hbool_t corked; /* Whether this object is corked */
/* Hash table fields */
- UT_hash_handle hh; /* Hash table handle (must be LAST) */
+ UT_hash_handle hh; /* Hash table handle (must be LAST) */
} H5C_tag_info_t;
@@ -2588,7 +1927,7 @@ typedef struct H5C_tag_info_t {
*
* structure H5C_t
*
- * Catchall structure for all variables specific to an instance of the cache.
+ * Structure for all information specific to an instance of the cache.
*
* While the cache was designed with multiple replacement policies in mind,
* at present only a modified form of LRU is supported.
@@ -2598,13 +1937,11 @@ typedef struct H5C_tag_info_t {
* is used to track dirty entries.
*
* magic: Unsigned 32 bit integer always set to H5C__H5C_T_MAGIC.
- * This field is used to validate pointers to instances of
- * H5C_t.
+ * This field is used to validate pointers to instances of H5C_t.
*
- * flush_in_progress: Boolean flag indicating whether a flush is in
- * progress.
+ * flush_in_progress: Boolean flag indicating whether a flush is in progress.
*
- * log_info: Information used by the MDC logging functionality.
+ * log_info: Information used by the cache logging functionality.
* Described in H5Clog.h.
*
* aux_ptr: Pointer to void used to allow wrapper code to associate
@@ -2613,29 +1950,27 @@ typedef struct H5C_tag_info_t {
*
* max_type_id: Integer field containing the maximum type id number assigned
* to a type of entry in the cache. All type ids from 0 to
- * max_type_id inclusive must be defined. The names of the
- * types are stored in the type_name_table discussed below, and
- * indexed by the ids.
+ * max_type_id inclusive must be defined.
*
* class_table_ptr: Pointer to an array of H5C_class_t of length
- * max_type_id + 1. Entry classes for the cache.
+ * max_type_id + 1. Entry classes for the cache.
*
* max_cache_size: Nominal maximum number of bytes that may be stored in the
- * cache. This value should be viewed as a soft limit, as the
- * cache can exceed this value under the following circumstances:
+ * cache. This value should be viewed as a soft limit, as the
+ * cache can exceed this value under the following circumstances:
*
- * a) All entries in the cache are protected, and the cache is
- * asked to insert a new entry. In this case the new entry
- * will be created. If this causes the cache to exceed
- * max_cache_size, it will do so. The cache will attempt
- * to reduce its size as entries are unprotected.
+ * a) All entries in the cache are protected, and the cache is
+ * asked to insert a new entry. In this case the new entry
+ * will be created. If this causes the cache to exceed
+ * max_cache_size, it will do so. The cache will attempt
+ * to reduce its size as entries are unprotected.
*
- * b) When running in parallel mode, the cache may not be
+ * b) When running in parallel mode, the cache may not be
* permitted to flush a dirty entry in response to a read.
* If there are no clean entries available to evict, the
* cache will exceed its maximum size. Again the cache
- * will attempt to reduce its size to the max_cache_size
- * limit on the next cache write.
+ * will attempt to reduce its size to the max_cache_size
+ * limit on the next cache write.
*
* c) When an entry increases in size, the cache may exceed
* the max_cache_size limit until the next time the cache
@@ -2646,16 +1981,15 @@ typedef struct H5C_tag_info_t {
* field is set to true.
*
* min_clean_size: Nominal minimum number of clean bytes in the cache.
- * The cache attempts to maintain this number of bytes of
- * clean data so as to avoid case b) above. Again, this is
- * a soft limit.
+ * The cache attempts to maintain this number of bytes of clean
+ * data so as to avoid case b) above. Again, this is a soft limit.
*
* close_warning_received: Boolean flag indicating that a file closing
* warning has been received.
*
*
- * In addition to the call back functions required for each entry, the
- * cache requires the following call back functions for this instance of
+ * In addition to the callback functions required for each entry's class,
+ * the cache requires the following callback functions for an instance of
* the cache as a whole:
*
* check_write_permitted: In certain applications, the cache may not
@@ -2690,7 +2024,7 @@ typedef struct H5C_tag_info_t {
* following fields support that index.
*
* We sometimes need to visit all entries in the cache, they are stored in
- * the index list.
+ * an index list.
*
* The index list is maintained by the same macros that maintain the
* index, and must have the same length and size as the index proper.
@@ -2699,13 +2033,13 @@ typedef struct H5C_tag_info_t {
* the cache.
*
* index_size: Number of bytes of cache entries currently stored in the
- * hash table used to index the cache.
+ * hash table used to index the cache.
*
- * This value should not be mistaken for footprint of the
- * cache in memory. The average cache entry is small, and
- * the cache has a considerable overhead. Multiplying the
- * index_size by three should yield a conservative estimate
- * of the cache's memory footprint.
+ * This value should not be mistaken for footprint of the
+ * cache in memory. The average cache entry is small, and
+ * the cache has a considerable overhead. Multiplying the
+ * index_size by three should yield a conservative estimate
+ * of the cache's memory footprint.
*
* index_ring_len: Array of integer of length H5C_RING_NTYPES used to
* maintain a count of entries in the index by ring. Note
@@ -2718,7 +2052,7 @@ typedef struct H5C_tag_info_t {
* equal the value stored in index_size above.
*
* clean_index_size: Number of bytes of clean entries currently stored in
- * the hash table. Note that the index_size field (above)
+ * the hash table. Note that the index_size field (above)
* is also the sum of the sizes of all entries in the cache.
* Thus we should have the invariant that clean_index_size +
* dirty_index_size == index_size.
@@ -2735,7 +2069,7 @@ typedef struct H5C_tag_info_t {
* must equal the value stored in clean_index_size above.
*
* dirty_index_size: Number of bytes of dirty entries currently stored in
- * the hash table. Note that the index_size field (above)
+ * the hash table. Note that the index_size field (above)
* is also the sum of the sizes of all entries in the cache.
* Thus we should have the invariant that clean_index_size +
* dirty_index_size == index_size.
@@ -2745,7 +2079,7 @@ typedef struct H5C_tag_info_t {
* index by ring. Note that the sum of all cells in this array
* must equal the value stored in dirty_index_size above.
*
- * index: Array of pointer to H5C_cache_entry_t of size
+ * index: Array of pointers to H5C_cache_entry_t of size
* H5C__HASH_TABLE_LEN. At present, this value is a power
* of two, not the usual prime number.
*
@@ -2760,7 +2094,7 @@ typedef struct H5C_tag_info_t {
* changing the H5C__HASH_FCN macro and the deletion of the
* H5C__HASH_MASK #define. No other changes should be required.
*
- * il_len: Number of entries on the index list.
+ * il_len: Number of entries on the index list.
*
* This must always be equal to index_len. As such, this
* field is redundant. However, the existing linked list
@@ -2768,7 +2102,7 @@ typedef struct H5C_tag_info_t {
* this field exists primarily to avoid adding complexity to
* these macros.
*
- * il_size: Number of bytes of cache entries currently stored in the
+ * il_size: Number of bytes of cache entries currently stored in the
* index list.
*
* This must always be equal to index_size. As such, this
@@ -2777,17 +2111,17 @@ typedef struct H5C_tag_info_t {
* this field exists primarily to avoid adding complexity to
* these macros.
*
- * il_head: Pointer to the head of the doubly linked list of entries in
- * the index list. Note that cache entries on this list are
+ * il_head: Pointer to the head of the doubly linked list of entries in
+ * the index list. Note that cache entries on this list are
* linked by their il_next and il_prev fields.
*
- * This field is NULL if the index is empty.
+ * This field is NULL if the index is empty.
*
- * il_tail: Pointer to the tail of the doubly linked list of entries in
- * the index list. Note that cache entries on this list are
- * linked by their il_next and il_prev fields.
+ * il_tail: Pointer to the tail of the doubly linked list of entries in
+ * the index list. Note that cache entries on this list are
+ * linked by their il_next and il_prev fields.
*
- * This field is NULL if the index is empty.
+ * This field is NULL if the index is empty.
*
*
* It is possible that an entry may be removed from the cache as the result
@@ -2799,7 +2133,7 @@ typedef struct H5C_tag_info_t {
*
* The following fields are maintained to facilitate this.
*
- * entries_removed_counter: Counter that is incremented each time an
+ * entries_removed_counter: Counter that is incremented each time an
* entry is removed from the cache by any means (eviction,
* expungement, or take ownership at this point in time).
* Functions that perform scans on lists may set this field
@@ -2807,7 +2141,7 @@ typedef struct H5C_tag_info_t {
* Unexpected changes to the counter indicate that an entry
* was removed from the cache as a side effect of the flush.
*
- * last_entry_removed_ptr: Pointer to the instance of H5C_cache_entry_t
+ * last_entry_removed_ptr: Pointer to the instance of H5C_cache_entry_t
* which contained the last entry to be removed from the cache,
* or NULL if there either is no such entry, or if a function
* performing a scan of a list has set this field to NULL prior
@@ -2817,8 +2151,7 @@ typedef struct H5C_tag_info_t {
* maintained to allow functions that perform scans of lists
* to compare this pointer with their pointers to next, thus
* allowing them to avoid unnecessary restarts of scans if the
- * pointers don't match, and if entries_removed_counter is
- * one.
+ * pointers don't match, and if entries_removed_counter is one.
*
* entry_watched_for_removal: Pointer to an instance of H5C_cache_entry_t
* which contains the 'next' entry for an iteration. Removing
@@ -2853,50 +2186,47 @@ typedef struct H5C_tag_info_t {
* the next flush or close.
*
* slist_enabled: Boolean flag used to control operation of the skip
- * list. If this filed is FALSE, operations on the
- * slist are no-ops, and the slist must be empty. If
- * it is TRUE, operations on the slist proceed as usual,
- * and all dirty entries in the metadata cache must be
- * listed in the slist.
+ * list. If this field is FALSE, operations on the slist are
+ * no-ops, and the slist must be empty. If it is TRUE,
+ * operations on the skip list proceed as usual, and all dirty
+ * entries in the metadata cache must be listed in the skip list.
*
* slist_changed: Boolean flag used to indicate whether the contents of
- * the slist has changed since the last time this flag was
+ * the skip list have changed since the last time this flag was
* reset. This is used in the cache flush code to detect
* conditions in which pre-serialize or serialize callbacks
- * have modified the slist -- which obliges us to restart
- * the scan of the slist from the beginning.
+ * have modified the skip list -- which obliges us to restart
+ * the scan of the skip list from the beginning.
*
- * slist_len: Number of entries currently in the skip list
- * used to maintain a sorted list of dirty entries in the
- * cache.
+ * slist_len: Number of entries currently in the skip list. Used to
+ * maintain a sorted list of dirty entries in the cache.
*
- * slist_size: Number of bytes of cache entries currently stored in the
- * skip list used to maintain a sorted list of
- * dirty entries in the cache.
+ * slist_size: Number of bytes of cache entries currently stored in the
+ * skip list used to maintain a sorted list of dirty entries in
+ * the cache.
*
* slist_ring_len: Array of integer of length H5C_RING_NTYPES used to
- * maintain a count of entries in the slist by ring. Note
+ * maintain a count of entries in the skip list by ring. Note
* that the sum of all the cells in this array must equal
* the value stored in slist_len above.
*
* slist_ring_size: Array of size_t of length H5C_RING_NTYPES used to
- * maintain the sum of the sizes of all entries in the
- * slist by ring. Note that the sum of all cells in this
- * array must equal the value stored in slist_size above.
+ * maintain the sum of the sizes of all entries in the skip list
+ * by ring. Note that the sum of all cells in this array must
+ * equal the value stored in slist_size above.
*
- * slist_ptr: pointer to the instance of H5SL_t used maintain a sorted
- * list of dirty entries in the cache. This sorted list has
- * two uses:
+ * slist_ptr: Pointer to the instance of H5SL_t used to maintain a sorted
+ * list of dirty entries in the cache. This sorted list has
+ * two uses:
*
- * a) It allows us to flush dirty entries in increasing address
- * order, which results in significant savings.
+ * a) It allows us to flush dirty entries in increasing address
+ * order, which results in significant savings.
*
- * b) It facilitates checking for adjacent dirty entries when
- * attempting to evict entries from the cache.
+ * b) It facilitates checking for adjacent dirty entries when
+ * attempting to evict entries from the cache.
*
* num_last_entries: The number of entries in the cache that can only be
- * flushed after all other entries in the cache have
- * been flushed.
+ * flushed after all other entries in the cache have been flushed.
*
* Note: At this time, the this field will only be applied to
* two types of entries: the superblock and the file driver info
@@ -2909,11 +2239,11 @@ typedef struct H5C_tag_info_t {
* compiled in when H5C_DO_SANITY_CHECKS is TRUE.
*
* slist_len_increase: Number of entries that have been added to the
- * slist since the last time this field was set to zero.
+ * skip list since the last time this field was set to zero.
* Note that this value can be negative.
*
* slist_size_increase: Total size of all entries that have been added
- * to the slist since the last time this field was set to
+ * to the skip list since the last time this field was set to
* zero. Note that this value can be negative.
*
* Cache entries belonging to a particular object are "tagged" with that
@@ -2921,71 +2251,69 @@ typedef struct H5C_tag_info_t {
*
* The following fields are maintained to facilitate this.
*
- * tag_list: A collection to track entries that belong to an object.
- * Each H5C_tag_info_t struct on the tag list corresponds to
- * a particular object in the file. Tagged entries can be
- * flushed or evicted as a group, or corked to prevent entries
- * from being evicted from the cache.
+ * tag_list: A collection to track entries that belong to an object.
+ * Each H5C_tag_info_t struct on the tag list corresponds to a
+ * particular object in the file. Tagged entries can be flushed
+ * or evicted as a group, or corked to prevent entries from being
+ * evicted from the cache.
*
- * "Global" entries, like the superblock and the file's
- * freelist, as well as shared entries like global
- * heaps and shared object header messages, are not tagged.
+ * "Global" entries, like the superblock and the file's freelist,
+ * as well as shared entries like global heaps and shared object
+ * header messages, are not tagged.
*
- * ignore_tags: Boolean flag to disable tag validation during entry insertion.
+ * ignore_tags: Boolean flag to disable tag validation during entry insertion.
*
* num_objs_corked: Unsigned integer field containing the number of objects
- * that are "corked". The "corked" status of an object is
- * found by searching the "tag_list". This field is added
- * for optimization so that the skip list search on "tag_list"
- * can be skipped if this field is zero, i.e. no "corked"
- * objects.
+ * that are "corked". The "corked" status of an object is found by
+ * searching the "tag_list". This field is added for optimization
+ * so that the skip list search on "tag_list" can be skipped if this
+ * field is zero, i.e. no "corked" objects.
*
* When a cache entry is protected, it must be removed from the LRU
- * list(s) as it cannot be either flushed or evicted until it is unprotected.
+ * list(s), as it cannot be either flushed or evicted until it is unprotected.
* The following fields are used to implement the protected list (pl).
*
- * pl_len: Number of entries currently residing on the protected list.
+ * pl_len: Number of entries currently residing on the protected list.
*
- * pl_size: Number of bytes of cache entries currently residing on the
- * protected list.
+ * pl_size: Number of bytes of cache entries currently residing on the
+ * protected list.
*
* pl_head_ptr: Pointer to the head of the doubly linked list of protected
- * entries. Note that cache entries on this list are linked
- * by their next and prev fields.
+ * entries. Note that cache entries on this list are linked
+ * by their next and prev fields.
*
- * This field is NULL if the list is empty.
+ * This field is NULL if the list is empty.
*
* pl_tail_ptr: Pointer to the tail of the doubly linked list of protected
- * entries. Note that cache entries on this list are linked
- * by their next and prev fields.
+ * entries. Note that cache entries on this list are linked
+ * by their next and prev fields.
*
- * This field is NULL if the list is empty.
+ * This field is NULL if the list is empty.
*
*
* For very frequently used entries, the protect/unprotect overhead can
- * become burdensome. To avoid this overhead, the cache
- * allows entries to be "pinned". A pinned entry is similar to a
- * protected entry, in the sense that it cannot be evicted, and that
- * the entry can be modified at any time.
+ * become burdensome. To avoid this overhead, the cache allows entries to
+ * be "pinned". A pinned entry is similar to a protected entry, in the
+ * sense that it cannot be evicted, and that the entry can be modified at
+ * any time.
*
* Pinning an entry has the following implications:
*
- * 1) A pinned entry cannot be evicted. Thus unprotected
- * pinned entries reside in the pinned entry list, instead
- * of the LRU list(s) (or other lists maintained by the current
- * replacement policy code).
+ * 1) A pinned entry cannot be evicted. Thus unprotected pinned
+ * entries reside in the pinned entry list, instead of the LRU
+ * list(s) or other lists maintained by the current replacement
+ * policy code.
*
* 2) A pinned entry can be accessed or modified at any time.
* This places an additional burden on the associated pre-serialize
- * and serialize callbacks, which must ensure the entry is in
- * a consistent state before creating an image of it.
+ * and serialize callbacks, which must ensure the entry is in
+ * a consistent state before creating an image of it.
*
* 3) A pinned entry can be marked as dirty (and possibly
* change size) while it is unprotected.
*
* 4) The flush-destroy code must allow pinned entries to
- * be unpinned (and possibly unprotected) during the
- * flush.
+ * be unpinned (and possibly unprotected) during the flush.
*
* Since pinned entries cannot be evicted, they must be kept on a pinned
* entry list (pel), instead of being entrusted to the replacement policy
@@ -2993,23 +2321,22 @@ typedef struct H5C_tag_info_t {
*
* Maintaining the pinned entry list requires the following fields:
*
- * pel_len: Number of entries currently residing on the pinned
- * entry list.
+ * pel_len: Number of entries currently residing on the pinned entry list.
*
- * pel_size: Number of bytes of cache entries currently residing on
+ * pel_size: Number of bytes of cache entries currently residing on
* the pinned entry list.
*
* pel_head_ptr: Pointer to the head of the doubly linked list of pinned
* but not protected entries. Note that cache entries on
* this list are linked by their next and prev fields.
*
- * This field is NULL if the list is empty.
+ * This field is NULL if the list is empty.
*
* pel_tail_ptr: Pointer to the tail of the doubly linked list of pinned
* but not protected entries. Note that cache entries on
* this list are linked by their next and prev fields.
*
- * This field is NULL if the list is empty.
+ * This field is NULL if the list is empty.
*
*
* The cache must have a replacement policy, and the fields supporting this
@@ -3033,10 +2360,9 @@ typedef struct H5C_tag_info_t {
*
* When reading in parallel mode, we evict from the clean LRU list only.
* This implies that we must try to ensure that the clean LRU list is
- * reasonably well stocked at all times.
- *
- * We attempt to do this by trying to flush enough entries on each write
- * to keep the cLRU_list_size >= min_clean_size.
+ * reasonably well stocked at all times. We attempt to do this by trying
+ * to flush enough entries on each write to keep the cLRU_list_size >=
+ * min_clean_size.
*
* Even if we start with a completely clean cache, a sequence of protects
* without unprotects can empty the clean LRU list. In this case, the
@@ -3050,14 +2376,14 @@ typedef struct H5C_tag_info_t {
*
* LRU_list_len: Number of cache entries currently on the LRU list.
*
- * Observe that LRU_list_len + pl_len + pel_len must always
- * equal index_len.
+ * The LRU_list_len + pl_len + pel_len must always
+ * equal index_len.
*
* LRU_list_size: Number of bytes of cache entries currently residing on the
* LRU list.
*
- * Observe that LRU_list_size + pl_size + pel_size must always
- * equal index_size.
+ * The LRU_list_size + pl_size + pel_size must always
+ * equal index_size.
*
* LRU_head_ptr: Pointer to the head of the doubly linked LRU list. Cache
* entries on this list are linked by their next and prev fields.
@@ -3071,13 +2397,13 @@ typedef struct H5C_tag_info_t {
*
* cLRU_list_len: Number of cache entries currently on the clean LRU list.
*
- * Observe that cLRU_list_len + dLRU_list_len must always
+ * The cLRU_list_len + dLRU_list_len must always
* equal LRU_list_len.
*
* cLRU_list_size: Number of bytes of cache entries currently residing on
* the clean LRU list.
*
- * Observe that cLRU_list_size + dLRU_list_size must always
+ * The cLRU_list_size + dLRU_list_size must always
* equal LRU_list_size.
*
* cLRU_head_ptr: Pointer to the head of the doubly linked clean LRU list.
@@ -3094,12 +2420,12 @@ typedef struct H5C_tag_info_t {
*
* dLRU_list_len: Number of cache entries currently on the dirty LRU list.
*
- * Observe that cLRU_list_len + dLRU_list_len must always
+ * The cLRU_list_len + dLRU_list_len must always
* equal LRU_list_len.
*
 * dLRU_list_size: Number of bytes of cache entries currently on the dirty LRU list.
 *
- * Observe that cLRU_list_len + dLRU_list_len must always
+ * The cLRU_list_size + dLRU_list_size must always
 * equal LRU_list_size.
*
* dLRU_head_ptr: Pointer to the head of the doubly linked dirty LRU list.
@@ -3117,8 +2443,8 @@ typedef struct H5C_tag_info_t {
*
* Automatic cache size adjustment:
*
- * While the default cache size is adequate for most cases, we can run into
- * cases where the default is too small. Ideally, we will let the user
+ * While the default cache size is adequate for many cases, there are
+ * cases where the default is too small. Ideally, the user should
* adjust the cache size as required. However, this is not possible in all
* cases, so the cache has automatic cache size adjustment code.
*
@@ -3126,36 +2452,32 @@ typedef struct H5C_tag_info_t {
* the structure described below:
*
* size_increase_possible: Depending on the configuration data given
- * in the resize_ctl field, it may or may not be possible
- * to increase the size of the cache. Rather than test for
- * all the ways this can happen, we simply set this flag when
- * we receive a new configuration.
+ * in the resize_ctl field, it may or may not be possible to
+ * increase the size of the cache. Rather than test for all the
+ * ways this can happen, we simply set this flag when we receive
+ * a new configuration.
*
* flash_size_increase_possible: Depending on the configuration data given
- * in the resize_ctl field, it may or may not be possible
- * for a flash size increase to occur. We set this flag
- * whenever we receive a new configuration so as to avoid
- * repeated calculations.
+ * in the resize_ctl field, it may or may not be possible for a
+ * flash size increase to occur. We set this flag whenever we
+ * receive a new configuration so as to avoid repeated calculations.
*
* flash_size_increase_threshold: If a flash cache size increase is possible,
- * this field is used to store the minimum size of a new entry
- * or size increase needed to trigger a flash cache size
- * increase. Note that this field must be updated whenever
- * the size of the cache is changed.
+ * this field is used to store the minimum size of a new entry or size
+ * increase needed to trigger a flash cache size increase. Note that
+ * this field must be updated whenever the size of the cache is changed.
*
- * size_decrease_possible: Depending on the configuration data given
- * in the resize_ctl field, it may or may not be possible
- * to decrease the size of the cache. Rather than test for
- * all the ways this can happen, we simply set this flag when
- * we receive a new configuration.
+ * size_decrease_possible: Depending on the configuration data given in the
+ * resize_ctl field, it may or may not be possible to decrease the
+ * size of the cache. Rather than test for all the ways this can
+ * happen, we simply set this flag when we receive a new configuration.
*
* resize_enabled: This is another convenience flag which is set whenever
- * a new set of values for resize_ctl are provided. Very
- * simply:
+ * a new set of values for resize_ctl are provided. Very simply:
*
* resize_enabled = size_increase_possible || size_decrease_possible;
*
- * cache_full: Boolean flag used to keep track of whether the cache is
+ * cache_full: Boolean flag used to keep track of whether the cache is
* full, so we can refrain from increasing the size of a
* cache which hasn't used up the space allotted to it.
*
@@ -3177,18 +2499,16 @@ typedef struct H5C_tag_info_t {
* occur.
*
* msic_in_progress: As the metadata cache has become re-entrant, and as
- * the free space manager code has become more tightly
- * integrated with the metadata cache, it is possible that
- * a call to H5C_insert_entry() may trigger a call to
- * H5C_make_space_in_cache(), which, via H5C__flush_single_entry()
- * and client callbacks, may trigger an infinite regression
- * of calls to H5C_make_space_in_cache().
- *
- * The msic_in_progress boolean flag is used to detect this,
- * and prevent the infinite regression that would otherwise
- * occur.
- *
- * resize_ctl: Instance of H5C_auto_size_ctl_t containing configuration
+ * the free space manager code has become more tightly integrated
+ * with the metadata cache, it is possible that a call to
+ * H5C_insert_entry() may trigger a call to H5C_make_space_in_cache(),
+ * which, via H5C__flush_single_entry() and client callbacks, may
+ * trigger an infinite regression of calls to H5C_make_space_in_cache().
+ *
+ * The msic_in_progress boolean flag is used to detect this,
+ * and prevent the infinite regression that would otherwise occur.
+ *
+ * resize_ctl: Instance of H5C_auto_size_ctl_t containing configuration
* data for automatic cache resizing.
*
* epoch_markers_active: Integer field containing the number of epoch
@@ -3221,27 +2541,24 @@ typedef struct H5C_tag_info_t {
* in the ring buffer.
*
* epoch_markers: Array of instances of H5C_cache_entry_t of length
- * H5C__MAX_EPOCH_MARKERS. The entries are used as markers
- * in the LRU list to identify cache entries that haven't
- * been accessed for some (small) specified number of
- * epochs. These entries (if any) can then be evicted and
- * the cache size reduced -- ideally without evicting any
- * of the current working set. Needless to say, the epoch
- * length and the number of epochs before an unused entry
- * must be chosen so that all, or almost all, the working
- * set will be accessed before the limit.
- *
- * Epoch markers only appear in the LRU list, never in
- * the index or slist. While they are of type
- * H5C__EPOCH_MARKER_TYPE, and have associated class
- * functions, these functions should never be called.
- *
- * The addr fields of these instances of H5C_cache_entry_t
- * are set to the index of the instance in the epoch_markers
- * array, the size is set to 0, and the type field points
- * to the constant structure epoch_marker_class defined
- * in H5C.c. The next and prev fields are used as usual
- * to link the entry into the LRU list.
+ * H5C__MAX_EPOCH_MARKERS. The entries are used as markers in the
+ * LRU list to identify cache entries that haven't been accessed for
+ * some (small) specified number of epochs. These entries (if any)
+ * can then be evicted and the cache size reduced -- ideally without
+ * evicting any of the current working set. Needless to say, the epoch
+ * length and the number of epochs before an unused entry must be
+ * chosen so that all, or almost all, the working set will be accessed
+ * before the limit.
+ *
+ * Epoch markers only appear in the LRU list, never in the index or
+ * skip list. While they are of type H5C__EPOCH_MARKER_TYPE, and have
+ * associated class functions, these functions should never be called.
+ *
+ * The addr fields of these instances of H5C_cache_entry_t are set to
+ * the index of the instance in the epoch_markers array, the size is
+ * set to 0, and the type field points to the constant structure
+ * epoch_marker_class defined in H5Cepoch.c. The next and prev fields
+ * are used as usual to link the entry into the LRU list.
*
* All other fields are unused.
*
@@ -3252,36 +2569,33 @@ typedef struct H5C_tag_info_t {
* simple cache hit rate computation regardless of whether statistics
* collection is enabled. The following fields support this capability.
*
- * cache_hits: Number of cache hits since the last time the cache hit
- * rate statistics were reset. Note that when automatic cache
- * re-sizing is enabled, this field will be reset every automatic
- * resize epoch.
+ * cache_hits: Number of cache hits since the last time the cache hit rate
+ * statistics were reset. Note that when automatic cache re-sizing
+ * is enabled, this field will be reset every automatic resize epoch.
*
 * cache_accesses: Number of times the cache has been accessed
- * since the last since the last time the cache hit rate statistics
- * were reset. Note that when automatic cache re-sizing is enabled,
- * this field will be reset every automatic resize epoch.
+ * since the last time the cache hit rate statistics were reset.
+ * Note that when automatic cache re-sizing is enabled, this field
+ * will be reset every automatic resize epoch.
*
*
* Metadata cache image management related fields.
*
- * image_ctl: Instance of H5C_cache_image_ctl_t containing configuration
- * data for generation of a cache image on file close.
+ * image_ctl: Instance of H5C_cache_image_ctl_t containing configuration
+ * data for generation of a cache image on file close.
*
- * serialization_in_progress: Boolean field that is set to TRUE iff
- * the cache is in the process of being serialized. This
- * field is needed to support the H5C_serialization_in_progress()
- * call, which is in turn required for sanity checks in some
- * cache clients.
+ * serialization_in_progress: Boolean field that is set to TRUE iff
+ * the cache is in the process of being serialized. This field is
+ * needed to support the H5C_serialization_in_progress() call, which
+ * is in turn required for sanity checks in some cache clients.
*
- * load_image: Boolean flag indicating that the metadata cache image
- * superblock extension message exists and should be
- * read, and the image block read and decoded on the next
- * call to H5C_protect().
+ * load_image: Boolean flag indicating that the metadata cache image
+ * superblock extension message exists and should be read, and the
+ * image block read and decoded on the next call to H5C_protect().
*
- * image_loaded: Boolean flag indicating that the metadata cache has
+ * image_loaded: Boolean flag indicating that the metadata cache has
* loaded the metadata cache image as directed by the
- * MDC cache image superblock extension message.
+ * cache image superblock extension message.
*
* delete_image: Boolean flag indicating whether the metadata cache image
* superblock message should be deleted and the cache image
@@ -3290,20 +2604,18 @@ typedef struct H5C_tag_info_t {
* This flag should be set to TRUE iff the file is opened
* R/W and there is a cache image to be read.
*
- * image_addr: haddr_t containing the base address of the on disk
- * metadata cache image, or HADDR_UNDEF if that value is
- * undefined. Note that this field is used both in the
- * construction and write, and the read and decode of
- * metadata cache image blocks.
+ * image_addr: The base address of the on-disk metadata cache image, or
+ * HADDR_UNDEF if that value is undefined. Note that this field
+ * is used both in the construction and write, and the read and
+ * decode of metadata cache image blocks.
*
- * image_len: hsize_t containing the size of the on disk metadata cache
- * image, or zero if that value is undefined. Note that this
- * field is used both in the construction and write, and the
- * read and decode of metadata cache image blocks.
+ * image_len: The size of the on disk metadata cache image, or zero if that
+ * value is undefined. Note that this field is used both in the
+ * construction and write, and the read and decode of metadata cache
+ * image blocks.
*
- * image_data_len: size_t containing the number of bytes of data in the
- * on disk metadata cache image, or zero if that value is
- * undefined.
+ * image_data_len: The number of bytes of data in the on disk metadata
+ * cache image, or zero if that value is undefined.
*
* In most cases, this value is the same as the image_len
* above. It exists to allow for metadata cache image blocks
@@ -3349,11 +2661,11 @@ typedef struct H5C_tag_info_t {
* The following fields are used assemble the cache image prior to
* writing it to disk.
*
- * num_entries_in_image: Unsigned integer field containing the number of entries
- * to be copied into the metadata cache image. Note that
- * this value will be less than the number of entries in
- * the cache, and the superblock and its related entries
- * are not written to the metadata cache image.
+ * num_entries_in_image: Unsigned integer field containing the number of
+ * entries to be copied into the metadata cache image. Note that
+ * this value will be less than the number of entries in the cache,
+ * and the superblock and its related entries are not written to the
+ * metadata cache image.
*
* image_entries: Pointer to a dynamically allocated array of instance of
* H5C_image_entry_t of length num_entries_in_image, or NULL
@@ -3363,19 +2675,19 @@ typedef struct H5C_tag_info_t {
*
* image_buffer: Pointer to the dynamically allocated buffer of length
* image_len in which the metadata cache image is assembled,
- * or NULL if that buffer does not exist.
+ * or NULL if that buffer does not exist.
*
*
* Free Space Manager Related fields:
*
- * The free space managers must be informed when we are about to close
- * or flush the file so that they order themselves accordingly. This used
- * to be done much later in the close process, but with cache image and
+ * The free space managers for the file must be informed when we are about to
+ * close or flush the file so that they order themselves accordingly. This
+ * used to be done much later in the close process, but with cache image and
* page buffering, this is no longer viable, as we must finalize the on
* disk image of all metadata much sooner.
*
* This is handled by the H5MF_settle_raw_data_fsm() and
- * H5MF_settle_meta_data_FSM() routines. As these calls are expensive,
+ * H5MF_settle_meta_data_fsm() routines. As these calls are expensive,
* the following fields are used to track whether the target free space
* managers are clean.
*
@@ -3409,151 +2721,122 @@ typedef struct H5C_tag_info_t {
* below. The first set are collected only when H5C_COLLECT_CACHE_STATS
* is true.
*
- * hits: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times an entry with type id
+ * hits: Array to record the number of times an entry with type id
* equal to the array index has been in cache when requested in
* the current epoch.
*
- * misses: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times an entry with type id
+ * misses: Array to record the number of times an entry with type id
* equal to the array index has not been in cache when
* requested in the current epoch.
*
- * write_protects: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
- * cells are used to record the number of times an entry with
+ * write_protects: Array to record the number of times an entry with
* type id equal to the array index has been write protected
* in the current epoch.
*
* Observe that (hits + misses) = (write_protects + read_protects).
*
- * read_protects: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
- * cells are used to record the number of times an entry with
+ * read_protects: Array to record the number of times an entry with
* type id equal to the array index has been read protected in
* the current epoch.
*
- * Observe that (hits + misses) = (write_protects + read_protects).
+ * Observe that (hits + misses) = (write_protects + read_protects).
*
- * max_read_protects: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1.
- * The cells are used to maximum number of simultaneous read
+ * max_read_protects: Array to record the maximum number of simultaneous read
* protects on any entry with type id equal to the array index
* in the current epoch.
*
- * insertions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times an entry with type
+ * insertions: Array to record the number of times an entry with type
* id equal to the array index has been inserted into the
* cache in the current epoch.
*
- * pinned_insertions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
- * The cells are used to record the number of times an entry
+ * pinned_insertions: Array to record the number of times an entry
* with type id equal to the array index has been inserted
* pinned into the cache in the current epoch.
*
- * clears: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times a dirty entry with type
- * id equal to the array index has been cleared in the current
- * epoch.
+ * clears: Array to record the number of times a dirty entry with type
+ * id equal to the array index has been cleared in the current epoch.
*
- * flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times an entry with type id
+ * flushes: Array to record the number of times an entry with type id
* equal to the array index has been written to disk in the
- * current epoch.
+ * current epoch.
*
- * evictions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times an entry with type id
+ * evictions: Array to record the number of times an entry with type id
* equal to the array index has been evicted from the cache in
* the current epoch.
*
- * take_ownerships: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
- * cells are used to record the number of times an entry with
+ * take_ownerships: Array to record the number of times an entry with
* type id equal to the array index has been removed from the
* cache via the H5C__TAKE_OWNERSHIP_FLAG in the current epoch.
*
- * moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times an entry with type
- * id equal to the array index has been moved in the current
- * epoch.
+ * moves: Array to record the number of times an entry with type
+ * id equal to the array index has been moved in the current epoch.
*
- * entry_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
- * The cells are used to record the number of times an entry
+ * entry_flush_moves: Array to record the number of times an entry
* with type id equal to the array index has been moved
* during its pre-serialize callback in the current epoch.
*
- * cache_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
- * The cells are used to record the number of times an entry
+ * cache_flush_moves: Array to record the number of times an entry
* with type id equal to the array index has been moved
* during a cache flush in the current epoch.
*
- * pins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times an entry with type
- * id equal to the array index has been pinned in the current
- * epoch.
+ * pins: Array to record the number of times an entry with type
+ * id equal to the array index has been pinned in the current epoch.
*
- * unpins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times an entry with type
- * id equal to the array index has been unpinned in the current
- * epoch.
+ * unpins: Array to record the number of times an entry with type
+ * id equal to the array index has been unpinned in the current epoch.
*
- * dirty_pins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the number of times an entry with type
+ * dirty_pins: Array to record the number of times an entry with type
* id equal to the array index has been marked dirty while pinned
* in the current epoch.
*
- * pinned_flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
- * cells are used to record the number of times an entry
+ * pinned_flushes: Array to record the number of times an entry
* with type id equal to the array index has been flushed while
* pinned in the current epoch.
*
- * pinned_clears: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
- * cells are used to record the number of times an entry
+ * pinned_clears: Array to record the number of times an entry
* with type id equal to the array index has been cleared while
* pinned in the current epoch.
*
- * size_increases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
- * The cells are used to record the number of times an entry
+ * size_increases: Array to record the number of times an entry
* with type id equal to the array index has increased in
* size in the current epoch.
*
- * size_decreases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
- * The cells are used to record the number of times an entry
+ * size_decreases: Array to record the number of times an entry
* with type id equal to the array index has decreased in
* size in the current epoch.
*
- * entry_flush_size_changes: Array of int64 of length
- * H5C__MAX_NUM_TYPE_IDS + 1. The cells are used to record
- * the number of times an entry with type id equal to the
- * array index has changed size while in its pre-serialize
- * callback.
+ * entry_flush_size_changes: Array to record the number of times an entry
+ * with type id equal to the array index has changed size while in
+ * its pre-serialize callback.
*
- * cache_flush_size_changes: Array of int64 of length
- * H5C__MAX_NUM_TYPE_IDS + 1. The cells are used to record
- * the number of times an entry with type id equal to the
- * array index has changed size during a cache flush
+ * cache_flush_size_changes: Array to record the number of times an entry
+ * with type id equal to the array index has changed size during a
+ * cache flush.
*
* total_ht_insertions: Number of times entries have been inserted into the
* hash table in the current epoch.
*
* total_ht_deletions: Number of times entries have been deleted from the
- * hash table in the current epoch.
- *
- * successful_ht_searches: int64 containing the total number of successful
- * searches of the hash table in the current epoch.
+ * hash table in the current epoch.
*
- * total_successful_ht_search_depth: int64 containing the total number of
- * entries other than the targets examined in successful
- * searches of the hash table in the current epoch.
+ * successful_ht_searches: The total number of successful searches of the
+ * hash table in the current epoch.
*
- * failed_ht_searches: int64 containing the total number of unsuccessful
- * searches of the hash table in the current epoch.
+ * total_successful_ht_search_depth: The total number of entries other than
+ * the targets examined in successful searches of the hash table in
+ * the current epoch.
*
- * total_failed_ht_search_depth: int64 containing the total number of
- * entries examined in unsuccessful searches of the hash
+ * failed_ht_searches: The total number of unsuccessful searches of the hash
* table in the current epoch.
*
- * max_index_len: Largest value attained by the index_len field in the
- * current epoch.
+ * total_failed_ht_search_depth: The total number of entries examined in
+ * unsuccessful searches of the hash table in the current epoch.
*
- * max_index_size: Largest value attained by the index_size field in the
- * current epoch.
+ * max_index_len: Largest value attained by the index_len field in the
+ * current epoch.
+ *
+ * max_index_size: Largest value attained by the index_size field in the
+ * current epoch.
*
* max_clean_index_size: Largest value attained by the clean_index_size field
* in the current epoch.
@@ -3561,75 +2844,73 @@ typedef struct H5C_tag_info_t {
* max_dirty_index_size: Largest value attained by the dirty_index_size field
* in the current epoch.
*
- * max_slist_len: Largest value attained by the slist_len field in the
- * current epoch.
+ * max_slist_len: Largest value attained by the slist_len field in the
+ * current epoch.
*
- * max_slist_size: Largest value attained by the slist_size field in the
- * current epoch.
+ * max_slist_size: Largest value attained by the slist_size field in the
+ * current epoch.
*
* max_pl_len: Largest value attained by the pl_len field in the
- * current epoch.
+ * current epoch.
*
* max_pl_size: Largest value attained by the pl_size field in the
- * current epoch.
+ * current epoch.
*
* max_pel_len: Largest value attained by the pel_len field in the
- * current epoch.
+ * current epoch.
*
* max_pel_size: Largest value attained by the pel_size field in the
- * current epoch.
+ * current epoch.
*
* calls_to_msic: Total number of calls to H5C__make_space_in_cache
*
* total_entries_skipped_in_msic: Number of clean entries skipped while
- * enforcing the min_clean_fraction in H5C__make_space_in_cache().
+ * enforcing the min_clean_fraction in H5C__make_space_in_cache().
*
* total_dirty_pf_entries_skipped_in_msic: Number of dirty prefetched entries
- * skipped in H5C__make_space_in_cache(). Note that this can
- * only occur when a file is opened R/O with a cache image
- * containing dirty entries.
+ * skipped in H5C__make_space_in_cache(). Note that this can
+ * only occur when a file is opened R/O with a cache image
+ * containing dirty entries.
*
* total_entries_scanned_in_msic: Number of clean entries skipped while
- * enforcing the min_clean_fraction in H5C__make_space_in_cache().
+ * enforcing the min_clean_fraction in H5C__make_space_in_cache().
*
* max_entries_skipped_in_msic: Maximum number of clean entries skipped
- * in any one call to H5C__make_space_in_cache().
+ * in any one call to H5C__make_space_in_cache().
*
* max_dirty_pf_entries_skipped_in_msic: Maximum number of dirty prefetched
- * entries skipped in any one call to H5C__make_space_in_cache().
- * Note that this can only occur when the file is opened
- * R/O with a cache image containing dirty entries.
+ * entries skipped in any one call to H5C__make_space_in_cache().
+ * Note that this can only occur when the file is opened
+ * R/O with a cache image containing dirty entries.
*
* max_entries_scanned_in_msic: Maximum number of entries scanned over
- * in any one call to H5C__make_space_in_cache().
+ * in any one call to H5C__make_space_in_cache().
*
* entries_scanned_to_make_space: Number of entries scanned only when looking
- * for entries to evict in order to make space in cache.
+ * for entries to evict in order to make space in cache.
*
*
* The following fields track statistics on cache images.
*
- * images_created: Integer field containing the number of cache images
- * created since the last time statistics were reset.
+ * images_created: The number of cache images created since the last
+ * time statistics were reset.
*
* At present, this field must always be either 0 or 1.
* Further, since cache images are only created at file
* close, this field should only be set at that time.
*
- * images_read: Integer field containing the number of cache images
- * read from file. Note that reading an image is different
- * from loading it -- reading the image means just that,
- * while loading the image refers to decoding it and loading
- * it into the metadata cache.
+ * images_read: The number of cache images read from file. Note that
+ * reading an image is different from loading it -- reading the
+ * image means just that, while loading the image refers to decoding
+ * it and loading it into the metadata cache.
*
- * In the serial case, image_read should always equal
- * images_loaded. However, in the parallel case, the
- * image should only be read by process 0. All other
- * processes should receive the cache image via a broadcast
- * from process 0.
+ *          In the serial case, images_read should always equal images_loaded.
+ * However, in the parallel case, the image should only be read by
+ * process 0. All other processes should receive the cache image via
+ * a broadcast from process 0.
*
- * images_loaded: Integer field containing the number of cache images
- * loaded since the last time statistics were reset.
+ * images_loaded: The number of cache images loaded since the last time
+ * statistics were reset.
*
* At present, this field must always be either 0 or 1.
* Further, since cache images are only loaded at the
@@ -3649,25 +2930,24 @@ typedef struct H5C_tag_info_t {
* of prefetched entries are tracked in the flushes and evictions arrays
* discussed above.
*
- * prefetches: Number of prefetched entries that are loaded to the cache.
+ * prefetches: Number of prefetched entries that are loaded to the cache.
*
- * dirty_prefetches: Number of dirty prefetched entries that are loaded
+ * dirty_prefetches: Number of dirty prefetched entries that are loaded
* into the cache.
*
- * prefetch_hits: Number of prefetched entries that are actually used.
+ * prefetch_hits: Number of prefetched entries that are actually used.
*
*
- * Entries may move, load, dirty, and delete
- * other entries in their pre_serialize and serialize callbacks, there is
- * code to restart scans of lists so as to avoid
- * improper behavior if the next entry in the list is the target of one on
- * these operations.
+ * Since entries may move, load, dirty, and delete other entries in their
+ * pre_serialize and serialize callbacks, there is code to restart scans of
+ * lists so as to avoid improper behavior if the next entry in the list is
+ * the target of one of these operations.
*
 * The following fields are used to count such occurrences. They are used
* both in tests (to verify that the scan has been restarted), and to
* obtain estimates of how frequently these restarts occur.
*
- * slist_scan_restarts: Number of times a scan of the slist (that contains
+ * slist_scan_restarts: Number of times a scan of the skip list (that contains
* calls to H5C__flush_single_entry()) has been restarted to
* avoid potential issues with change of status of the next
* entry in the scan.
@@ -3689,42 +2969,35 @@ typedef struct H5C_tag_info_t {
* The remaining stats are collected only when both H5C_COLLECT_CACHE_STATS
* and H5C_COLLECT_CACHE_ENTRY_STATS are true.
*
- * max_accesses: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the maximum number of times any single
+ * max_accesses: Array to record the maximum number of times any single
* entry with type id equal to the array index has been
* accessed in the current epoch.
*
- * min_accesses: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the minimum number of times any single
+ * min_accesses: Array to record the minimum number of times any single
* entry with type id equal to the array index has been
* accessed in the current epoch.
*
- * max_clears: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the maximum number of times any single
+ * max_clears: Array to record the maximum number of times any single
* entry with type id equal to the array index has been cleared
* in the current epoch.
*
- * max_flushes: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the maximum number of times any single
+ * max_flushes: Array to record the maximum number of times any single
* entry with type id equal to the array index has been
* flushed in the current epoch.
*
- * max_size: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the maximum size of any single entry
+ * max_size: Array to record the maximum size of any single entry
* with type id equal to the array index that has resided in
* the cache in the current epoch.
*
- * max_pins: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
- * are used to record the maximum number of times that any single
+ * max_pins:  Array to record the maximum number of times that any single
 *              entry with type id equal to the array index has been
 *              marked as pinned in the cache in the current epoch.
*
*
* Fields supporting testing:
*
- * prefix Array of char used to prefix debugging output. The
- * field is intended to allow marking of output of with
- * the processes mpi rank.
+ * prefix:   Array of char used to prefix debugging output.  The field is
+ *      intended to allow marking output with the process's MPI rank.
*
* get_entry_ptr_from_addr_counter: Counter used to track the number of
* times the H5C_get_entry_ptr_from_addr() function has been
@@ -3735,238 +3008,238 @@ typedef struct H5C_tag_info_t {
struct H5C_t {
uint32_t magic;
- hbool_t flush_in_progress;
- H5C_log_info_t *log_info;
- void * aux_ptr;
- int32_t max_type_id;
- const H5C_class_t * const *class_table_ptr;
- size_t max_cache_size;
- size_t min_clean_size;
- H5C_write_permitted_func_t check_write_permitted;
- hbool_t write_permitted;
- H5C_log_flush_func_t log_flush;
- hbool_t evictions_enabled;
- hbool_t close_warning_received;
+ hbool_t flush_in_progress;
+ H5C_log_info_t * log_info;
+ void * aux_ptr;
+ int32_t max_type_id;
+ const H5C_class_t * const *class_table_ptr;
+ size_t max_cache_size;
+ size_t min_clean_size;
+ H5C_write_permitted_func_t check_write_permitted;
+ hbool_t write_permitted;
+ H5C_log_flush_func_t log_flush;
+ hbool_t evictions_enabled;
+ hbool_t close_warning_received;
/* Fields for maintaining the [hash table] index of entries */
- uint32_t index_len;
- size_t index_size;
+ uint32_t index_len;
+ size_t index_size;
uint32_t index_ring_len[H5C_RING_NTYPES];
- size_t index_ring_size[H5C_RING_NTYPES];
- size_t clean_index_size;
- size_t clean_index_ring_size[H5C_RING_NTYPES];
- size_t dirty_index_size;
- size_t dirty_index_ring_size[H5C_RING_NTYPES];
- H5C_cache_entry_t * index[H5C__HASH_TABLE_LEN];
- uint32_t il_len;
- size_t il_size;
- H5C_cache_entry_t * il_head;
- H5C_cache_entry_t * il_tail;
+ size_t index_ring_size[H5C_RING_NTYPES];
+ size_t clean_index_size;
+ size_t clean_index_ring_size[H5C_RING_NTYPES];
+ size_t dirty_index_size;
+ size_t dirty_index_ring_size[H5C_RING_NTYPES];
+ H5C_cache_entry_t * index[H5C__HASH_TABLE_LEN];
+ uint32_t il_len;
+ size_t il_size;
+ H5C_cache_entry_t * il_head;
+ H5C_cache_entry_t * il_tail;
/* Fields to detect entries removed during scans */
- int64_t entries_removed_counter;
- H5C_cache_entry_t * last_entry_removed_ptr;
- H5C_cache_entry_t * entry_watched_for_removal;
+ int64_t entries_removed_counter;
+ H5C_cache_entry_t * last_entry_removed_ptr;
+ H5C_cache_entry_t * entry_watched_for_removal;
/* Fields for maintaining list of in-order entries, for flushing */
- hbool_t slist_enabled;
- hbool_t slist_changed;
- uint32_t slist_len;
- size_t slist_size;
+ hbool_t slist_enabled;
+ hbool_t slist_changed;
+ uint32_t slist_len;
+ size_t slist_size;
uint32_t slist_ring_len[H5C_RING_NTYPES];
- size_t slist_ring_size[H5C_RING_NTYPES];
- H5SL_t * slist_ptr;
- uint32_t num_last_entries;
+ size_t slist_ring_size[H5C_RING_NTYPES];
+ H5SL_t * slist_ptr;
+ uint32_t num_last_entries;
#ifdef H5C_DO_SANITY_CHECKS
- int32_t slist_len_increase;
- int64_t slist_size_increase;
+ int32_t slist_len_increase;
+ int64_t slist_size_increase;
#endif /* H5C_DO_SANITY_CHECKS */
/* Fields for maintaining list of tagged entries */
- H5C_tag_info_t * tag_list;
- hbool_t ignore_tags;
- uint32_t num_objs_corked;
+ H5C_tag_info_t * tag_list;
+ hbool_t ignore_tags;
+ uint32_t num_objs_corked;
/* Fields for tracking protected entries */
- uint32_t pl_len;
- size_t pl_size;
- H5C_cache_entry_t * pl_head_ptr;
- H5C_cache_entry_t * pl_tail_ptr;
+ uint32_t pl_len;
+ size_t pl_size;
+ H5C_cache_entry_t * pl_head_ptr;
+ H5C_cache_entry_t * pl_tail_ptr;
/* Fields for tracking pinned entries */
- uint32_t pel_len;
- size_t pel_size;
- H5C_cache_entry_t * pel_head_ptr;
- H5C_cache_entry_t * pel_tail_ptr;
+ uint32_t pel_len;
+ size_t pel_size;
+ H5C_cache_entry_t * pel_head_ptr;
+ H5C_cache_entry_t * pel_tail_ptr;
/* Fields for complete LRU list of entries */
- uint32_t LRU_list_len;
- size_t LRU_list_size;
- H5C_cache_entry_t * LRU_head_ptr;
- H5C_cache_entry_t * LRU_tail_ptr;
+ uint32_t LRU_list_len;
+ size_t LRU_list_size;
+ H5C_cache_entry_t * LRU_head_ptr;
+ H5C_cache_entry_t * LRU_tail_ptr;
#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
/* Fields for clean LRU list of entries */
- uint32_t cLRU_list_len;
- size_t cLRU_list_size;
- H5C_cache_entry_t * cLRU_head_ptr;
- H5C_cache_entry_t * cLRU_tail_ptr;
+ uint32_t cLRU_list_len;
+ size_t cLRU_list_size;
+ H5C_cache_entry_t * cLRU_head_ptr;
+ H5C_cache_entry_t * cLRU_tail_ptr;
/* Fields for dirty LRU list of entries */
- uint32_t dLRU_list_len;
- size_t dLRU_list_size;
- H5C_cache_entry_t * dLRU_head_ptr;
- H5C_cache_entry_t * dLRU_tail_ptr;
+ uint32_t dLRU_list_len;
+ size_t dLRU_list_size;
+ H5C_cache_entry_t * dLRU_head_ptr;
+ H5C_cache_entry_t * dLRU_tail_ptr;
#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
#ifdef H5_HAVE_PARALLEL
/* Fields for collective metadata reads */
- uint32_t coll_list_len;
- size_t coll_list_size;
- H5C_cache_entry_t * coll_head_ptr;
- H5C_cache_entry_t * coll_tail_ptr;
+ uint32_t coll_list_len;
+ size_t coll_list_size;
+ H5C_cache_entry_t * coll_head_ptr;
+ H5C_cache_entry_t * coll_tail_ptr;
/* Fields for collective metadata writes */
- H5SL_t * coll_write_list;
+ H5SL_t * coll_write_list;
#endif /* H5_HAVE_PARALLEL */
/* Fields for automatic cache size adjustment */
- hbool_t size_increase_possible;
- hbool_t flash_size_increase_possible;
- size_t flash_size_increase_threshold;
- hbool_t size_decrease_possible;
- hbool_t resize_enabled;
- hbool_t cache_full;
- hbool_t size_decreased;
- hbool_t resize_in_progress;
- hbool_t msic_in_progress;
- H5C_auto_size_ctl_t resize_ctl;
+ hbool_t size_increase_possible;
+ hbool_t flash_size_increase_possible;
+ size_t flash_size_increase_threshold;
+ hbool_t size_decrease_possible;
+ hbool_t resize_enabled;
+ hbool_t cache_full;
+ hbool_t size_decreased;
+ hbool_t resize_in_progress;
+ hbool_t msic_in_progress;
+ H5C_auto_size_ctl_t resize_ctl;
/* Fields for epoch markers used in automatic cache size adjustment */
- int32_t epoch_markers_active;
- hbool_t epoch_marker_active[H5C__MAX_EPOCH_MARKERS];
- int32_t epoch_marker_ringbuf[H5C__MAX_EPOCH_MARKERS+1];
- int32_t epoch_marker_ringbuf_first;
- int32_t epoch_marker_ringbuf_last;
- int32_t epoch_marker_ringbuf_size;
- H5C_cache_entry_t epoch_markers[H5C__MAX_EPOCH_MARKERS];
+ int32_t epoch_markers_active;
+ hbool_t epoch_marker_active[H5C__MAX_EPOCH_MARKERS];
+ int32_t epoch_marker_ringbuf[H5C__MAX_EPOCH_MARKERS+1];
+ int32_t epoch_marker_ringbuf_first;
+ int32_t epoch_marker_ringbuf_last;
+ int32_t epoch_marker_ringbuf_size;
+ H5C_cache_entry_t epoch_markers[H5C__MAX_EPOCH_MARKERS];
/* Fields for cache hit rate collection */
- int64_t cache_hits;
- int64_t cache_accesses;
+ int64_t cache_hits;
+ int64_t cache_accesses;
/* fields supporting generation of a cache image on file close */
- H5C_cache_image_ctl_t image_ctl;
- hbool_t serialization_in_progress;
- hbool_t load_image;
- hbool_t image_loaded;
- hbool_t delete_image;
+ H5C_cache_image_ctl_t image_ctl;
+ hbool_t serialization_in_progress;
+ hbool_t load_image;
+ hbool_t image_loaded;
+ hbool_t delete_image;
haddr_t image_addr;
- hsize_t image_len;
- hsize_t image_data_len;
- int64_t entries_loaded_counter;
- int64_t entries_inserted_counter;
- int64_t entries_relocated_counter;
- int64_t entry_fd_height_change_counter;
+ hsize_t image_len;
+ hsize_t image_data_len;
+ int64_t entries_loaded_counter;
+ int64_t entries_inserted_counter;
+ int64_t entries_relocated_counter;
+ int64_t entry_fd_height_change_counter;
uint32_t num_entries_in_image;
- H5C_image_entry_t * image_entries;
- void * image_buffer;
+ H5C_image_entry_t * image_entries;
+ void * image_buffer;
/* Free Space Manager Related fields */
hbool_t rdfsm_settled;
- hbool_t mdfsm_settled;
+ hbool_t mdfsm_settled;
#if H5C_COLLECT_CACHE_STATS
/* stats fields */
- int64_t hits[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t misses[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t write_protects[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t read_protects[H5C__MAX_NUM_TYPE_IDS + 1];
- int32_t max_read_protects[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t insertions[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t pinned_insertions[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t clears[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t flushes[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t evictions[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t take_ownerships[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t moves[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t entry_flush_moves[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t cache_flush_moves[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t pins[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t unpins[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t dirty_pins[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t pinned_flushes[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t pinned_clears[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t size_increases[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t size_decreases[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t entry_flush_size_changes[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t cache_flush_size_changes[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t hits[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t misses[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t write_protects[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t read_protects[H5C__MAX_NUM_TYPE_IDS + 1];
+ int32_t max_read_protects[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t insertions[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t pinned_insertions[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t clears[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t flushes[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t evictions[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t take_ownerships[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t moves[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t entry_flush_moves[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t cache_flush_moves[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t pins[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t unpins[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t dirty_pins[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t pinned_flushes[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t pinned_clears[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t size_increases[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t size_decreases[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t entry_flush_size_changes[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t cache_flush_size_changes[H5C__MAX_NUM_TYPE_IDS + 1];
/* Fields for hash table operations */
- int64_t total_ht_insertions;
- int64_t total_ht_deletions;
- int64_t successful_ht_searches;
- int64_t total_successful_ht_search_depth;
- int64_t failed_ht_searches;
- int64_t total_failed_ht_search_depth;
- uint32_t max_index_len;
- size_t max_index_size;
- size_t max_clean_index_size;
- size_t max_dirty_index_size;
+ int64_t total_ht_insertions;
+ int64_t total_ht_deletions;
+ int64_t successful_ht_searches;
+ int64_t total_successful_ht_search_depth;
+ int64_t failed_ht_searches;
+ int64_t total_failed_ht_search_depth;
+ uint32_t max_index_len;
+ size_t max_index_size;
+ size_t max_clean_index_size;
+ size_t max_dirty_index_size;
/* Fields for in-order skip list */
- uint32_t max_slist_len;
- size_t max_slist_size;
+ uint32_t max_slist_len;
+ size_t max_slist_size;
/* Fields for protected entry list */
- uint32_t max_pl_len;
- size_t max_pl_size;
+ uint32_t max_pl_len;
+ size_t max_pl_size;
/* Fields for pinned entry list */
- uint32_t max_pel_len;
- size_t max_pel_size;
+ uint32_t max_pel_len;
+ size_t max_pel_size;
/* Fields for tracking 'make space in cache' (msic) operations */
- int64_t calls_to_msic;
- int64_t total_entries_skipped_in_msic;
- int64_t total_dirty_pf_entries_skipped_in_msic;
- int64_t total_entries_scanned_in_msic;
- int32_t max_entries_skipped_in_msic;
- int32_t max_dirty_pf_entries_skipped_in_msic;
- int32_t max_entries_scanned_in_msic;
- int64_t entries_scanned_to_make_space;
+ int64_t calls_to_msic;
+ int64_t total_entries_skipped_in_msic;
+ int64_t total_dirty_pf_entries_skipped_in_msic;
+ int64_t total_entries_scanned_in_msic;
+ int32_t max_entries_skipped_in_msic;
+ int32_t max_dirty_pf_entries_skipped_in_msic;
+ int32_t max_entries_scanned_in_msic;
+ int64_t entries_scanned_to_make_space;
/* Fields for tracking skip list scan restarts */
- int64_t slist_scan_restarts;
- int64_t LRU_scan_restarts;
- int64_t index_scan_restarts;
+ int64_t slist_scan_restarts;
+ int64_t LRU_scan_restarts;
+ int64_t index_scan_restarts;
/* Fields for tracking cache image operations */
- int32_t images_created;
- int32_t images_read;
- int32_t images_loaded;
- hsize_t last_image_size;
+ int32_t images_created;
+ int32_t images_read;
+ int32_t images_loaded;
+ hsize_t last_image_size;
/* Fields for tracking prefetched entries */
- int64_t prefetches;
- int64_t dirty_prefetches;
- int64_t prefetch_hits;
+ int64_t prefetches;
+ int64_t dirty_prefetches;
+ int64_t prefetch_hits;
#if H5C_COLLECT_CACHE_ENTRY_STATS
- int32_t max_accesses[H5C__MAX_NUM_TYPE_IDS + 1];
- int32_t min_accesses[H5C__MAX_NUM_TYPE_IDS + 1];
- int32_t max_clears[H5C__MAX_NUM_TYPE_IDS + 1];
- int32_t max_flushes[H5C__MAX_NUM_TYPE_IDS + 1];
- size_t max_size[H5C__MAX_NUM_TYPE_IDS + 1];
- int32_t max_pins[H5C__MAX_NUM_TYPE_IDS + 1];
+ int32_t max_accesses[H5C__MAX_NUM_TYPE_IDS + 1];
+ int32_t min_accesses[H5C__MAX_NUM_TYPE_IDS + 1];
+ int32_t max_clears[H5C__MAX_NUM_TYPE_IDS + 1];
+ int32_t max_flushes[H5C__MAX_NUM_TYPE_IDS + 1];
+ size_t max_size[H5C__MAX_NUM_TYPE_IDS + 1];
+ int32_t max_pins[H5C__MAX_NUM_TYPE_IDS + 1];
#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
#endif /* H5C_COLLECT_CACHE_STATS */
- char prefix[H5C__PREFIX_LEN];
+ char prefix[H5C__PREFIX_LEN];
#ifndef NDEBUG
- int64_t get_entry_ptr_from_addr_counter;
-#endif /* NDEBUG */
+ int64_t get_entry_ptr_from_addr_counter;
+#endif
}; /* H5C_t */