summaryrefslogtreecommitdiffstats
path: root/src
diff options
context:
space:
mode:
authorNeil Fortner <fortnern@gmail.com>2024-02-23 17:06:06 (GMT)
committerGitHub <noreply@github.com>2024-02-23 17:06:06 (GMT)
commit560e80c0ad8494a2e070aafde9cbcff11de99219 (patch)
tree5ddb917d293eb80471e66fb98fe88a159b0419e5 /src
parent3fd1e90df69462a0d55b5d830e8eee663fcb44bb (diff)
downloadhdf5-560e80c0ad8494a2e070aafde9cbcff11de99219.zip
hdf5-560e80c0ad8494a2e070aafde9cbcff11de99219.tar.gz
hdf5-560e80c0ad8494a2e070aafde9cbcff11de99219.tar.bz2
Improve performance of flushing single objects (#4017)
Improve performance of flushing a single object, and remove metadata cache flush markers
Diffstat (limited to 'src')
-rw-r--r--src/H5AC.c4
-rw-r--r--src/H5ACprivate.h2
-rw-r--r--src/H5C.c79
-rw-r--r--src/H5Centry.c58
-rw-r--r--src/H5Cint.c36
-rw-r--r--src/H5Cpkg.h9
-rw-r--r--src/H5Cprivate.h48
-rw-r--r--src/H5Ctag.c148
8 files changed, 126 insertions, 258 deletions
diff --git a/src/H5AC.c b/src/H5AC.c
index 802ccdd..b752803 100644
--- a/src/H5AC.c
+++ b/src/H5AC.c
@@ -459,7 +459,7 @@ H5AC_dest(H5F_t *f)
*/
if (H5F_ACC_RDWR & H5F_INTENT(f)) {
/* enable and load the skip list */
- if (H5C_set_slist_enabled(f->shared->cache, true, false) < 0)
+ if (H5C_set_slist_enabled(f->shared->cache, true, true) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't enable skip list");
if (H5AC__flush_entries(f) < 0)
@@ -1127,7 +1127,7 @@ H5AC_prep_for_file_flush(H5F_t *f)
assert(f->shared);
assert(f->shared->cache);
- if (H5C_set_slist_enabled(f->shared->cache, true, false) < 0)
+ if (H5C_set_slist_enabled(f->shared->cache, true, true) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't enable skip list");
done:
diff --git a/src/H5ACprivate.h b/src/H5ACprivate.h
index 5e23036..51f1b35 100644
--- a/src/H5ACprivate.h
+++ b/src/H5ACprivate.h
@@ -312,14 +312,12 @@ typedef struct H5AC_proxy_entry_t {
*/
#define H5AC__NO_FLAGS_SET H5C__NO_FLAGS_SET
-#define H5AC__SET_FLUSH_MARKER_FLAG H5C__SET_FLUSH_MARKER_FLAG
#define H5AC__DELETED_FLAG H5C__DELETED_FLAG
#define H5AC__DIRTIED_FLAG H5C__DIRTIED_FLAG
#define H5AC__PIN_ENTRY_FLAG H5C__PIN_ENTRY_FLAG
#define H5AC__UNPIN_ENTRY_FLAG H5C__UNPIN_ENTRY_FLAG
#define H5AC__FLUSH_INVALIDATE_FLAG H5C__FLUSH_INVALIDATE_FLAG
#define H5AC__FLUSH_CLEAR_ONLY_FLAG H5C__FLUSH_CLEAR_ONLY_FLAG
-#define H5AC__FLUSH_MARKED_ENTRIES_FLAG H5C__FLUSH_MARKED_ENTRIES_FLAG
#define H5AC__FLUSH_IGNORE_PROTECTED_FLAG H5C__FLUSH_IGNORE_PROTECTED_FLAG
#define H5AC__READ_ONLY_FLAG H5C__READ_ONLY_FLAG
#define H5AC__FREE_FILE_SPACE_FLAG H5C__FREE_FILE_SPACE_FLAG
diff --git a/src/H5C.c b/src/H5C.c
index eb6e49e..1713e83 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -493,7 +493,7 @@ H5C_dest(H5F_t *f)
#endif /* H5AC_DUMP_IMAGE_STATS_ON_CLOSE */
/* Enable the slist, as it is needed in the flush */
- if (H5C_set_slist_enabled(f->shared->cache, true, false) < 0)
+ if (H5C_set_slist_enabled(f->shared->cache, true, true) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist enabled failed");
/* Flush and invalidate all cache entries */
@@ -567,7 +567,7 @@ H5C_evict(H5F_t *f)
assert(f);
/* Enable the slist, as it is needed in the flush */
- if (H5C_set_slist_enabled(f->shared->cache, true, false) < 0)
+ if (H5C_set_slist_enabled(f->shared->cache, true, true) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist enabled failed");
/* Flush and invalidate all cache entries except the pinned entries */
@@ -575,7 +575,7 @@ H5C_evict(H5F_t *f)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to evict entries in the cache");
/* Disable the slist */
- if (H5C_set_slist_enabled(f->shared->cache, false, true) < 0)
+ if (H5C_set_slist_enabled(f->shared->cache, false, false) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist disabled failed");
done:
@@ -1042,41 +1042,32 @@ done:
*
* 1) Verifies that the slist is empty.
*
- * 2) Scans the index list, and inserts all dirty entries
- * into the slist.
+ * 2) If the populate_slist parameter is true, scans the
+ * index list, and inserts all dirty entries into the
+ * slist.
*
* 3) Sets cache_ptr->slist_enabled = true.
*
- * Note that the clear_slist parameter is ignored if
- * the slist_enabed parameter is true.
- *
*
* If the slist_enabled_parameter is false, the function
- * shuts down the slist.
- *
- * Normally the slist will be empty at this point, however
- * that need not be the case if H5C_flush_cache() has been
- * called with the H5C__FLUSH_MARKED_ENTRIES_FLAG.
- *
- * Thus shutdown proceeds as follows:
+ * shuts down the slist:
*
* 1) Test to see if the slist is empty. If it is, proceed
* to step 3.
*
- * 2) Test to see if the clear_slist parameter is true.
- *
- * If it is, remove all entries from the slist.
- *
- * If it isn't, throw an error.
+ * 2) Remove all entries from the slist.
*
* 3) set cache_ptr->slist_enabled = false.
*
+ * Note that the populate_slist parameter is ignored if
+ * the slist_enabled parameter is false.
+ *
* Return: SUCCEED on success, and FAIL on failure.
*
*-------------------------------------------------------------------------
*/
herr_t
-H5C_set_slist_enabled(H5C_t *cache_ptr, bool slist_enabled, bool clear_slist)
+H5C_set_slist_enabled(H5C_t *cache_ptr, bool slist_enabled, bool populate_slist)
{
H5C_cache_entry_t *entry_ptr;
herr_t ret_value = SUCCEED; /* Return value */
@@ -1097,40 +1088,30 @@ H5C_set_slist_enabled(H5C_t *cache_ptr, bool slist_enabled, bool clear_slist)
*/
cache_ptr->slist_enabled = true;
- /* scan the index list and insert all dirty entries in the slist */
- entry_ptr = cache_ptr->il_head;
- while (entry_ptr != NULL) {
- if (entry_ptr->is_dirty)
- H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL);
- entry_ptr = entry_ptr->il_next;
- }
+ if (populate_slist) {
+ /* scan the index list and insert all dirty entries in the slist */
+ entry_ptr = cache_ptr->il_head;
+ while (entry_ptr != NULL) {
+ if (entry_ptr->is_dirty)
+ H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL);
+ entry_ptr = entry_ptr->il_next;
+ }
- /* we don't maintain a dirty index len, so we can't do a cross
- * check against it. Note that there is no point in cross checking
- * against the dirty LRU size, as the dirty LRU may not be maintained,
- * and in any case, there is no requirement that all dirty entries
- * will reside on the dirty LRU.
- */
- assert(cache_ptr->dirty_index_size == cache_ptr->slist_size);
+ /* we don't maintain a dirty index len, so we can't do a cross
+ * check against it. Note that there is no point in cross checking
+ * against the dirty LRU size, as the dirty LRU may not be maintained,
+ * and in any case, there is no requirement that all dirty entries
+ * will reside on the dirty LRU.
+ */
+ assert(cache_ptr->dirty_index_size == cache_ptr->slist_size);
+ }
}
else { /* take down the skip list */
if (!cache_ptr->slist_enabled)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist already disabled?");
- if ((cache_ptr->slist_len != 0) || (cache_ptr->slist_size != 0)) {
- if (clear_slist) {
- H5SL_node_t *node_ptr;
-
- node_ptr = H5SL_first(cache_ptr->slist_ptr);
- while (node_ptr != NULL) {
- entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, false, FAIL);
- node_ptr = H5SL_first(cache_ptr->slist_ptr);
- }
- }
- else
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist not empty?");
- }
+ if ((cache_ptr->slist_len != 0) || (cache_ptr->slist_size != 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist not empty?");
cache_ptr->slist_enabled = false;
diff --git a/src/H5Centry.c b/src/H5Centry.c
index 2bbf9ac..fec1f4a 100644
--- a/src/H5Centry.c
+++ b/src/H5Centry.c
@@ -503,36 +503,24 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
if (cache_ptr->slist_enabled) {
if (entry_ptr->in_slist) {
assert(entry_ptr->is_dirty);
- if (entry_ptr->flush_marker && !entry_ptr->is_dirty)
+ if (!entry_ptr->is_dirty)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry in slist failed sanity checks");
} /* end if */
- else {
- assert(!entry_ptr->is_dirty);
- assert(!entry_ptr->flush_marker);
- if (entry_ptr->is_dirty || entry_ptr->flush_marker)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry failed sanity checks");
- } /* end else */
}
- else { /* slist is disabled */
+ else /* slist is disabled */
assert(!entry_ptr->in_slist);
- if (!entry_ptr->is_dirty)
- if (entry_ptr->flush_marker)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flush marked clean entry?");
- }
#endif /* H5C_DO_SANITY_CHECKS */
if (entry_ptr->is_protected)
/* Attempt to flush a protected entry -- scream and die. */
HGOTO_ERROR(H5E_CACHE, H5E_PROTECT, FAIL, "Attempt to flush a protected entry");
- /* Set entry_ptr->flush_in_progress = true and set
- * entry_ptr->flush_marker = false
+ /* Set entry_ptr->flush_in_progress = true
*
* We will set flush_in_progress back to false at the end if the
* entry still exists at that point.
*/
entry_ptr->flush_in_progress = true;
- entry_ptr->flush_marker = false;
/* Preserve current dirty state for later */
was_dirty = entry_ptr->is_dirty;
@@ -1240,7 +1228,6 @@ H5C__load_entry(H5F_t *f,
entry->ro_ref_count = 0;
entry->is_pinned = false;
entry->in_slist = false;
- entry->flush_marker = false;
#ifdef H5_HAVE_PARALLEL
entry->clear_on_unprotect = false;
entry->flush_immediately = false;
@@ -1897,7 +1884,6 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t
ds_entry_ptr->ro_ref_count = 0;
ds_entry_ptr->is_pinned = false;
ds_entry_ptr->in_slist = false;
- ds_entry_ptr->flush_marker = false;
#ifdef H5_HAVE_PARALLEL
ds_entry_ptr->clear_on_unprotect = false;
ds_entry_ptr->flush_immediately = false;
@@ -2095,7 +2081,6 @@ H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, u
#ifdef H5_HAVE_PARALLEL
bool coll_access = false; /* whether access to the cache entry is done collectively */
#endif /* H5_HAVE_PARALLEL */
- bool set_flush_marker;
bool write_permitted = true;
size_t empty_space;
H5C_cache_entry_t *entry_ptr = NULL;
@@ -2125,9 +2110,8 @@ H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, u
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry");
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
- set_flush_marker = ((flags & H5C__SET_FLUSH_MARKER_FLAG) != 0);
- insert_pinned = ((flags & H5C__PIN_ENTRY_FLAG) != 0);
- flush_last = ((flags & H5C__FLUSH_LAST_FLAG) != 0);
+ insert_pinned = ((flags & H5C__PIN_ENTRY_FLAG) != 0);
+ flush_last = ((flags & H5C__FLUSH_LAST_FLAG) != 0);
/* Get the ring type from the API context */
ring = H5CX_get_ring();
@@ -2301,7 +2285,6 @@ H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, u
/* New entries are presumed to be dirty */
assert(entry_ptr->is_dirty);
- entry_ptr->flush_marker = set_flush_marker;
H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL);
H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, FAIL);
@@ -2497,9 +2480,6 @@ H5C_mark_entry_clean(void *_thing)
/* Mark the entry as clean if it isn't already */
entry_ptr->is_dirty = false;
- /* Also reset the 'flush_marker' flag, since the entry shouldn't be flushed now */
- entry_ptr->flush_marker = false;
-
/* Modify cache data structures */
if (was_dirty)
H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr, FAIL);
@@ -3426,7 +3406,6 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags)
H5C_t *cache_ptr;
bool deleted;
bool dirtied;
- bool set_flush_marker;
bool pin_entry;
bool unpin_entry;
bool free_file_space;
@@ -3441,13 +3420,12 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags)
FUNC_ENTER_NOAPI(FAIL)
- deleted = ((flags & H5C__DELETED_FLAG) != 0);
- dirtied = ((flags & H5C__DIRTIED_FLAG) != 0);
- set_flush_marker = ((flags & H5C__SET_FLUSH_MARKER_FLAG) != 0);
- pin_entry = ((flags & H5C__PIN_ENTRY_FLAG) != 0);
- unpin_entry = ((flags & H5C__UNPIN_ENTRY_FLAG) != 0);
- free_file_space = ((flags & H5C__FREE_FILE_SPACE_FLAG) != 0);
- take_ownership = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0);
+ deleted = ((flags & H5C__DELETED_FLAG) != 0);
+ dirtied = ((flags & H5C__DIRTIED_FLAG) != 0);
+ pin_entry = ((flags & H5C__PIN_ENTRY_FLAG) != 0);
+ unpin_entry = ((flags & H5C__UNPIN_ENTRY_FLAG) != 0);
+ free_file_space = ((flags & H5C__FREE_FILE_SPACE_FLAG) != 0);
+ take_ownership = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0);
assert(f);
assert(f->shared);
@@ -3621,15 +3599,10 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags)
entry_ptr->is_protected = false;
- /* if the entry is dirty, 'or' its flush_marker with the set flush flag,
- * and then add it to the skip list if it isn't there already.
- */
- if (entry_ptr->is_dirty) {
- entry_ptr->flush_marker |= set_flush_marker;
- if (!entry_ptr->in_slist)
- /* this is a no-op if cache_ptr->slist_enabled is false */
- H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL);
- } /* end if */
+ /* if the entry is dirty, add it to the skip list if it isn't there already. */
+ if (entry_ptr->is_dirty && !entry_ptr->in_slist)
+ /* this is a no-op if cache_ptr->slist_enabled is false */
+ H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL);
/* This implementation of the "deleted" option is a bit inefficient, as
* we re-insert the entry to be deleted into the replacement policy
@@ -4141,7 +4114,6 @@ H5C_remove_entry(void *_entry)
/* Additional internal cache consistency checks */
assert(!entry->in_slist);
- assert(!entry->flush_marker);
assert(!entry->flush_in_progress);
/* Note that the algorithm below is (very) similar to the set of operations
diff --git a/src/H5Cint.c b/src/H5Cint.c
index 31a947a..2e79a0d 100644
--- a/src/H5Cint.c
+++ b/src/H5Cint.c
@@ -1207,10 +1207,10 @@ H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
old_ring_pel_len = cur_ring_pel_len;
while (cache_ptr->index_ring_len[ring] > 0) {
- /* first, try to flush-destroy any dirty entries. Do this by
+ /* First, try to flush-destroy any dirty entries. Do this by
* making a scan through the slist. Note that new dirty entries
- * may be created by the flush call backs. Thus it is possible
- * that the slist will not be empty after we finish the scan.
+ * may be created by the flush call back, thus we may need to
+ * restart the scan (see below).
*/
#ifdef H5C_DO_SANITY_CHECKS
@@ -1573,7 +1573,6 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
{
H5C_t *cache_ptr = f->shared->cache;
bool flushed_entries_last_pass;
- bool flush_marked_entries;
bool ignore_protected;
bool tried_to_flush_protected_entry = false;
bool restart_slist_scan;
@@ -1603,12 +1602,10 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry");
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
- ignore_protected = ((flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0);
- flush_marked_entries = ((flags & H5C__FLUSH_MARKED_ENTRIES_FLAG) != 0);
+ ignore_protected = ((flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0);
- if (!flush_marked_entries)
- for (i = (int)H5C_RING_UNDEFINED; i < (int)ring; i++)
- assert(cache_ptr->slist_ring_len[i] == 0);
+ for (i = (int)H5C_RING_UNDEFINED; i < (int)ring; i++)
+ assert(cache_ptr->slist_ring_len[i] == 0);
assert(cache_ptr->flush_in_progress);
@@ -1712,9 +1709,7 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
*/
assert(entry_ptr->in_slist);
assert(entry_ptr->is_dirty);
-
- if (!flush_marked_entries || entry_ptr->flush_marker)
- assert(entry_ptr->ring >= ring);
+ assert(entry_ptr->ring >= ring);
/* Advance node pointer now, before we delete its target
* from the slist.
@@ -1727,19 +1722,14 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
assert(next_entry_ptr->is_dirty);
assert(next_entry_ptr->in_slist);
-
- if (!flush_marked_entries || next_entry_ptr->flush_marker)
- assert(next_entry_ptr->ring >= ring);
-
+ assert(next_entry_ptr->ring >= ring);
assert(entry_ptr != next_entry_ptr);
} /* end if */
else
next_entry_ptr = NULL;
- if ((!flush_marked_entries || entry_ptr->flush_marker) &&
- ((!entry_ptr->flush_me_last) ||
- ((entry_ptr->flush_me_last) && ((cache_ptr->num_last_entries >= cache_ptr->slist_len) ||
- (flush_marked_entries && entry_ptr->flush_marker)))) &&
+ if (((!entry_ptr->flush_me_last) ||
+ ((entry_ptr->flush_me_last) && cache_ptr->num_last_entries >= cache_ptr->slist_len)) &&
((entry_ptr->flush_dep_nchildren == 0) || (entry_ptr->flush_dep_ndirty_children == 0)) &&
(entry_ptr->ring == ring)) {
@@ -1790,10 +1780,8 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "cache has protected items");
#ifdef H5C_DO_SANITY_CHECKS
- if (!flush_marked_entries) {
- assert(cache_ptr->slist_ring_len[ring] == 0);
- assert(cache_ptr->slist_ring_size[ring] == 0);
- } /* end if */
+ assert(cache_ptr->slist_ring_len[ring] == 0);
+ assert(cache_ptr->slist_ring_size[ring] == 0);
#endif /* H5C_DO_SANITY_CHECKS */
done:
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index 5417684..4408774 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -2168,15 +2168,14 @@ typedef struct H5C_tag_info_t {
* insert all dirtly entries in the skip list, and enable operations
* on skip list by setting above control flag to true.
*
+ * In the case of a partial flush (i.e. flush tagged entries), we only
+ * add tagged entries to the skip list, thus avoiding unnecessary scans
+ * over the entire cache.
+ *
* At the end of a complete flush, we verify that the skip list is empty,
* and set the control flag back to false, so as to avoid skip list
* maintenance overhead until the next flush or close.
*
- * In the case of a partial flush (i.e. flush marked entries), we remove
- * all remaining entries from the skip list, and then set the control flag
- * back to false -- again avoiding skip list maintenance overhead until
- * the next flush or close.
- *
* slist_enabled: Boolean flag used to control operation of the skip
* list. If this filed is false, operations on the slist are
* no-ops, and the slist must be empty. If it is true,
diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h
index 3477e75..9f12312 100644
--- a/src/H5Cprivate.h
+++ b/src/H5Cprivate.h
@@ -134,7 +134,6 @@
*
*
* These flags apply to H5C_insert_entry():
- * H5C__SET_FLUSH_MARKER_FLAG
* H5C__PIN_ENTRY_FLAG
* H5C__FLUSH_LAST_FLAG ; super block only
* H5C__FLUSH_COLLECTIVELY_FLAG ; super block only
@@ -145,7 +144,6 @@
* H5C__FLUSH_COLLECTIVELY_FLAG ; super block only
*
* These flags apply to H5C_unprotect():
- * H5C__SET_FLUSH_MARKER_FLAG
* H5C__DELETED_FLAG
* H5C__DIRTIED_FLAG
* H5C__PIN_ENTRY_FLAG
@@ -162,7 +160,6 @@
* These flags apply to H5C_flush_cache():
* H5C__FLUSH_INVALIDATE_FLAG
* H5C__FLUSH_CLEAR_ONLY_FLAG
- * H5C__FLUSH_MARKED_ENTRIES_FLAG
* H5C__FLUSH_IGNORE_PROTECTED_FLAG (can't use this flag in combination
* with H5C__FLUSH_INVALIDATE_FLAG)
* H5C__DURING_FLUSH_FLAG
@@ -170,32 +167,29 @@
* These flags apply to H5C_flush_single_entry():
* H5C__FLUSH_INVALIDATE_FLAG
* H5C__FLUSH_CLEAR_ONLY_FLAG
- * H5C__FLUSH_MARKED_ENTRIES_FLAG
* H5C__TAKE_OWNERSHIP_FLAG
* H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG
* H5C__GENERATE_IMAGE_FLAG
* H5C__UPDATE_PAGE_BUFFER_FLAG
*/
#define H5C__NO_FLAGS_SET 0x00000
-#define H5C__SET_FLUSH_MARKER_FLAG 0x00001
-#define H5C__DELETED_FLAG 0x00002
-#define H5C__DIRTIED_FLAG 0x00004
-#define H5C__PIN_ENTRY_FLAG 0x00008
-#define H5C__UNPIN_ENTRY_FLAG 0x00010
-#define H5C__FLUSH_INVALIDATE_FLAG 0x00020
-#define H5C__FLUSH_CLEAR_ONLY_FLAG 0x00040
-#define H5C__FLUSH_MARKED_ENTRIES_FLAG 0x00080
-#define H5C__FLUSH_IGNORE_PROTECTED_FLAG 0x00100
-#define H5C__READ_ONLY_FLAG 0x00200
-#define H5C__FREE_FILE_SPACE_FLAG 0x00400
-#define H5C__TAKE_OWNERSHIP_FLAG 0x00800
-#define H5C__FLUSH_LAST_FLAG 0x01000
-#define H5C__FLUSH_COLLECTIVELY_FLAG 0x02000
-#define H5C__EVICT_ALLOW_LAST_PINS_FLAG 0x04000
-#define H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG 0x08000
-#define H5C__DURING_FLUSH_FLAG 0x10000 /* Set when the entire cache is being flushed */
-#define H5C__GENERATE_IMAGE_FLAG 0x20000 /* Set during parallel I/O */
-#define H5C__UPDATE_PAGE_BUFFER_FLAG 0x40000 /* Set during parallel I/O */
+#define H5C__DELETED_FLAG 0x00001
+#define H5C__DIRTIED_FLAG 0x00002
+#define H5C__PIN_ENTRY_FLAG 0x00004
+#define H5C__UNPIN_ENTRY_FLAG 0x00008
+#define H5C__FLUSH_INVALIDATE_FLAG 0x00010
+#define H5C__FLUSH_CLEAR_ONLY_FLAG 0x00020
+#define H5C__FLUSH_IGNORE_PROTECTED_FLAG 0x00040
+#define H5C__READ_ONLY_FLAG 0x00080
+#define H5C__FREE_FILE_SPACE_FLAG 0x00100
+#define H5C__TAKE_OWNERSHIP_FLAG 0x00200
+#define H5C__FLUSH_LAST_FLAG 0x00400
+#define H5C__FLUSH_COLLECTIVELY_FLAG 0x00800
+#define H5C__EVICT_ALLOW_LAST_PINS_FLAG 0x01000
+#define H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG 0x02000
+#define H5C__DURING_FLUSH_FLAG 0x04000 /* Set when the entire cache is being flushed */
+#define H5C__GENERATE_IMAGE_FLAG 0x08000 /* Set during parallel I/O */
+#define H5C__UPDATE_PAGE_BUFFER_FLAG 0x10000 /* Set during parallel I/O */
/* Debugging/sanity checking/statistics settings */
/* #define H5C_DO_SANITY_CHECKS */
@@ -1074,11 +1068,6 @@ typedef int H5C_ring_t;
* As a general rule, entries are placed in the list when they are
* marked dirty.
*
- * flush_marker: Boolean flag indicating that the entry is to be flushed
- * the next time H5C_flush_cache() is called with the
- * H5C__FLUSH_MARKED_ENTRIES_FLAG. The flag is reset when
- * the entry is flushed for whatever reason.
- *
* flush_me_last: Boolean flag indicating that this entry should not be
* flushed from the cache until all other entries without the
* flush_me_last flag set have been flushed.
@@ -1557,7 +1546,6 @@ typedef struct H5C_cache_entry_t {
int ro_ref_count;
bool is_pinned;
bool in_slist;
- bool flush_marker;
bool flush_me_last;
#ifdef H5_HAVE_PARALLEL
bool clear_on_unprotect;
@@ -2207,7 +2195,7 @@ H5_DLL herr_t H5C_resize_entry(void *thing, size_t new_size);
H5_DLL herr_t H5C_set_cache_auto_resize_config(H5C_t *cache_ptr, H5C_auto_size_ctl_t *config_ptr);
H5_DLL herr_t H5C_set_cache_image_config(const H5F_t *f, H5C_t *cache_ptr, H5C_cache_image_ctl_t *config_ptr);
H5_DLL herr_t H5C_set_evictions_enabled(H5C_t *cache_ptr, bool evictions_enabled);
-H5_DLL herr_t H5C_set_slist_enabled(H5C_t *cache_ptr, bool slist_enabled, bool clear_slist);
+H5_DLL herr_t H5C_set_slist_enabled(H5C_t *cache_ptr, bool slist_enabled, bool populate_slist);
H5_DLL herr_t H5C_set_prefix(H5C_t *cache_ptr, char *prefix);
H5_DLL herr_t H5C_stats(H5C_t *cache_ptr, const char *cache_name, bool display_detailed_stats);
H5_DLL void H5C_stats__reset(H5C_t *cache_ptr);
diff --git a/src/H5Ctag.c b/src/H5Ctag.c
index a1a9f42..8da6c13 100644
--- a/src/H5Ctag.c
+++ b/src/H5Ctag.c
@@ -80,8 +80,6 @@ typedef struct {
/* Local Prototypes */
/********************/
static herr_t H5C__iter_tagged_entries_real(H5C_t *cache, haddr_t tag, H5C_tag_iter_cb_t cb, void *cb_ctx);
-static herr_t H5C__mark_tagged_entries(H5C_t *cache, haddr_t tag);
-static herr_t H5C__flush_marked_entries(H5F_t *f);
/*********************/
/* Package Variables */
@@ -520,101 +518,6 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_evict_tagged_entries() */
-/*-------------------------------------------------------------------------
- * Function: H5C__mark_tagged_entries_cb
- *
- * Purpose: Callback to set the flush marker on dirty entries in the cache
- *
- * Return: H5_ITER_CONT (can't fail)
- *
- *-------------------------------------------------------------------------
- */
-static int
-H5C__mark_tagged_entries_cb(H5C_cache_entry_t *entry, void H5_ATTR_UNUSED *_ctx)
-{
- /* Function enter macro */
- FUNC_ENTER_PACKAGE_NOERR
-
- /* Sanity checks */
- assert(entry);
-
- /* We only want to set the flush marker on entries that
- * actually need flushed (i.e., dirty ones) */
- if (entry->is_dirty)
- entry->flush_marker = true;
-
- FUNC_LEAVE_NOAPI(H5_ITER_CONT)
-} /* H5C__mark_tagged_entries_cb() */
-
-/*-------------------------------------------------------------------------
- * Function: H5C__mark_tagged_entries
- *
- * Purpose: Set the flush marker on dirty entries in the cache that have
- * the specified tag, as well as all globally tagged entries.
- *
- * Return: FAIL if error is detected, SUCCEED otherwise.
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__mark_tagged_entries(H5C_t *cache, haddr_t tag)
-{
- herr_t ret_value = SUCCEED; /* Return value */
-
- /* Function enter macro */
- FUNC_ENTER_PACKAGE
-
- /* Sanity check */
- assert(cache);
-
- /* Iterate through hash table entries, marking those with specified tag, as
- * well as any major global entries which should always be flushed
- * when flushing based on tag value */
- if (H5C__iter_tagged_entries(cache, tag, true, H5C__mark_tagged_entries_cb, NULL) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_BADITER, FAIL, "Iteration of tagged entries failed");
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__mark_tagged_entries() */
-
-/*-------------------------------------------------------------------------
- * Function: H5C__flush_marked_entries
- *
- * Purpose: Flushes all marked entries in the cache.
- *
- * Return: FAIL if error is detected, SUCCEED otherwise.
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__flush_marked_entries(H5F_t *f)
-{
- herr_t ret_value = SUCCEED;
-
- FUNC_ENTER_PACKAGE
-
- /* Assertions */
- assert(f != NULL);
-
- /* Enable the slist, as it is needed in the flush */
- if (H5C_set_slist_enabled(f->shared->cache, true, false) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist enabled failed");
-
- /* Flush all marked entries */
- if (H5C_flush_cache(f, H5C__FLUSH_MARKED_ENTRIES_FLAG | H5C__FLUSH_IGNORE_PROTECTED_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush cache");
-
- /* Disable the slist. Set the clear_slist parameter to true
- * since we called H5C_flush_cache() with the
- * H5C__FLUSH_MARKED_ENTRIES_FLAG.
- */
- if (H5C_set_slist_enabled(f->shared->cache, false, true) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "disable slist failed");
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__flush_marked_entries */
-
#ifdef H5C_DO_TAGGING_SANITY_CHECKS
/*-------------------------------------------------------------------------
@@ -685,6 +588,36 @@ done:
#endif
/*-------------------------------------------------------------------------
+ * Function: H5C__flush_tagged_entries_cb
+ *
+ * Purpose: Callback to insert dirty entries in the cache into the skip list
+ *
+ * Return: H5_ITER_CONT (can't fail)
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+H5C__flush_tagged_entries_cb(H5C_cache_entry_t *entry, void *_ctx)
+{
+ H5C_t *cache_ptr = (H5C_t *)_ctx;
+ int ret_value = H5_ITER_CONT;
+
+ /* Function enter macro */
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity checks */
+ assert(entry);
+ assert(cache_ptr);
+
+ /* We only want to add entries to the slist that actually need to be flushed (i.e., dirty ones) */
+ if (entry->is_dirty)
+ H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry, H5_ITER_ERROR);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__flush_tagged_entries_cb() */
+
+/*-------------------------------------------------------------------------
* Function: H5C_flush_tagged_entries
*
* Purpose: Flushes all entries with the specified tag to disk.
@@ -709,13 +642,22 @@ H5C_flush_tagged_entries(H5F_t *f, haddr_t tag)
/* Get cache pointer */
cache = f->shared->cache;
- /* Mark all entries with specified tag */
- if (H5C__mark_tagged_entries(cache, tag) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't mark tagged entries");
+ /* Enable the slist, as it is needed in the flush */
+ if (H5C_set_slist_enabled(f->shared->cache, true, false) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist enabled failed");
+
+ /* Iterate through hash table entries, adding those with specified tag to the slist, as well as any major
+ * global entries which should always be flushed when flushing based on tag value */
+ if (H5C__iter_tagged_entries(cache, tag, true, H5C__flush_tagged_entries_cb, cache) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADITER, FAIL, "Iteration of tagged entries failed");
+
+ /* Flush all entries in the slist */
+ if (H5C_flush_cache(f, H5C__FLUSH_IGNORE_PROTECTED_FLAG) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush cache");
- /* Flush all marked entries */
- if (H5C__flush_marked_entries(f) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush marked entries");
+ /* Disable the slist */
+ if (H5C_set_slist_enabled(f->shared->cache, false, false) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "disable slist failed");
done:
FUNC_LEAVE_NOAPI(ret_value)