author    Dana Robinson <43805+derobins@users.noreply.github.com>    2020-11-17 13:47:52 (GMT)
committer GitHub <noreply@github.com>    2020-11-17 13:47:52 (GMT)
commit    5d0be1b196570ac2c08ffe0b61ac6a45b6ab7f3f (patch)
tree      dbe8a76bcb211babe9900647eb1f515d8623ad9b /src/H5Cmpio.c
parent    ec48fb8dfdb4f2f59f22a5efb6b950aafcac449d (diff)
Manual sync with develop (#95)
Brings all features from develop. Note that RELEASE.txt has not been updated (will be done in a future PR).
Diffstat (limited to 'src/H5Cmpio.c')
-rw-r--r--  src/H5Cmpio.c  79
1 file changed, 59 insertions(+), 20 deletions(-)
diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c
index 6c0c015..0489ad2 100644
--- a/src/H5Cmpio.c
+++ b/src/H5Cmpio.c
@@ -154,6 +154,10 @@ static herr_t H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring, unsigned
* Programmer: John Mainzer
* 3/17/10
*
+ * Changes: Updated sanity checks to allow for the possibility that
+ * the slist is disabled.
+ * JRM -- 8/3/20
+ *
*-------------------------------------------------------------------------
*/
herr_t
@@ -190,7 +194,7 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha
HDassert(cache_ptr != NULL);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
HDassert(num_candidates > 0);
- HDassert(num_candidates <= cache_ptr->slist_len);
+ HDassert((!cache_ptr->slist_enabled) || (num_candidates <= cache_ptr->slist_len));
HDassert(candidates_list_ptr != NULL);
HDassert(0 <= mpi_rank);
HDassert(mpi_rank < mpi_size);
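
[Note on the hunk above: this guard recurs throughout the patch. Any sanity check that depends on skip-list state is now conditioned on cache_ptr->slist_enabled, because with the slist optimization the skip list is only maintained while a flush is in progress. A minimal standalone sketch of the guard pattern; the struct is a simplified stand-in for H5C_t, not the real layout:

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    /* Simplified stand-in for the relevant H5C_t fields. */
    struct cache {
        bool   slist_enabled; /* slist maintained only during a flush */
        size_t slist_len;     /* entry count; stale while slist is disabled */
    };

    static void check_candidates(const struct cache *c, unsigned num_candidates)
    {
        /* The slist_len bound is only meaningful while the slist is
         * maintained, so the check short-circuits when it is disabled. */
        assert(!c->slist_enabled || num_candidates <= c->slist_len);
    }
]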
@@ -416,6 +420,16 @@ done:
* Programmer: John Mainzer
* 3/17/10
*
+ * Changes: With the slist optimization, the slist is not maintained
+ * unless a flush is in progress. Thus we can no longer use
+ * cache_ptr->slist_size to determine the total size of
+ * the entries we must insert in the candidate list.
+ *
+ * To address this, we now use cache_ptr->dirty_index_size
+ * instead.
+ *
+ * JRM -- 7/27/20
+ *
*-------------------------------------------------------------------------
*/
herr_t
@@ -429,18 +443,22 @@ H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr)
HDassert(cache_ptr != NULL);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- /* As a sanity check, set space needed to the size of the skip list.
- * This should be the sum total of the sizes of all the dirty entries
- * in the metadata cache.
+ /* As a sanity check, set space needed to the dirty_index_size. This
+ * should be the sum total of the sizes of all the dirty entries
+ * in the metadata cache. Note that if the slist is enabled,
+ * cache_ptr->slist_size should equal cache_ptr->dirty_index_size.
*/
- space_needed = cache_ptr->slist_size;
+ space_needed = cache_ptr->dirty_index_size;
+
+ HDassert((!cache_ptr->slist_enabled) || (space_needed == cache_ptr->slist_size));
/* Recall that while we shouldn't have any protected entries at this
* point, it is possible that some dirty entries may reside on the
* pinned list at this point.
*/
- HDassert(cache_ptr->slist_size <= (cache_ptr->dLRU_list_size + cache_ptr->pel_size));
- HDassert(cache_ptr->slist_len <= (cache_ptr->dLRU_list_len + cache_ptr->pel_len));
+ HDassert(cache_ptr->dirty_index_size <= (cache_ptr->dLRU_list_size + cache_ptr->pel_size));
+ HDassert((!cache_ptr->slist_enabled) ||
+ (cache_ptr->slist_len <= (cache_ptr->dLRU_list_len + cache_ptr->pel_len)));
if (space_needed > 0) { /* we have work to do */
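
[Note on the hunk above: the substitution is the core of this change. cache_ptr->dirty_index_size is always maintained, while cache_ptr->slist_size is only valid when the slist is enabled, in which case the two must agree. A sketch of that invariant, with simplified fields for illustration only:

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct cache {
        bool   slist_enabled;
        size_t slist_size;       /* dirty bytes; valid only while enabled */
        size_t dirty_index_size; /* dirty bytes; always maintained */
    };

    /* Total bytes the candidate list must cover when cleaning the cache. */
    static size_t clean_cache_space_needed(const struct cache *c)
    {
        /* While the slist is live, the two byte counts must agree. */
        assert(!c->slist_enabled || c->slist_size == c->dirty_index_size);
        return c->dirty_index_size;
    }
]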
@@ -449,7 +467,7 @@ H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr)
size_t nominated_entries_size = 0;
haddr_t nominated_addr;
- HDassert(cache_ptr->slist_len > 0);
+ HDassert((!cache_ptr->slist_enabled) || (cache_ptr->slist_len > 0));
/* Scan the dirty LRU list from tail forward and nominate sufficient
* entries to free up the necessary space.
@@ -457,14 +475,14 @@ H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr)
entry_ptr = cache_ptr->dLRU_tail_ptr;
while ((nominated_entries_size < space_needed) &&
- (nominated_entries_count < cache_ptr->slist_len) &&
+ ((!cache_ptr->slist_enabled) || (nominated_entries_count < cache_ptr->slist_len)) &&
(entry_ptr != NULL)) {
HDassert(!(entry_ptr->is_protected));
HDassert(!(entry_ptr->is_read_only));
HDassert(entry_ptr->ro_ref_count == 0);
HDassert(entry_ptr->is_dirty);
- HDassert(entry_ptr->in_slist);
+ HDassert((!cache_ptr->slist_enabled) || (entry_ptr->in_slist));
nominated_addr = entry_ptr->addr;
@@ -484,7 +502,9 @@ H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr)
* protected entry list as well -- scan it too if necessary
*/
entry_ptr = cache_ptr->pel_head_ptr;
- while ((nominated_entries_size < space_needed) && (nominated_entries_count < cache_ptr->slist_len) &&
+
+ while ((nominated_entries_size < space_needed) &&
+ ((!cache_ptr->slist_enabled) || (nominated_entries_count < cache_ptr->slist_len)) &&
(entry_ptr != NULL)) {
if (entry_ptr->is_dirty) {
@@ -510,7 +530,7 @@ H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr)
} /* end while */
- HDassert(nominated_entries_count == cache_ptr->slist_len);
+ HDassert((!cache_ptr->slist_enabled) || (nominated_entries_count == cache_ptr->slist_len));
HDassert(nominated_entries_size == space_needed);
} /* end if */
@@ -537,6 +557,12 @@ done:
* Programmer: John Mainzer
* 3/17/10
*
+ * Changes: With the slist optimization, the slist is not maintained
+ * unless a flush is in progress. Updated sanity checks to
+ * reflect this.
+ *
+ * JRM -- 7/27/20
+ *
*-------------------------------------------------------------------------
*/
herr_t
@@ -556,17 +582,26 @@ H5C_construct_candidate_list__min_clean(H5C_t *cache_ptr)
if (cache_ptr->max_cache_size > cache_ptr->index_size) {
if (((cache_ptr->max_cache_size - cache_ptr->index_size) + cache_ptr->cLRU_list_size) >=
- cache_ptr->min_clean_size)
+ cache_ptr->min_clean_size) {
+
space_needed = 0;
- else
+ }
+ else {
+
space_needed = cache_ptr->min_clean_size -
((cache_ptr->max_cache_size - cache_ptr->index_size) + cache_ptr->cLRU_list_size);
+ }
} /* end if */
else {
- if (cache_ptr->min_clean_size <= cache_ptr->cLRU_list_size)
+
+ if (cache_ptr->min_clean_size <= cache_ptr->cLRU_list_size) {
+
space_needed = 0;
- else
+ }
+ else {
+
space_needed = cache_ptr->min_clean_size - cache_ptr->cLRU_list_size;
+ }
} /* end else */
if (space_needed > 0) { /* we have work to do */
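
[Note on the hunk above: the brace changes are purely stylistic, but the computation they wrap is worth restating. space_needed is the shortfall between min_clean_size and the space that is already clean or free. A compact equivalent, written as a hypothetical helper; the patch itself keeps this logic inline:

    #include <stddef.h>

    static size_t min_clean_space_needed(size_t max_cache_size, size_t index_size,
                                         size_t cLRU_list_size, size_t min_clean_size)
    {
        /* Clean-or-free space: the clean LRU, plus any headroom left
         * under max_cache_size. */
        size_t clean_or_free = cLRU_list_size;

        if (max_cache_size > index_size)
            clean_or_free += max_cache_size - index_size;

        /* Shortfall against the minimum clean size, clamped at zero. */
        return (clean_or_free >= min_clean_size) ? 0
                                                 : min_clean_size - clean_or_free;
    }
]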
@@ -575,13 +610,15 @@ H5C_construct_candidate_list__min_clean(H5C_t *cache_ptr)
unsigned nominated_entries_count = 0;
size_t nominated_entries_size = 0;
- HDassert(cache_ptr->slist_len > 0);
+ HDassert((!cache_ptr->slist_enabled) || (cache_ptr->slist_len > 0));
/* Scan the dirty LRU list from tail forward and nominate sufficient
* entries to free up the necessary space.
*/
entry_ptr = cache_ptr->dLRU_tail_ptr;
- while ((nominated_entries_size < space_needed) && (nominated_entries_count < cache_ptr->slist_len) &&
+
+ while ((nominated_entries_size < space_needed) &&
+ ((!cache_ptr->slist_enabled) || (nominated_entries_count < cache_ptr->slist_len)) &&
(entry_ptr != NULL) && (!entry_ptr->flush_me_last)) {
haddr_t nominated_addr;
@@ -590,7 +627,7 @@ H5C_construct_candidate_list__min_clean(H5C_t *cache_ptr)
HDassert(!(entry_ptr->is_read_only));
HDassert(entry_ptr->ro_ref_count == 0);
HDassert(entry_ptr->is_dirty);
- HDassert(entry_ptr->in_slist);
+ HDassert((!cache_ptr->slist_enabled) || (entry_ptr->in_slist));
nominated_addr = entry_ptr->addr;
@@ -603,7 +640,9 @@ H5C_construct_candidate_list__min_clean(H5C_t *cache_ptr)
entry_ptr = entry_ptr->aux_prev;
} /* end while */
- HDassert(nominated_entries_count <= cache_ptr->slist_len);
+
+ HDassert((!cache_ptr->slist_enabled) || (nominated_entries_count <= cache_ptr->slist_len));
+ HDassert(nominated_entries_size <= cache_ptr->dirty_index_size);
HDassert(nominated_entries_size >= space_needed);
} /* end if */
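
[Closing note: both candidate-list builders touched by this patch share the same scan structure. They walk a list from the tail (the dirty LRU, and for the clean-cache case also the pinned entry list), nominating entries until space_needed is covered, with the slist_len bound applied only while the slist is enabled. A condensed sketch of that loop, using simplified types; the real code also records each nominated address via H5AC_add_candidate:

    #include <stdbool.h>
    #include <stddef.h>

    struct entry {
        size_t        size;
        bool          is_dirty;
        struct entry *aux_prev; /* previous entry on the scanned list */
    };

    static unsigned nominate_entries(struct entry *tail, size_t space_needed,
                                     bool slist_enabled, size_t slist_len)
    {
        unsigned      count          = 0;
        size_t        nominated_size = 0;
        struct entry *e              = tail;

        while (nominated_size < space_needed &&
               (!slist_enabled || count < slist_len) && e != NULL) {
            if (e->is_dirty) { /* always true on the dirty LRU scan */
                nominated_size += e->size;
                count++;
            }
            e = e->aux_prev;
        }
        return count;
    }
]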