path: root/src/H5C.c
Diffstat (limited to 'src/H5C.c')
-rw-r--r--  src/H5C.c | 1536
1 file changed, 839 insertions(+), 697 deletions(-)
diff --git a/src/H5C.c b/src/H5C.c
index b26bd93..15ff757 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -74,8 +74,8 @@
/* Module Setup */
/****************/
-#define H5C_PACKAGE /*suppress error about including H5Cpkg */
-#define H5F_PACKAGE /*suppress error about including H5Fpkg */
+#include "H5Cmodule.h" /* This source code file is part of the H5C module */
+#define H5F_FRIEND /*suppress error about including H5Fpkg */
/***********/
@@ -106,6 +106,7 @@
#define H5C_IMAGE_EXTRA_SPACE 0
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+
/******************/
/* Local Typedefs */
/******************/
@@ -146,6 +147,12 @@ static herr_t H5C_flush_invalidate_cache(const H5F_t * f,
hid_t dxpl_id,
unsigned flags);
+static herr_t H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id,
+ H5C_ring_t ring, unsigned flags);
+
+static herr_t H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring,
+ unsigned flags);
+
static void * H5C_load_entry(H5F_t * f,
hid_t dxpl_id,
#ifdef H5_HAVE_PARALLEL
@@ -202,6 +209,9 @@ herr_t H5C_dump_cache_skip_list(H5C_t * cache_ptr, char * calling_fcn);
/* Package Variables */
/*********************/
+/* Package initialization variable */
+hbool_t H5_PKG_INIT_VAR = FALSE;
+
/*****************************/
/* Library Private Variables */
@@ -501,6 +511,16 @@ H5C_create(size_t max_cache_size,
cache_ptr->clean_index_size = (size_t)0;
cache_ptr->dirty_index_size = (size_t)0;
+ for(i = 0; i < H5C_RING_NTYPES; i++) {
+ cache_ptr->index_ring_len[i] = 0;
+ cache_ptr->index_ring_size[i] = (size_t)0;
+ cache_ptr->clean_index_ring_size[i] = (size_t)0;
+ cache_ptr->dirty_index_ring_size[i] = (size_t)0;
+
+ cache_ptr->slist_ring_len[i] = 0;
+ cache_ptr->slist_ring_size[i] = (size_t)0;
+ } /* end for */
+
/* Tagging Field Initializations */
cache_ptr->ignore_tags = FALSE;
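
For orientation: the loop added above gives each ring its own slice of the index and skip-list bookkeeping. Below is a minimal, self-contained model of that idea; the ring names and struct layout are illustrative stand-ins, not the actual declarations from H5Cprivate.h.

    #include <stddef.h>

    /* Illustrative stand-in for H5C_ring_t; the real values live in H5Cprivate.h. */
    typedef enum {
        RING_UNDEFINED = 0,  /* no ring assigned yet              */
        RING_USER,           /* outermost ring: ordinary metadata */
        RING_NTYPES          /* one past the innermost ring       */
    } ring_t;

    /* Per-ring slices of the global index and skip-list counters. */
    typedef struct {
        int    index_ring_len[RING_NTYPES];
        size_t index_ring_size[RING_NTYPES];
        int    slist_ring_len[RING_NTYPES];
        size_t slist_ring_size[RING_NTYPES];
    } ring_stats_t;

    /* Mirrors the reset loop in H5C_create(): every ring starts out empty. */
    static void
    ring_stats_init(ring_stats_t *rs)
    {
        int i;

        for (i = 0; i < RING_NTYPES; i++) {
            rs->index_ring_len[i]  = 0;
            rs->index_ring_size[i] = (size_t)0;
            rs->slist_ring_len[i]  = 0;
            rs->slist_ring_size[i] = (size_t)0;
        }
    }
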
@@ -515,10 +535,8 @@ H5C_create(size_t max_cache_size,
cache_ptr->slist_size_increase = 0;
#endif /* H5C_DO_SANITY_CHECKS */
- for ( i = 0; i < H5C__HASH_TABLE_LEN; i++ )
- {
+ for(i = 0; i < H5C__HASH_TABLE_LEN; i++)
(cache_ptr->index)[i] = NULL;
- }
cache_ptr->entries_removed_counter = 0;
cache_ptr->last_entry_removed_ptr = NULL;
@@ -1027,13 +1045,6 @@ done:
* all unprotected entries should be flushed before the
* function returns failure.
*
- * The primary_dxpl_id and secondary_dxpl_id parameters
- * specify the dxpl_ids used on the first write occasioned
- * by the flush (primary_dxpl_id), and on all subsequent
- * writes (secondary_dxpl_id). This is useful in the metadata
- * cache, but may not be needed elsewhere. If so, just use the
- * same dxpl_id for both parameters.
- *
* Return: Non-negative on success/Negative on failure or if there was
* a request to flush all items and something was protected.
*
@@ -1054,408 +1065,105 @@ done:
*
* JRM -- 12/13/14
*
+ * Modified function to support rings. Basic idea is that
+ * every entry in the cache is assigned to a ring. Entries
+ * in the outermost ring are flushed first, followed by
+ * those in the next outermost ring, and so on until the
+ * innermost ring is flushed. See header comment on
+ * H5C_ring_t in H5Cprivate.h for a more detailed
+ * discussion.
+ *
+ * JRM -- 8/30/15
+ *
*-------------------------------------------------------------------------
*/
herr_t
H5C_flush_cache(H5F_t *f, hid_t dxpl_id, unsigned flags)
{
- H5C_t * cache_ptr = f->shared->cache;
- herr_t ret_value = SUCCEED;
- hbool_t destroy;
- hbool_t flushed_entries_last_pass;
- hbool_t flush_marked_entries;
- hbool_t ignore_protected;
- hbool_t tried_to_flush_protected_entry = FALSE;
- hbool_t restart_slist_scan;
- int32_t passes = 0;
- int32_t protected_entries = 0;
- H5SL_node_t * node_ptr = NULL;
- H5C_cache_entry_t * entry_ptr = NULL;
- H5C_cache_entry_t * next_entry_ptr = NULL;
#if H5C_DO_SANITY_CHECKS
- int64_t flushed_entries_count = 0;
- int64_t flushed_entries_size = 0;
- int64_t initial_slist_len = 0;
- size_t initial_slist_size = 0;
- int64_t entry_size_change;
- int64_t * entry_size_change_ptr = &entry_size_change;
-#else /* H5C_DO_SANITY_CHECKS */
- int64_t * entry_size_change_ptr = NULL;
+ int i;
+ int32_t index_len = 0;
+ size_t index_size = (size_t)0;
+ size_t clean_index_size = (size_t)0;
+ size_t dirty_index_size = (size_t)0;
+ size_t slist_size = (size_t)0;
+ int32_t slist_len = 0;
#endif /* H5C_DO_SANITY_CHECKS */
+ H5C_ring_t ring;
+ H5C_t * cache_ptr;
+ hbool_t destroy;
+ hbool_t ignore_protected;
+ herr_t ret_value = SUCCEED;
FUNC_ENTER_NOAPI(FAIL)
- HDassert( cache_ptr );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- HDassert( cache_ptr->slist_ptr );
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_ptr);
-#if H5C_DO_EXTREME_SANITY_CHECKS
- if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_lru_list(cache_ptr) < 0 ) ) {
+#if H5C_DO_SANITY_CHECKS
+ HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+
+ for(i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
+ index_len += cache_ptr->index_ring_len[i];
+ index_size += cache_ptr->index_ring_size[i];
+ clean_index_size += cache_ptr->clean_index_ring_size[i];
+ dirty_index_size += cache_ptr->dirty_index_ring_size[i];
+
+ slist_len += cache_ptr->slist_ring_len[i];
+ slist_size += cache_ptr->slist_ring_size[i];
+ } /* end for */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "an extreme sanity check failed on entry.\n");
- }
+ HDassert(cache_ptr->index_len == index_len);
+ HDassert(cache_ptr->index_size == index_size);
+ HDassert(cache_ptr->clean_index_size == clean_index_size);
+ HDassert(cache_ptr->dirty_index_size == dirty_index_size);
+ HDassert(cache_ptr->slist_len == slist_len);
+ HDassert(cache_ptr->slist_size == slist_size);
+#endif /* H5C_DO_SANITY_CHECKS */
+
+#if H5C_DO_EXTREME_SANITY_CHECKS
+ if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_lru_list(cache_ptr) < 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry.\n");
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
ignore_protected = ( (flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0 );
-
destroy = ( (flags & H5C__FLUSH_INVALIDATE_FLAG) != 0 );
-
- /* note that flush_marked_entries is set to FALSE if destroy is TRUE */
- flush_marked_entries = ( ( (flags & H5C__FLUSH_MARKED_ENTRIES_FLAG) != 0 )
- &&
- ( ! destroy )
- );
-
HDassert( ! ( destroy && ignore_protected ) );
-
HDassert( ! ( cache_ptr->flush_in_progress ) );
cache_ptr->flush_in_progress = TRUE;
- if ( destroy ) {
+ if(destroy) {
if(H5C_flush_invalidate_cache(f, dxpl_id, flags) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate failed.")
- } else {
- /* When we are only flushing marked entries, the slist will usually
- * still contain entries when we have flushed everything we should.
- * Thus we track whether we have flushed any entries in the last
- * pass, and terminate if we haven't.
- */
-
- flushed_entries_last_pass = TRUE;
-
- /* set the cache_ptr->slist_change_in_pre_serialize and
- * cache_ptr->slist_change_in_serialize to false.
- *
- * These flags are set to TRUE by H5C__flush_single_entry if the
- * slist is modified by a pre_serialize or serialize call respectively.
- * H5C_flush_cache uses these flags to detect any modifications
- * to the slist that might corrupt the scan of the slist -- and
- * restart the scan in this event.
+ } /* end if */
+ else {
+ /* flush each ring, starting from the outermost ring and
+ * working inward.
*/
- cache_ptr->slist_change_in_pre_serialize = FALSE;
- cache_ptr->slist_change_in_serialize = FALSE;
-
- while ( ( passes < H5C__MAX_PASSES_ON_FLUSH ) &&
- ( cache_ptr->slist_len != 0 ) &&
- ( protected_entries == 0 ) &&
- ( flushed_entries_last_pass ) )
- {
- unsigned curr_flush_dep_height = 0;
- unsigned flush_dep_passes = 0;
-
- flushed_entries_last_pass = FALSE;
-
- /* Loop over all flush dependency heights of entries */
- while((curr_flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS) &&
- (cache_ptr->slist_len != 0) &&
- (flush_dep_passes < H5C__MAX_PASSES_ON_FLUSH) )
- {
- hbool_t flushed_during_dep_loop = FALSE;
-
-#if H5C_DO_SANITY_CHECKS
- /* For sanity checking, try to verify that the skip list has
- * the expected size and number of entries at the end of each
- * internal while loop (see below).
- *
- * Doing this get a bit tricky, as depending on flags, we may
- * or may not flush all the entries in the slist.
- *
- * To make things more entertaining, with the advent of the
- * fractal heap, the entry serialize callback can cause entries
- * to be dirtied, resized, and/or moved. Also, the
- * pre_serialize callback can result in an entry being
- * removed from the cache via the take ownership flag.
- *
- * To deal with this, we first make note of the initial
- * skip list length and size:
- */
- initial_slist_len = cache_ptr->slist_len;
- initial_slist_size = cache_ptr->slist_size;
-
- /* We then zero counters that we use to track the number
- * and total size of entries flushed:
- */
- flushed_entries_count = 0;
- flushed_entries_size = 0;
-
- /* As mentioned above, there is the possibility that
- * entries will be dirtied, resized, flushed, or removed
- * from the cache via the take ownership flag during
- * our pass through the skip list. To capture the number
- * of entries added, and the skip list size delta,
- * zero the slist_len_increase and slist_size_increase of
- * the cache's instance of H5C_t. These fields will be
- * updated elsewhere to account for slist insertions and/or
- * dirty entry size changes.
- */
- cache_ptr->slist_len_increase = 0;
- cache_ptr->slist_size_increase = 0;
-
- /* at the end of the loop, use these values to compute the
- * expected slist length and size and compare this with the
- * value recorded in the cache's instance of H5C_t.
- */
-#endif /* H5C_DO_SANITY_CHECKS */
-
- restart_slist_scan = TRUE;
-
- while ( ( restart_slist_scan ) || ( node_ptr != NULL ) )
- {
- if ( restart_slist_scan )
- {
- restart_slist_scan = FALSE;
-
- /* Start at beginning of skip list */
- node_ptr = H5SL_first(cache_ptr->slist_ptr);
-
- if ( node_ptr == NULL )
- {
- /* the slist is empty -- break out of inner loop */
- break;
- }
- HDassert( node_ptr != NULL );
-
- /* Get cache entry for this node */
- next_entry_ptr =
- (H5C_cache_entry_t *)H5SL_item(node_ptr);
-
- if(NULL == next_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "next_entry_ptr == NULL ?!?!")
-
- HDassert( next_entry_ptr->magic == \
- H5C__H5C_CACHE_ENTRY_T_MAGIC );
- HDassert( next_entry_ptr->is_dirty );
- HDassert( next_entry_ptr->in_slist );
- }
-
- entry_ptr = next_entry_ptr;
-
- /* With the advent of the fractal heap, the free space
- * manager, and the version 3 cache, it is possible
- * that the pre-serialize or serialize callback will
- * dirty, resize, or take ownership of other entries
- * in the cache.
- *
- * To deal with this, I have inserted code to detect any
- * change in the skip list not directly under the control
- * of this function. If such modifications are detected,
- * we must re-start the scan of the skip list to avoid
- * the possibility that the target of the next_entry_ptr
- * may have been flushed or deleted from the cache.
- *
- * To verify that all such possibilities have been dealt
- * with, we do a bit of extra sanity checking on
- * entry_ptr.
- */
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->in_slist);
- HDassert(entry_ptr->is_dirty);
-
- /* increment node pointer now, before we delete its target
- * from the slist.
- */
- node_ptr = H5SL_next(node_ptr);
-
- if ( node_ptr != NULL ) {
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- if ( NULL == next_entry_ptr )
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
- HDassert( next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC );
- HDassert( next_entry_ptr->is_dirty );
- HDassert( next_entry_ptr->in_slist );
- HDassert( entry_ptr != next_entry_ptr );
- } else {
- next_entry_ptr = NULL;
- }
-
- HDassert( entry_ptr != NULL );
- HDassert( entry_ptr->in_slist );
-
- if ( ( ( ! flush_marked_entries ) ||
- ( entry_ptr->flush_marker ) ) &&
- ( ( ! entry_ptr->flush_me_last ) ||
- ( ( entry_ptr->flush_me_last ) &&
- ( cache_ptr->num_last_entries >=
- cache_ptr->slist_len ) ) ) ) {
-
- if ( entry_ptr->is_protected ) {
-
- /* we probably have major problems -- but lets
- * flush everything we can before we decide
- * whether to flag an error.
- */
- tried_to_flush_protected_entry = TRUE;
- protected_entries++;
-
- } else if ( entry_ptr->is_pinned ) {
- /* Test to see if we are can flush the entry now.
- * If we can, go ahead and flush. Note that we
- * aren't trying to do a destroy here, so that
- * is not an issue.
- */
- if(entry_ptr->flush_dep_height == curr_flush_dep_height ) {
-#if H5C_DO_SANITY_CHECKS
- flushed_entries_count++;
- flushed_entries_size += (int64_t)entry_ptr->size;
- entry_size_change = 0;
-#endif /* H5C_DO_SANITY_CHECKS */
- if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flags, entry_size_change_ptr, NULL) < 0 )
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed.")
-
-#if H5C_DO_SANITY_CHECKS
- /* it is possible that the entry size changed
- * during flush -- update flushed_entries_size
- * to account for this.
- */
- flushed_entries_size += entry_size_change;
-#endif /* H5C_DO_SANITY_CHECKS */
-
- flushed_during_dep_loop = TRUE;
-
- if ((cache_ptr->slist_change_in_serialize) ||
- (cache_ptr->slist_change_in_pre_serialize))
- {
- /* The slist has been modified by something
- * other than the simple removal of the
- * of the flushed entry after the flush.
- *
- * This has the potential to corrupt the
- * scan through the slist, so restart it.
- */
- restart_slist_scan = TRUE;
- cache_ptr->slist_change_in_pre_serialize
- = FALSE;
- cache_ptr->slist_change_in_serialize
- = FALSE;
-
- H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
- }
- } /* end if */
- else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
- /* This shouldn't happen -- if it does, just scream and die. */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
- } /* end if */
- else {
- /* Test to see if we are can flush the entry now.
- * If we can, go ahead and flush. Note that we
- * aren't trying to do a destroy here, so that
- * is not an issue.
- */
- if(entry_ptr->flush_dep_height == curr_flush_dep_height ) {
-#if H5C_DO_SANITY_CHECKS
- flushed_entries_count++;
- flushed_entries_size += (int64_t)entry_ptr->size;
- entry_size_change = 0;
-#endif /* H5C_DO_SANITY_CHECKS */
- if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flags, entry_size_change_ptr, NULL) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry.")
-
-#if H5C_DO_SANITY_CHECKS
- /* it is possible that the entry size changed
- * during flush -- update flushed_entries_size
- * to account for this.
- */
- flushed_entries_size += entry_size_change;
-#endif /* H5C_DO_SANITY_CHECKS */
-
- flushed_during_dep_loop = TRUE;
-
- if ((cache_ptr->slist_change_in_serialize) ||
- (cache_ptr->slist_change_in_pre_serialize))
- {
- /* The slist has been modified by something
- * other than the simple removal of the
- * of the flushed entry after the flush.
- *
- * This has the potential to corrupt the
- * scan through the slist, so restart it.
- */
- restart_slist_scan = TRUE;
- cache_ptr->slist_change_in_pre_serialize
- = FALSE;
- cache_ptr->slist_change_in_serialize
- = FALSE;
-
- H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
- }
- } /* end if */
- else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
- /* This shouldn't happen -- if it does, just scream and die. */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
- } /* end else */
- } /* end if */
- } /* while ( ( restart_slist_scan ) || ( node_ptr != NULL ) ) */
-
- /* Check for incrementing flush dependency height */
- if(flushed_during_dep_loop) {
- /* If we flushed an entry at this flush dependency height
- * start over at the bottom level of the flush dependencies
- */
- curr_flush_dep_height = 0;
-
- /* Make certain we don't get stuck in an infinite loop */
- flush_dep_passes++;
-
- /* Set flag for outer loop */
- flushed_entries_last_pass = TRUE;
- } /* end if */
- else
- curr_flush_dep_height++;
-
- } /* while ( curr_flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS) */
-
- passes++;
-
-#if H5C_DO_SANITY_CHECKS
- /* Verify that the slist size and length are as expected. */
-
- HDassert( (initial_slist_len + cache_ptr->slist_len_increase -
- flushed_entries_count) == cache_ptr->slist_len );
- HDassert( (size_t)((int64_t)initial_slist_size +
- cache_ptr->slist_size_increase -
- flushed_entries_size) == cache_ptr->slist_size );
-#endif /* H5C_DO_SANITY_CHECKS */
-
- } /* while */
-
- HDassert( protected_entries <= cache_ptr->pl_len );
-
- if ( ( ( cache_ptr->pl_len > 0 ) && ( !ignore_protected ) )
- ||
- ( tried_to_flush_protected_entry ) ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
- "cache has protected items")
- }
-
- if ( ( cache_ptr->slist_len != 0 ) &&
- ( passes >= H5C__MAX_PASSES_ON_FLUSH ) ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
- "flush pass limit exceeded.")
- }
-
-#if H5C_DO_SANITY_CHECKS
- if ( ! flush_marked_entries ) {
-
- HDassert( cache_ptr->slist_len == 0 );
- HDassert( cache_ptr->slist_size == 0 );
- }
-#endif /* H5C_DO_SANITY_CHECKS */
-
- }
+ ring = H5C_RING_USER;
+ while(ring < H5C_RING_NTYPES) {
+ if(H5C_flush_ring(f, dxpl_id, ring, flags) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush ring failed.")
+ ring++;
+ } /* end while */
+ } /* end else */
done:
-
cache_ptr->flush_in_progress = FALSE;
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5C_flush_cache() */
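
The rewritten body above replaces the old multi-pass skip-list scan with a single outer loop over rings; per the new header comment, the outermost ring is flushed first and the innermost last. A self-contained sketch of that control flow, with illustrative names in place of the real H5C types:

    #include <stdio.h>

    typedef enum { RING_UNDEFINED = 0, RING_USER, RING_NTYPES } ring_t;

    /* Stand-in for H5C_flush_ring(); returns negative on failure. */
    static int
    flush_one_ring(int ring)
    {
        printf("flushing ring %d\n", ring);
        return 0;
    }

    /* Outermost ring (RING_USER) first, innermost (RING_NTYPES - 1) last,
     * exactly as in the while loop at the end of H5C_flush_cache() above. */
    static int
    flush_all_rings(void)
    {
        int ring;

        for (ring = RING_USER; ring < RING_NTYPES; ring++)
            if (flush_one_ring(ring) < 0)
                return -1;  /* "flush ring failed." */
        return 0;
    }
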
@@ -2062,20 +1770,20 @@ H5C_insert_entry(H5F_t * f,
void * thing,
unsigned int flags)
{
- H5C_t * cache_ptr;
- herr_t result;
+ H5C_t *cache_ptr;
+ H5P_genplist_t *dxpl;
+ H5AC_ring_t ring = H5C_RING_UNDEFINED;
hbool_t insert_pinned;
hbool_t flush_last;
#ifdef H5_HAVE_PARALLEL
hbool_t coll_access = FALSE; /* whether access to the cache entry is done collectively */
hbool_t flush_collectively;
- H5P_genplist_t * dxpl; /* dataset transfer property list */
#endif /* H5_HAVE_PARALLEL */
hbool_t set_flush_marker;
hbool_t write_permitted = TRUE;
size_t empty_space;
- H5C_cache_entry_t * entry_ptr;
- H5C_cache_entry_t * test_entry_ptr;
+ H5C_cache_entry_t *entry_ptr;
+ H5C_cache_entry_t *test_entry_ptr;
unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
@@ -2112,6 +1820,14 @@ H5C_insert_entry(H5F_t * f,
flush_collectively = ( (flags & H5C__FLUSH_COLLECTIVELY_FLAG) != 0 );
#endif /* H5_HAVE_PARALLEL */
+ /* Get the dataset transfer property list */
+ if(NULL == (dxpl = (H5P_genplist_t *)H5I_object_verify(dxpl_id, H5I_GENPROP_LST)))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, FAIL, "not a property list")
+
+ /* Get the ring type from the DXPL */
+ if((H5P_get(dxpl, H5AC_RING_NAME, &ring)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "unable to query ring value")
+
entry_ptr = (H5C_cache_entry_t *)thing;
/* verify that the new entry isn't already in the hash table -- scream
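
The calls added above fetch the ring value that the caller stashed in the dataset transfer property list; the next hunk copies that value onto the new entry (entry_ptr->ring = ring). A minimal model of the hand-off, with illustrative types standing in for the DXPL and the cache entry:

    typedef enum { RING_UNDEFINED = 0, RING_USER, RING_NTYPES } ring_t;

    typedef struct { ring_t ring; } xfer_props_t;  /* stands in for the DXPL        */
    typedef struct { ring_t ring; } entry_t;       /* stands in for the cache entry */

    /* Analogous to H5P_get(dxpl, H5AC_RING_NAME, &ring) followed by
     * entry_ptr->ring = ring in H5C_insert_entry(). */
    static void
    tag_entry_with_ring(const xfer_props_t *props, entry_t *entry)
    {
        entry->ring = props->ring;
    }
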
@@ -2189,6 +1905,8 @@ H5C_insert_entry(H5F_t * f,
entry_ptr->flush_in_progress = FALSE;
entry_ptr->destroy_in_progress = FALSE;
+ entry_ptr->ring = ring;
+
/* Initialize flush dependency height fields */
entry_ptr->flush_dep_parent = NULL;
for(u = 0; u < H5C__NUM_FLUSH_DEP_HEIGHTS; u++)
@@ -2212,13 +1930,8 @@ H5C_insert_entry(H5F_t * f,
if ( ( cache_ptr->flash_size_increase_possible ) &&
( entry_ptr->size > cache_ptr->flash_size_increase_threshold ) ) {
- result = H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, \
- "H5C__flash_increase_cache_size failed.")
- }
+ if(H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C__flash_increase_cache_size failed.")
}
if(cache_ptr->index_size >= cache_ptr->max_cache_size)
@@ -2235,33 +1948,20 @@ H5C_insert_entry(H5F_t * f,
size_t space_needed;
- if ( empty_space <= entry_ptr->size ) {
-
+ if(empty_space <= entry_ptr->size)
cache_ptr->cache_full = TRUE;
- }
-
- if ( cache_ptr->check_write_permitted != NULL ) {
-
- result = (cache_ptr->check_write_permitted)(f, &write_permitted);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, \
- "Can't get write_permitted")
- }
- } else {
+ if(cache_ptr->check_write_permitted != NULL) {
+ if((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "Can't get write_permitted")
+ } /* end if */
+ else
write_permitted = cache_ptr->write_permitted;
- }
-
- HDassert( entry_ptr->size <= H5C_MAX_ENTRY_SIZE );
+ HDassert(entry_ptr->size <= H5C_MAX_ENTRY_SIZE);
space_needed = entry_ptr->size;
-
- if ( space_needed > cache_ptr->max_cache_size ) {
-
+ if(space_needed > cache_ptr->max_cache_size)
space_needed = cache_ptr->max_cache_size;
- }
/* Note that space_needed is just the amount of space that
* needed to insert the new entry without exceeding the cache
@@ -2288,16 +1988,8 @@ H5C_insert_entry(H5F_t * f,
* no point in worrying about the third.
*/
- result = H5C_make_space_in_cache(f,
- dxpl_id,
- space_needed,
- write_permitted);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, \
- "H5C_make_space_in_cache failed.")
- }
+ if(H5C_make_space_in_cache(f, dxpl_id, space_needed, write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C_make_space_in_cache failed.")
}
H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL)
@@ -2970,6 +2662,7 @@ H5C_protect(H5F_t * f,
unsigned flags)
{
H5C_t * cache_ptr;
+ H5AC_ring_t ring = H5C_RING_UNDEFINED;
hbool_t hit;
hbool_t have_write_permitted = FALSE;
hbool_t read_only = FALSE;
@@ -2979,12 +2672,11 @@ H5C_protect(H5F_t * f,
hbool_t coll_access = FALSE; /* whether access to the cache entry is done collectively */
#endif /* H5_HAVE_PARALLEL */
hbool_t write_permitted;
- herr_t result;
size_t empty_space;
void * thing;
H5C_cache_entry_t * entry_ptr;
H5P_genplist_t * dxpl; /* dataset transfer property list */
- void * ret_value; /* Return value */
+ void * ret_value = NULL; /* Return value */
FUNC_ENTER_NOAPI(NULL)
@@ -3016,8 +2708,12 @@ H5C_protect(H5F_t * f,
#endif /* H5_HAVE_PARALLEL */
/* Get the dataset transfer property list */
- if(NULL == (dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a property list");
+ if(NULL == (dxpl = (H5P_genplist_t *)H5I_object_verify(dxpl_id, H5I_GENPROP_LST)))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, NULL, "not a property list")
+
+ /* Get the ring type from the DXPL */
+ if((H5P_get(dxpl, H5AC_RING_NAME, &ring)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "unable to query ring value")
#ifdef H5_HAVE_PARALLEL
if(H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) {
@@ -3040,6 +2736,9 @@ H5C_protect(H5F_t * f,
H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, NULL)
if ( entry_ptr != NULL ) {
+ if(entry_ptr->ring != ring)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "ring type mismatch occurred for cache entry\n");
+
/* Check for trying to load the wrong type of entry from an address */
if(entry_ptr->type != type)
HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, NULL, "incorrect cache entry type")
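
The new ring check above means a hit in the index is only honored when the caller's ring matches the ring the entry was inserted under, presumably to catch clients that tag the same address with inconsistent rings. The guard in isolation, with illustrative types:

    typedef enum { RING_UNDEFINED = 0, RING_USER, RING_NTYPES } ring_t;
    typedef struct { ring_t ring; } entry_t;

    /* Returns 0 on a consistent access, -1 on a mismatch, mirroring the
     * "ring type mismatch occurred for cache entry" error path above. */
    static int
    check_entry_ring(const entry_t *entry, ring_t requested)
    {
        return (entry->ring == requested) ? 0 : -1;
    }
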
@@ -3120,16 +2819,14 @@ H5C_protect(H5F_t * f,
from disk. */
/* Get the tag from the DXPL */
- if( (H5P_get(dxpl, "H5AC_metadata_tag", &tag)) < 0 )
+ if((H5P_get(dxpl, "H5AC_metadata_tag", &tag)) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "unable to query property value");
/* Verify tag value */
- if (cache_ptr->ignore_tags != TRUE) {
-
+ if(cache_ptr->ignore_tags != TRUE) {
/* Verify legal tag value */
- if ( (H5C_verify_tag(entry_ptr->type->id, tag)) < 0 )
+ if((H5C_verify_tag(entry_ptr->type->id, tag)) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "tag verification failed");
-
} /* end if */
}
#endif
@@ -3155,7 +2852,7 @@ H5C_protect(H5F_t * f,
}
entry_ptr = (H5C_cache_entry_t *)thing;
-
+ entry_ptr->ring = ring;
#ifdef H5_HAVE_PARALLEL
if(H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) {
if(entry_ptr->coll_access) {
@@ -3176,14 +2873,8 @@ H5C_protect(H5F_t * f,
if ( ( cache_ptr->flash_size_increase_possible ) &&
( entry_ptr->size > cache_ptr->flash_size_increase_threshold ) ) {
- result = H5C__flash_increase_cache_size(cache_ptr, 0,
- entry_ptr->size);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
- "H5C__flash_increase_cache_size failed.")
- }
+ if(H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__flash_increase_cache_size failed.")
}
if(cache_ptr->index_size >= cache_ptr->max_cache_size)
@@ -3209,20 +2900,11 @@ H5C_protect(H5F_t * f,
if(empty_space <= entry_ptr->size)
cache_ptr->cache_full = TRUE;
- if ( cache_ptr->check_write_permitted != NULL ) {
-
- result = (cache_ptr->check_write_permitted)(f, &write_permitted);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
- "Can't get write_permitted 1")
-
- } else {
-
+ if(cache_ptr->check_write_permitted != NULL) {
+ if((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Can't get write_permitted 1")
+ else
have_write_permitted = TRUE;
-
- }
} else {
write_permitted = cache_ptr->write_permitted;
@@ -3269,16 +2951,8 @@ H5C_protect(H5F_t * f,
* see no point in worrying about the fourth.
*/
- result = H5C_make_space_in_cache(f,
- dxpl_id,
- space_needed,
- write_permitted);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
- "H5C_make_space_in_cache failed 1.")
- }
+ if(H5C_make_space_in_cache(f, dxpl_id, space_needed, write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C_make_space_in_cache failed 1.")
}
/* Insert the entry in the hash table. It can't be dirty yet, so
@@ -3372,19 +3046,10 @@ H5C_protect(H5F_t * f,
if ( ! have_write_permitted ) {
if ( cache_ptr->check_write_permitted != NULL ) {
-
- result = (cache_ptr->check_write_permitted)(f, &write_permitted);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
- "Can't get write_permitted 2")
-
- } else {
-
+ if((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Can't get write_permitted 2")
+ else
have_write_permitted = TRUE;
-
- }
} else {
write_permitted = cache_ptr->write_permitted;
@@ -3398,14 +3063,8 @@ H5C_protect(H5F_t * f,
( cache_ptr->cache_accesses >=
(cache_ptr->resize_ctl).epoch_length ) ) {
- result = H5C__auto_adjust_cache_size(f,
- dxpl_id,
- write_permitted);
- if ( result != SUCCEED ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
- "Cache auto-resize failed.")
- }
+ if(H5C__auto_adjust_cache_size(f, dxpl_id, write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Cache auto-resize failed.")
}
if ( cache_ptr->size_decreased ) {
@@ -3434,16 +3093,8 @@ H5C_protect(H5F_t * f,
if(cache_ptr->index_size > cache_ptr->max_cache_size)
cache_ptr->cache_full = TRUE;
- result = H5C_make_space_in_cache(f,
- dxpl_id,
- (size_t)0,
- write_permitted);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
- "H5C_make_space_in_cache failed 2.")
- }
+ if(H5C_make_space_in_cache(f, dxpl_id, (size_t)0, write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C_make_space_in_cache failed 2.")
}
}
}
@@ -5587,6 +5238,7 @@ H5C_create_flush_dependency(void * parent_thing, void * child_thing)
HDassert(H5F_addr_defined(child_entry->addr));
HDassert(child_entry->flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS);
cache_ptr = parent_entry->cache_ptr;
+ HDassert(parent_entry->ring == child_entry->ring);
HDassert(cache_ptr);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
HDassert(cache_ptr == child_entry->cache_ptr);
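
The added assertion ties flush dependencies to rings: parent and child must live in the same ring. Since rings are flushed in a fixed outer-to-inner order, a dependency spanning two rings could not always be honored. A compact model of the invariant (illustrative types, not the real H5C declarations):

    #include <assert.h>

    typedef enum { RING_UNDEFINED = 0, RING_USER, RING_NTYPES } ring_t;
    typedef struct { ring_t ring; } entry_t;

    static void
    create_flush_dependency(entry_t *parent, entry_t *child)
    {
        /* Same invariant as the new HDassert() in H5C_create_flush_dependency(). */
        assert(parent->ring == child->ring);
        /* ... record the dependency ... */
    }
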
@@ -7052,48 +6704,139 @@ done:
* Programmer: John Mainzer
 * 3/24/05
*
- * Changes: Modified function to test for slist chamges in
- * pre_serialize and serialize callbacks, and re-start
- * scans through the slist when such changes occur.
- *
- * This has been a potential problem for some time,
- * and there has been code in this function to deal
- * with elements of this issue. However the shift
- * to the V3 cache in combination with the activities
- * of some of the cache clients (in particular the
- * free space manager and the fractal heap) have
- * made this re-work necessary in H5C_flush_cache.
- *
- * At present, this issue doesn't seem to be causing problems
- * in H5C_flush_invalidate_cache(). However, it seems
- * prudent to port the H5C_flush_cache changes to this
- * function as well.
- *
- * JRM -- 12/14/14
- *
- * Added code to track entry size change during flush single
- * entry. This didn't used to be a problem, as the entry
- * was largely removed from the cache data structures before
- * the flush proper. However, re-entrant calls to the cache
- * in the parallel case required a re-factoring of the
- * H5C__flush_single_entry() function to keep entries fully
- * in the cache until after the pre-serialize and serialize
- * calls.
- * JRM -- 12/25/14
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
+{
+ H5C_t * cache_ptr;
+ H5C_ring_t ring;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_ptr);
+
+#if H5C_DO_SANITY_CHECKS
+{
+ int32_t i;
+ int32_t index_len = 0;
+ int32_t slist_len = 0;
+ size_t index_size = (size_t)0;
+ size_t clean_index_size = (size_t)0;
+ size_t dirty_index_size = (size_t)0;
+ size_t slist_size = (size_t)0;
+
+ HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+
+ for(i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
+ index_len += cache_ptr->index_ring_len[i];
+ index_size += cache_ptr->index_ring_size[i];
+ clean_index_size += cache_ptr->clean_index_ring_size[i];
+ dirty_index_size += cache_ptr->dirty_index_ring_size[i];
+
+ slist_len += cache_ptr->slist_ring_len[i];
+ slist_size += cache_ptr->slist_ring_size[i];
+ } /* end for */
+
+ HDassert(cache_ptr->index_len == index_len);
+ HDassert(cache_ptr->index_size == index_size);
+ HDassert(cache_ptr->clean_index_size == clean_index_size);
+ HDassert(cache_ptr->dirty_index_size == dirty_index_size);
+ HDassert(cache_ptr->slist_len == slist_len);
+ HDassert(cache_ptr->slist_size == slist_size);
+}
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ /* remove ageout markers if present */
+ if(cache_ptr->epoch_markers_active > 0)
+ if(H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers.")
+
+ /* flush invalidate each ring, starting from the outermost ring and
+ * working inward.
+ */
+ ring = H5C_RING_USER;
+ while(ring < H5C_RING_NTYPES) {
+ if(H5C_flush_invalidate_ring(f, dxpl_id, ring, flags) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate ring failed.")
+ ring++;
+ } /* end while */
+
+ /* Invariants, after destroying all entries in the hash table */
+ HDassert(cache_ptr->index_size == 0);
+ HDassert(cache_ptr->clean_index_size == 0);
+ HDassert(cache_ptr->dirty_index_size == 0);
+ HDassert(cache_ptr->slist_len == 0);
+ HDassert(cache_ptr->slist_size == 0);
+ HDassert(cache_ptr->pel_len == 0);
+ HDassert(cache_ptr->pel_size == 0);
+ HDassert(cache_ptr->pl_len == 0);
+ HDassert(cache_ptr->pl_size == 0);
+ HDassert(cache_ptr->LRU_list_len == 0);
+ HDassert(cache_ptr->LRU_list_size == 0);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_flush_invalidate_cache() */
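
The sanity-check block near the top of this function encodes the central bookkeeping invariant of the ring changes: the per-ring counters partition the global ones, and H5C_RING_UNDEFINED must stay empty. The same check, restated as a small self-contained function over an illustrative struct:

    #include <assert.h>
    #include <stddef.h>

    typedef enum { RING_UNDEFINED = 0, RING_USER, RING_NTYPES } ring_t;

    typedef struct {
        int    index_len;
        size_t index_size;
        int    index_ring_len[RING_NTYPES];
        size_t index_ring_size[RING_NTYPES];
    } mini_cache_t;

    static void
    check_ring_partition(const mini_cache_t *c)
    {
        int    len  = 0;
        size_t size = 0;
        int    i;

        assert(c->index_ring_len[RING_UNDEFINED] == 0);
        for (i = RING_USER; i < RING_NTYPES; i++) {
            len  += c->index_ring_len[i];
            size += c->index_ring_size[i];
        }
        /* Summing over the rings must reproduce the cache-wide totals. */
        assert(len == c->index_len);
        assert(size == c->index_size);
    }
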
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_flush_invalidate_ring
+ *
+ * Purpose: Flush and destroy the entries contained in the target
+ * cache and ring.
+ *
+ * If the ring contains protected entries, the function will
+ * fail, as protected entries cannot be either flushed or
+ * destroyed. However, all unprotected entries should be
+ * flushed and destroyed before the function returns failure.
+ *
+ * While pinned entries can usually be flushed, they cannot
+ * be destroyed. However, they should be unpinned when all
+ * the entries that reference them have been destroyed (thus
+ * reducing the pinned entry's reference count to 0, allowing
+ * it to be unpinned).
+ *
+ * If pinned entries are present, the function makes repeated
+ * passes through the cache, flushing all dirty entries
+ * (including the pinned dirty entries where permitted) and
+ * destroying all unpinned entries. This process is repeated
+ * until either the cache is empty, or the number of pinned
+ * entries stops decreasing on each pass.
+ *
+ * If flush dependencies appear in the target ring, the
+ * function makes repeated passes through the cache flushing
+ * entries in flush dependency order.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 9/1/15
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5C_flush_invalidate_cache(const H5F_t * f,
- hid_t dxpl_id,
- unsigned flags)
+H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
+ unsigned flags)
{
- H5C_t * cache_ptr = f->shared->cache;
+ H5C_t * cache_ptr;
hbool_t restart_slist_scan;
int32_t protected_entries = 0;
int32_t i;
- int32_t cur_pel_len;
- int32_t old_pel_len;
+ int32_t cur_ring_pel_len;
+ int32_t old_ring_pel_len;
int32_t passes = 0;
unsigned cooked_flags;
H5SL_node_t * node_ptr = NULL;
@@ -7113,27 +6856,29 @@ H5C_flush_invalidate_cache(const H5F_t * f,
FUNC_ENTER_NOAPI(FAIL)
- HDassert( f );
- HDassert( cache_ptr );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- HDassert( cache_ptr->slist_ptr );
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_ptr);
+ HDassert(ring > H5C_RING_UNDEFINED);
+ HDassert(ring < H5C_RING_NTYPES);
+
+ HDassert(cache_ptr->epoch_markers_active == 0);
/* Filter out the flags that are not relevant to the flush/invalidate.
* At present, only the H5C__FLUSH_CLEAR_ONLY_FLAG is kept.
*/
cooked_flags = flags & H5C__FLUSH_CLEAR_ONLY_FLAG;
- /* remove ageout markers if present */
- if(cache_ptr->epoch_markers_active > 0)
- if(H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers.")
 /* The flush procedure here is a bit strange.
*
* In the outer while loop we make at least one pass through the
- * cache, and then repeat until either all the pinned entries
- * unpin themselves, or until the number of pinned entries stops
- * declining. In this later case, we scream and die.
+ * cache, and then repeat until either all the pinned entries in
+ * the ring unpin themselves, or until the number of pinned entries
+ * in the ring stops declining. In this latter case, we scream and die.
*
* Since the fractal heap can dirty, resize, and/or move entries
 * in its flush callback, it is possible that the cache will still
@@ -7159,19 +6904,27 @@ H5C_flush_invalidate_cache(const H5F_t * f,
* to increasing address order, but there are no guarantees.
*/
- cur_pel_len = cache_ptr->pel_len;
- old_pel_len = cache_ptr->pel_len;
+ /* compute the number of pinned entries in this ring */
+ entry_ptr = cache_ptr->pel_head_ptr;
+ cur_ring_pel_len = 0;
+ while(entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring >= ring);
+ if(entry_ptr->ring == ring)
+ cur_ring_pel_len++;
- while ( cache_ptr->index_len > 0 )
- {
+ entry_ptr = entry_ptr->next;
+ } /* end while */
+
+ old_ring_pel_len = cur_ring_pel_len;
+ while(cache_ptr->index_ring_len[ring] > 0) {
unsigned curr_flush_dep_height = 0;
unsigned flush_dep_passes = 0;
/* Loop over all flush dependency heights of entries */
while((curr_flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS) &&
- (cache_ptr->index_len > 0 ) &&
- (flush_dep_passes < H5C__MAX_PASSES_ON_FLUSH) )
- {
+ (cache_ptr->index_ring_len[ring] > 0) &&
+ (flush_dep_passes < H5C__MAX_PASSES_ON_FLUSH)) {
hbool_t flushed_during_dep_loop = FALSE;
/* first, try to flush-destroy any dirty entries. Do this by
@@ -7226,37 +6979,27 @@ H5C_flush_invalidate_cache(const H5F_t * f,
cache_ptr->slist_change_in_serialize = FALSE;
/* this done, start the scan of the slist */
-
restart_slist_scan = TRUE;
-
- while ( ( restart_slist_scan ) || ( node_ptr != NULL ) )
- {
- if ( restart_slist_scan )
- {
+ while(restart_slist_scan || (node_ptr != NULL)) {
+ if(restart_slist_scan) {
restart_slist_scan = FALSE;
/* Start at beginning of skip list */
node_ptr = H5SL_first(cache_ptr->slist_ptr);
-
- if ( node_ptr == NULL )
- {
+ if(node_ptr == NULL)
/* the slist is empty -- break out of inner loop */
break;
- }
- HDassert( node_ptr != NULL );
/* Get cache entry for this node */
next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ if(NULL == next_entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
- if(NULL == next_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "next_entry_ptr == NULL ?!?!")
-
- HDassert( next_entry_ptr->magic == \
- H5C__H5C_CACHE_ENTRY_T_MAGIC );
- HDassert( next_entry_ptr->is_dirty );
- HDassert( next_entry_ptr->in_slist );
- }
+ HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(next_entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->in_slist);
+ HDassert(next_entry_ptr->ring >= ring);
+ } /* end if */
entry_ptr = next_entry_ptr;
@@ -7271,27 +7014,27 @@ H5C_flush_invalidate_cache(const H5F_t * f,
* slist if we find them. However, best we do some extra
* sanity checking just in case.
*/
-
- HDassert( entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC );
- HDassert( entry_ptr->in_slist );
- HDassert( entry_ptr->is_dirty );
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->in_slist);
+ HDassert(entry_ptr->is_dirty);
+ HDassert(entry_ptr->ring >= ring);
/* increment node pointer now, before we delete its target
* from the slist.
*/
-
node_ptr = H5SL_next(node_ptr);
- if ( node_ptr != NULL ) {
+ if(node_ptr != NULL) {
next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- if ( NULL == next_entry_ptr )
+ if(NULL == next_entry_ptr)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
- HDassert( next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC );
- HDassert( next_entry_ptr->is_dirty );
- HDassert( next_entry_ptr->in_slist );
- HDassert( entry_ptr != next_entry_ptr );
- } else {
+ HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(next_entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->in_slist);
+ HDassert(next_entry_ptr->ring >= ring);
+ HDassert(entry_ptr != next_entry_ptr);
+ } /* end if */
+ else
next_entry_ptr = NULL;
- }
/* Note that we now remove nodes from the slist as we flush
* the associated entries, instead of leaving them there
@@ -7303,30 +7046,26 @@ H5C_flush_invalidate_cache(const H5F_t * f,
 * flush, we must keep the slist in canonical form at all
* times.
*/
-
- HDassert( entry_ptr != NULL );
- HDassert( entry_ptr->in_slist );
-
- if ( ( ! entry_ptr->flush_me_last ) ||
- ( ( entry_ptr->flush_me_last ) &&
- ( cache_ptr->num_last_entries >=
- cache_ptr->slist_len ) ) ) {
-
- if ( entry_ptr->is_protected ) {
-
+ HDassert(entry_ptr != NULL);
+ HDassert(entry_ptr->in_slist);
+
+ if(((!entry_ptr->flush_me_last) ||
+ ((entry_ptr->flush_me_last) &&
+ (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
+ (entry_ptr->ring == ring)) {
+ if(entry_ptr->is_protected) {
/* we have major problems -- but lets flush
* everything we can before we flag an error.
*/
protected_entries++;
-
- } else if ( entry_ptr->is_pinned ) {
+ } else if(entry_ptr->is_pinned) {
 /* Test to see if we can flush the entry now.
* If we can, go ahead and flush, but don't tell
* H5C__flush_single_entry() to destroy the entry
* as pinned entries can't be evicted.
*/
- if(entry_ptr->flush_dep_height == curr_flush_dep_height ) {
+ if(entry_ptr->flush_dep_height == curr_flush_dep_height) {
#if H5C_DO_SANITY_CHECKS
/* update flushed_slist_len & flushed_slist_size
* before the flush. Note that the entry will
@@ -7343,7 +7082,6 @@ H5C_flush_invalidate_cache(const H5F_t * f,
if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__NO_FLAGS_SET, entry_size_change_ptr, NULL) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed.")
-
#if H5C_DO_SANITY_CHECKS
/* entry size may have changed during the flush.
* Update flushed_slist_size to account for this.
@@ -7352,10 +7090,8 @@ H5C_flush_invalidate_cache(const H5F_t * f,
#endif /* H5C_DO_SANITY_CHECKS */
flushed_during_dep_loop = TRUE;
-
- if ( ( cache_ptr->slist_change_in_serialize ) ||
- ( cache_ptr->slist_change_in_pre_serialize ) )
- {
+ if((cache_ptr->slist_change_in_serialize) ||
+ (cache_ptr->slist_change_in_pre_serialize)) {
/* The slist has been modified by something
 * other than the simple removal of the
 * flushed entry after the flush.
@@ -7364,18 +7100,17 @@ H5C_flush_invalidate_cache(const H5F_t * f,
* scan through the slist, so restart it.
*/
restart_slist_scan = TRUE;
- cache_ptr->slist_change_in_pre_serialize
- = FALSE;
+ cache_ptr->slist_change_in_pre_serialize = FALSE;
cache_ptr->slist_change_in_serialize = FALSE;
H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr);
- }
+ } /* end if */
} /* end if */
else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
/* This shouldn't happen -- if it does, just scream and die. */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
} /* end if */
else {
- if(entry_ptr->flush_dep_height == curr_flush_dep_height ){
+ if(entry_ptr->flush_dep_height == curr_flush_dep_height) {
#if H5C_DO_SANITY_CHECKS
/* update flushed_slist_len & flushed_slist_size
* before the flush. Note that the entry will
@@ -7390,9 +7125,10 @@ H5C_flush_invalidate_cache(const H5F_t * f,
entry_size_change = 0;
#endif /* H5C_DO_SANITY_CHECKS */
- if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, (cooked_flags | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG), entry_size_change_ptr, NULL) < 0)
+ if(H5C__flush_single_entry(f, dxpl_id, entry_ptr,
+ (cooked_flags | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG),
+ entry_size_change_ptr, NULL) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry flush destroy failed.")
-
#if H5C_DO_SANITY_CHECKS
/* entry size may have changed during the flush.
* Update flushed_slist_size to account for this.
@@ -7402,9 +7138,8 @@ H5C_flush_invalidate_cache(const H5F_t * f,
flushed_during_dep_loop = TRUE;
- if ((cache_ptr->slist_change_in_serialize) ||
- (cache_ptr->slist_change_in_pre_serialize))
- {
+ if((cache_ptr->slist_change_in_serialize) ||
+ (cache_ptr->slist_change_in_pre_serialize)) {
/* The slist has been modified by something
 * other than the simple removal of the
 * flushed entry after the flush.
@@ -7413,15 +7148,14 @@ H5C_flush_invalidate_cache(const H5F_t * f,
* scan through the slist, so restart it.
*/
restart_slist_scan = TRUE;
- cache_ptr->slist_change_in_pre_serialize
- = FALSE;
+ cache_ptr->slist_change_in_pre_serialize = FALSE;
cache_ptr->slist_change_in_serialize = FALSE;
H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
- }
+ } /* end if */
} /* end if */
else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
/* This shouldn't happen -- if it does, just scream and die. */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
} /* end else */
} /* end if */
} /* end while loop scanning skip list */
@@ -7436,13 +7170,12 @@ H5C_flush_invalidate_cache(const H5F_t * f,
* out from under us.
*/
- if ( node_ptr == NULL ) {
-
- HDassert( (flushed_slist_len + cache_ptr->slist_len) ==
- (initial_slist_len + cache_ptr->slist_len_increase) );
- HDassert( (flushed_slist_size + (int64_t)cache_ptr->slist_size) ==
- ((int64_t)initial_slist_size + cache_ptr->slist_size_increase) );
- }
+ if(node_ptr == NULL) {
+ HDassert((flushed_slist_len + cache_ptr->slist_len) ==
+ (initial_slist_len + cache_ptr->slist_len_increase));
+ HDassert((flushed_slist_size + (int64_t)cache_ptr->slist_size) ==
+ ((int64_t)initial_slist_size + cache_ptr->slist_size_increase));
+ } /* end if */
#endif /* H5C_DO_SANITY_CHECKS */
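
The assertion above is plain bookkeeping: entries on the skip list when the scan began, plus entries the pre-serialize/serialize callbacks slipped in, must equal the entries flushed plus the entries still on the list. A worked instance with made-up numbers:

    #include <assert.h>

    int main(void)
    {
        long initial_slist_len   = 10; /* on the slist when the scan started */
        long slist_len_increase  =  3; /* inserted by serialize callbacks    */
        long flushed_slist_len   =  8; /* flush-destroyed during the scan    */
        long remaining_slist_len =  5; /* left on the slist after the scan   */

        /* flushed + remaining == initial + increase, as asserted above. */
        assert(flushed_slist_len + remaining_slist_len ==
               initial_slist_len + slist_len_increase);
        return 0;
    }
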
/* Since we are doing a destroy, we must make a pass through
@@ -7456,42 +7189,37 @@ H5C_flush_invalidate_cache(const H5F_t * f,
*
* Writes to disk are possible here.
*/
- for ( i = 0; i < H5C__HASH_TABLE_LEN; i++ )
- {
+ for(i = 0; i < H5C__HASH_TABLE_LEN; i++) {
next_entry_ptr = cache_ptr->index[i];
- while ( next_entry_ptr != NULL )
- {
+ while(next_entry_ptr != NULL) {
entry_ptr = next_entry_ptr;
- HDassert( entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC );
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring >= ring);
next_entry_ptr = entry_ptr->ht_next;
- HDassert ( ( next_entry_ptr == NULL ) ||
- ( next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC ) );
-
- if ( ( ! entry_ptr->flush_me_last ) ||
- ( ( entry_ptr->flush_me_last ) &&
- ( cache_ptr->num_last_entries >=
- cache_ptr->slist_len ) ) ) {
+ HDassert((next_entry_ptr == NULL) ||
+ (next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC));
- if ( entry_ptr->is_protected ) {
+ if(((!entry_ptr->flush_me_last) ||
+ ((entry_ptr->flush_me_last) &&
+ (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
+ (entry_ptr->ring == ring)) {
+ if(entry_ptr->is_protected) {
/* we have major problems -- but lets flush and
* destroy everything we can before we flag an
* error.
*/
protected_entries++;
-
- if ( ! entry_ptr->in_slist ) {
-
- HDassert( !(entry_ptr->is_dirty) );
- }
- } else if ( ! ( entry_ptr->is_pinned ) ) {
+ if(!entry_ptr->in_slist)
+ HDassert(!(entry_ptr->is_dirty));
+ } else if(!(entry_ptr->is_pinned)) {
 /* Test to see if we can flush the entry now.
* If we can, go ahead and flush.
*/
- if(entry_ptr->flush_dep_height == curr_flush_dep_height ){
+ if(entry_ptr->flush_dep_height == curr_flush_dep_height) {
/* if *entry_ptr is dirty, it is possible
* that one or more other entries may be
* either removed from the cache, loaded
@@ -7519,24 +7247,25 @@ H5C_flush_invalidate_cache(const H5F_t * f,
entry_was_dirty = entry_ptr->is_dirty;
- if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, (cooked_flags | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG), NULL, NULL) < 0)
+ if(H5C__flush_single_entry(f, dxpl_id, entry_ptr,
+ (cooked_flags | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG),
+ NULL, NULL) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed.")
- if ( entry_was_dirty ) {
-
+ if(entry_was_dirty) {
/* update stats for hash bucket scan
* restart here.
* -- JRM
*/
next_entry_ptr = cache_ptr->index[i];
H5C__UPDATE_STATS_FOR_HASH_BUCKET_SCAN_RESTART(cache_ptr)
- }
+ } /* end if */
flushed_during_dep_loop = TRUE;
} /* end if */
else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
/* This shouldn't happen -- if it does, just scream and die. */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
} /* end if */
} /* end if */
/* We can't do anything if the entry is pinned. The
@@ -7563,16 +7292,11 @@ H5C_flush_invalidate_cache(const H5F_t * f,
* in case I am wrong.
* -- JRM
*/
- if ( ( next_entry_ptr != NULL ) &&
- ( next_entry_ptr->magic !=
- H5C__H5C_CACHE_ENTRY_T_MAGIC ) ) {
-
+ if((next_entry_ptr != NULL) && (next_entry_ptr->magic != H5C__H5C_CACHE_ENTRY_T_MAGIC))
/* Something horrible has happened to
* *next_entry_ptr -- scream and die.
*/
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "next_entry_ptr->magic is invalid?!?!?.")
- }
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr->magic is invalid?!?!?.")
} /* end while loop scanning hash table bin */
} /* end for loop scanning hash table */
@@ -7591,68 +7315,439 @@ H5C_flush_invalidate_cache(const H5F_t * f,
} /* end while loop over flush dependency heights */
- old_pel_len = cur_pel_len;
- cur_pel_len = cache_ptr->pel_len;
-
- if ( ( cur_pel_len > 0 ) && ( cur_pel_len >= old_pel_len ) ) {
-
- /* The number of pinned entries is positive, and it is not
- * declining. Scream and die.
- */
+ old_ring_pel_len = cur_ring_pel_len;
+ entry_ptr = cache_ptr->pel_head_ptr;
+ cur_ring_pel_len = 0;
+ while(entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring >= ring);
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
- "Pinned entry count not decreasing, cur_pel_len = %d, old_pel_len = %d", (int)cur_pel_len, (int)old_pel_len)
+ if(entry_ptr->ring == ring)
+ cur_ring_pel_len++;
- } else if ( ( cur_pel_len == 0 ) && ( old_pel_len == 0 ) ) {
+ entry_ptr = entry_ptr->next;
+ } /* end while */
+ if((cur_ring_pel_len > 0) && (cur_ring_pel_len >= old_ring_pel_len)) {
+ /* The number of pinned entries in the ring is positive, and
+ * it is not declining. Scream and die.
+ */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Pinned entry count not decreasing, cur_ring_pel_len = %d, old_ring_pel_len = %d, ring = %d", (int)cur_ring_pel_len, (int)old_ring_pel_len, (int)ring)
+ } else if((cur_ring_pel_len == 0) && (old_ring_pel_len == 0)) {
/* increment the pass count */
passes++;
}
- if ( passes >= H5C__MAX_PASSES_ON_FLUSH ) {
-
+ if(passes >= H5C__MAX_PASSES_ON_FLUSH)
/* we have exceeded the maximum number of passes through the
* cache to flush and destroy all entries. Scream and die.
*/
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
- "Maximum passes on flush exceeded.")
- }
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Maximum passes on flush exceeded.")
} /* main while loop */
- /* Invariants, after destroying all entries in the hash table */
- HDassert( cache_ptr->index_size == 0 );
- HDassert( cache_ptr->clean_index_size == 0 );
- HDassert( cache_ptr->dirty_index_size == 0 );
- HDassert( cache_ptr->slist_len == 0 );
- HDassert( cache_ptr->slist_size == 0 );
- HDassert( cache_ptr->pel_len == 0 );
- HDassert( cache_ptr->pel_size == 0 );
- HDassert( cache_ptr->pl_len == 0 );
- HDassert( cache_ptr->pl_size == 0 );
- HDassert( cache_ptr->LRU_list_len == 0 );
- HDassert( cache_ptr->LRU_list_size == 0 );
+ /* Invariants, after destroying all entries in the ring */
+ for(i = (int)H5C_RING_UNDEFINED; i <= (int)ring; i++) {
+ HDassert(cache_ptr->index_ring_len[i] == 0);
+ HDassert(cache_ptr->index_ring_size[i] == (size_t)0);
+ HDassert(cache_ptr->clean_index_ring_size[i] == (size_t)0);
+ HDassert(cache_ptr->dirty_index_ring_size[i] == (size_t)0);
+ HDassert(cache_ptr->slist_ring_len[i] == 0);
+ HDassert(cache_ptr->slist_ring_size[i] == (size_t)0);
+ } /* end for */
- HDassert( protected_entries <= cache_ptr->pl_len );
+ HDassert(protected_entries <= cache_ptr->pl_len);
- if ( protected_entries > 0 ) {
+ if(protected_entries > 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Cache has protected entries.")
+ else if(cur_ring_pel_len > 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't unpin all pinned entries in ring.")
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
- "Cache has protected entries.")
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_flush_invalidate_ring() */
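
The outer while loop of this function repeats whole flush/destroy passes until the ring empties, bailing out if the pinned-entry count stops shrinking or the pass limit is hit. Below, the stall-detection logic reduced to a self-contained skeleton (the real loop keys off the ring's index length and also caps the number of passes):

    /* pass() performs one full flush/destroy sweep over the ring; pinned()
     * reports how many pinned entries remain. Both are caller-supplied
     * stand-ins for the bodies inlined in H5C_flush_invalidate_ring(). */
    static int
    drain_ring(int (*pass)(void *), int (*pinned)(void *), void *ctx)
    {
        int cur = pinned(ctx);
        int old;

        do {
            if (pass(ctx) < 0)
                return -1;
            old = cur;
            cur = pinned(ctx);
        } while (cur > 0 && cur < old);

        /* cur == 0: every pinned entry was unpinned and destroyed.
         * cur >= old while nonzero: "Pinned entry count not decreasing". */
        return (cur == 0) ? 0 : -1;
    }
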
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_flush_ring
+ *
+ * Purpose: Flush the entries contained in the specified cache and
+ * ring. All entries in rings outside the specified ring
+ * must have been flushed on entry.
+ *
+ * If the cache contains protected entries in the specified
+ * ring, the function will fail, as protected entries cannot
+ * be flushed. However all unprotected entries in the target
+ * ring should be flushed before the function returns failure.
+ *
+ * If flush dependencies appear in the target ring, the
+ * function makes repeated passes through the slist flushing
+ * entries in flush dependency order.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 9/1/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring, unsigned flags)
+{
+ H5C_t * cache_ptr = f->shared->cache;
+ hbool_t flushed_entries_last_pass;
+ hbool_t flush_marked_entries;
+ hbool_t ignore_protected;
+ hbool_t tried_to_flush_protected_entry = FALSE;
+ hbool_t restart_slist_scan;
+ int32_t passes = 0;
+ int32_t protected_entries = 0;
+ H5SL_node_t * node_ptr = NULL;
+ H5C_cache_entry_t * entry_ptr = NULL;
+ H5C_cache_entry_t * next_entry_ptr = NULL;
+#if H5C_DO_SANITY_CHECKS
+ int64_t flushed_entries_count = 0;
+ int64_t flushed_entries_size = 0;
+ int64_t initial_slist_len = 0;
+ size_t initial_slist_size = 0;
+ int64_t entry_size_change;
+ int64_t * entry_size_change_ptr = &entry_size_change;
+#else /* H5C_DO_SANITY_CHECKS */
+ int64_t * entry_size_change_ptr = NULL;
+#endif /* H5C_DO_SANITY_CHECKS */
+ int i;
+ herr_t ret_value = SUCCEED;
- } else if ( cur_pel_len > 0 ) {
+ FUNC_ENTER_NOAPI(FAIL)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
- "Can't unpin all pinned entries.")
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_ptr);
+ HDassert((flags & H5C__FLUSH_INVALIDATE_FLAG) == 0);
+ HDassert(ring > H5C_RING_UNDEFINED);
+ HDassert(ring < H5C_RING_NTYPES);
- }
+#if H5C_DO_EXTREME_SANITY_CHECKS
+ if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_lru_list(cache_ptr) < 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry.\n");
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-done:
+ ignore_protected = ( (flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0 );
+ flush_marked_entries = ( (flags & H5C__FLUSH_MARKED_ENTRIES_FLAG) != 0 );
- FUNC_LEAVE_NOAPI(ret_value)
+ if(!flush_marked_entries)
+ for(i = (int)H5C_RING_UNDEFINED; i < (int)ring; i++)
+ HDassert(cache_ptr->slist_ring_len[i] == 0);
-} /* H5C_flush_invalidate_cache() */
+ HDassert(cache_ptr->flush_in_progress);
+
+ /* When we are only flushing marked entries, the slist will usually
+ * still contain entries when we have flushed everything we should.
+ * Thus we track whether we have flushed any entries in the last
+ * pass, and terminate if we haven't.
+ */
+ flushed_entries_last_pass = TRUE;
+
+ /* set the cache_ptr->slist_change_in_pre_serialize and
+ * cache_ptr->slist_change_in_serialize to false.
+ *
+ * These flags are set to TRUE by H5C__flush_single_entry if the
+ * slist is modified by a pre_serialize or serialize call respectively.
+ * H5C_flush_cache uses these flags to detect any modifications
+ * to the slist that might corrupt the scan of the slist -- and
+ * restart the scan in this event.
+ */
+ cache_ptr->slist_change_in_pre_serialize = FALSE;
+ cache_ptr->slist_change_in_serialize = FALSE;
+
+ while((passes < H5C__MAX_PASSES_ON_FLUSH) &&
+ (cache_ptr->slist_ring_len[ring] > 0) &&
+ (protected_entries == 0) &&
+ (flushed_entries_last_pass)) {
+ unsigned curr_flush_dep_height = 0;
+ unsigned flush_dep_passes = 0;
+
+ flushed_entries_last_pass = FALSE;
+
+ /* Loop over all flush dependency heights of entries */
+ while((curr_flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS) &&
+ (cache_ptr->slist_ring_len[ring] > 0) &&
+ (flush_dep_passes < H5C__MAX_PASSES_ON_FLUSH)) {
+ hbool_t flushed_during_dep_loop = FALSE;
+
+#if H5C_DO_SANITY_CHECKS
+ /* For sanity checking, try to verify that the skip list has
+ * the expected size and number of entries at the end of each
+ * internal while loop (see below).
+ *
+             * Doing this gets a bit tricky, as depending on flags, we may
+ * or may not flush all the entries in the slist.
+ *
+ * To make things more entertaining, with the advent of the
+ * fractal heap, the entry serialize callback can cause entries
+ * to be dirtied, resized, and/or moved. Also, the
+ * pre_serialize callback can result in an entry being
+ * removed from the cache via the take ownership flag.
+ *
+ * To deal with this, we first make note of the initial
+ * skip list length and size:
+ */
+ initial_slist_len = cache_ptr->slist_len;
+ initial_slist_size = cache_ptr->slist_size;
+
+ /* We then zero counters that we use to track the number
+ * and total size of entries flushed:
+ */
+ flushed_entries_count = 0;
+ flushed_entries_size = 0;
+
+ /* As mentioned above, there is the possibility that
+ * entries will be dirtied, resized, flushed, or removed
+ * from the cache via the take ownership flag during
+ * our pass through the skip list. To capture the number
+ * of entries added, and the skip list size delta,
+ * zero the slist_len_increase and slist_size_increase of
+ * the cache's instance of H5C_t. These fields will be
+ * updated elsewhere to account for slist insertions and/or
+ * dirty entry size changes.
+ */
+ cache_ptr->slist_len_increase = 0;
+ cache_ptr->slist_size_increase = 0;
+
+            /* At the end of the loop, use these values to compute the
+             * expected slist length and size and compare them with the
+             * values recorded in the cache's instance of H5C_t.
+             */
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ restart_slist_scan = TRUE;
+
+            while((restart_slist_scan) || (node_ptr != NULL)) {
+ if(restart_slist_scan) {
+ restart_slist_scan = FALSE;
+
+ /* Start at beginning of skip list */
+ node_ptr = H5SL_first(cache_ptr->slist_ptr);
+
+ if(node_ptr == NULL)
+ /* the slist is empty -- break out of inner loop */
+ break;
+
+ /* Get cache entry for this node */
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+
+ if(NULL == next_entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+
+ HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(next_entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->in_slist);
+ } /* end if */
+
+ entry_ptr = next_entry_ptr;
+
+ /* With the advent of the fractal heap, the free space
+ * manager, and the version 3 cache, it is possible
+ * that the pre-serialize or serialize callback will
+ * dirty, resize, or take ownership of other entries
+ * in the cache.
+ *
+ * To deal with this, I have inserted code to detect any
+ * change in the skip list not directly under the control
+ * of this function. If such modifications are detected,
+ * we must re-start the scan of the skip list to avoid
+ * the possibility that the target of the next_entry_ptr
+ * may have been flushed or deleted from the cache.
+ *
+ * To verify that all such possibilities have been dealt
+ * with, we do a bit of extra sanity checking on
+ * entry_ptr.
+ */
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->in_slist);
+ HDassert(entry_ptr->is_dirty);
+ HDassert(entry_ptr->ring >= ring);
+
+ /* increment node pointer now, before we delete its target
+ * from the slist.
+ */
+ node_ptr = H5SL_next(node_ptr);
+ if(node_ptr != NULL) {
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ if(NULL == next_entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+
+ HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(next_entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->in_slist);
+ HDassert(next_entry_ptr->ring >= ring);
+ HDassert(entry_ptr != next_entry_ptr);
+ } /* end if */
+ else
+ next_entry_ptr = NULL;
+
+ HDassert(entry_ptr != NULL);
+ HDassert(entry_ptr->in_slist);
+
+ if(((!flush_marked_entries) || (entry_ptr->flush_marker)) &&
+ ((!entry_ptr->flush_me_last) ||
+ ((entry_ptr->flush_me_last) &&
+ (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
+ (entry_ptr->ring == ring)) {
+ if(entry_ptr->is_protected) {
+                    /* we probably have major problems -- but let's
+ * flush everything we can before we decide
+ * whether to flag an error.
+ */
+ tried_to_flush_protected_entry = TRUE;
+ protected_entries++;
+ } /* end if */
+ else if(entry_ptr->is_pinned) {
+
+                    /* Test to see if we can flush the entry now.
+ * If we can, go ahead and flush. Note that we
+ * aren't trying to do a destroy here, so that
+ * is not an issue.
+ */
+ if(entry_ptr->flush_dep_height == curr_flush_dep_height) {
+#if H5C_DO_SANITY_CHECKS
+ flushed_entries_count++;
+ flushed_entries_size += (int64_t)entry_ptr->size;
+ entry_size_change = 0;
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flags, entry_size_change_ptr, NULL) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed.")
+
+#if H5C_DO_SANITY_CHECKS
+ /* it is possible that the entry size changed
+ * during flush -- update flushed_entries_size
+ * to account for this.
+ */
+ flushed_entries_size += entry_size_change;
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ flushed_during_dep_loop = TRUE;
+
+ if((cache_ptr->slist_change_in_serialize) ||
+ (cache_ptr->slist_change_in_pre_serialize)) {
+ /* The slist has been modified by something
+                                 * other than the simple removal of the
+                                 * flushed entry after the flush.
+ *
+ * This has the potential to corrupt the
+ * scan through the slist, so restart it.
+ */
+ restart_slist_scan = TRUE;
+ cache_ptr->slist_change_in_pre_serialize = FALSE;
+ cache_ptr->slist_change_in_serialize = FALSE;
+
+ H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
+ } /* end if */
+ } /* end if */
+ else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
+ /* This shouldn't happen -- if it does, just scream and die. */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
+ } /* end else-if */
+ else {
+                    /* Test to see if we can flush the entry now.
+ * If we can, go ahead and flush. Note that we
+ * aren't trying to do a destroy here, so that
+ * is not an issue.
+ */
+ if(entry_ptr->flush_dep_height == curr_flush_dep_height) {
+#if H5C_DO_SANITY_CHECKS
+ flushed_entries_count++;
+ flushed_entries_size += (int64_t)entry_ptr->size;
+ entry_size_change = 0;
+#endif /* H5C_DO_SANITY_CHECKS */
+ if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flags, entry_size_change_ptr, NULL) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry.")
+
+#if H5C_DO_SANITY_CHECKS
+ /* it is possible that the entry size changed
+ * during flush -- update flushed_entries_size
+ * to account for this.
+ */
+ flushed_entries_size += entry_size_change;
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ flushed_during_dep_loop = TRUE;
+
+ if((cache_ptr->slist_change_in_serialize) ||
+ (cache_ptr->slist_change_in_pre_serialize)) {
+ /* The slist has been modified by something
+                                 * other than the simple removal of the
+                                 * flushed entry after the flush.
+ *
+ * This has the potential to corrupt the
+ * scan through the slist, so restart it.
+ */
+ restart_slist_scan = TRUE;
+ cache_ptr->slist_change_in_pre_serialize = FALSE;
+ cache_ptr->slist_change_in_serialize = FALSE;
+
+ H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
+ } /* end if */
+ } /* end if */
+ else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
+ /* This shouldn't happen -- if it does, just scream and die. */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
+ } /* end else */
+ } /* end if */
+ } /* while ( ( restart_slist_scan ) || ( node_ptr != NULL ) ) */
+
+ /* Check for incrementing flush dependency height */
+ if(flushed_during_dep_loop) {
+
+ /* If we flushed an entry at this flush dependency height
+ * start over at the bottom level of the flush dependencies
+ */
+ curr_flush_dep_height = 0;
+
+ /* Make certain we don't get stuck in an infinite loop */
+ flush_dep_passes++;
+
+ /* Set flag for outer loop */
+ flushed_entries_last_pass = TRUE;
+ } /* end if */
+ else
+ curr_flush_dep_height++;
+ } /* while ( curr_flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS) */
+
+ passes++;
+
+#if H5C_DO_SANITY_CHECKS
+ /* Verify that the slist size and length are as expected. */
+ HDassert((initial_slist_len + cache_ptr->slist_len_increase -
+ flushed_entries_count) == cache_ptr->slist_len);
+ HDassert((size_t)((int64_t)initial_slist_size +
+ cache_ptr->slist_size_increase -
+ flushed_entries_size) == cache_ptr->slist_size);
+#endif /* H5C_DO_SANITY_CHECKS */
+ } /* while */
+
+ HDassert(protected_entries <= cache_ptr->pl_len);
+
+ if(((cache_ptr->pl_len > 0) && (!ignore_protected)) || (tried_to_flush_protected_entry))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "cache has protected items")
+
+ if((cache_ptr->slist_len != 0) && (passes >= H5C__MAX_PASSES_ON_FLUSH))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush pass limit exceeded.")
+
+#if H5C_DO_SANITY_CHECKS
+ if(!flush_marked_entries) {
+ HDassert(cache_ptr->slist_ring_len[ring] == 0);
+ HDassert(cache_ptr->slist_ring_size[ring] == 0);
+ } /* end if */
+#endif /* H5C_DO_SANITY_CHECKS */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_flush_ring() */
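The pass structure above is easier to see in isolation. The following stand-alone sketch (all demo_* names are hypothetical stand-ins, not HDF5 API) shows the same retry strategy H5C_flush_ring applies with curr_flush_dep_height: flush every dirty entry at the current flush dependency height, and drop back to height zero whenever anything was flushed, with a pass counter as a backstop against looping forever.

    /* Minimal sketch of the flush-dependency-height loop in
     * H5C_flush_ring.  All names here are hypothetical stand-ins,
     * not HDF5 API.
     */
    #include <stdbool.h>
    #include <stddef.h>

    #define DEMO_NUM_HEIGHTS 6   /* stand-in for H5C__NUM_FLUSH_DEP_HEIGHTS */
    #define DEMO_MAX_PASSES  64  /* stand-in for H5C__MAX_PASSES_ON_FLUSH */

    typedef struct demo_entry_t {
        unsigned flush_dep_height;  /* 0 == no unflushed children below */
        bool     is_dirty;
    } demo_entry_t;

    /* Stand-in for H5C__flush_single_entry(): just mark the entry clean */
    static void
    demo_flush(demo_entry_t *entry)
    {
        entry->is_dirty = false;
    }

    static void
    demo_flush_all(demo_entry_t *entries, size_t n)
    {
        unsigned curr_height = 0;
        unsigned passes = 0;

        while(curr_height <= DEMO_NUM_HEIGHTS && passes < DEMO_MAX_PASSES) {
            bool flushed_during_pass = false;
            size_t u;

            /* Flush every dirty entry at the current dependency height */
            for(u = 0; u < n; u++)
                if(entries[u].is_dirty &&
                        entries[u].flush_dep_height == curr_height) {
                    demo_flush(&entries[u]);
                    flushed_during_pass = true;
                }

            if(flushed_during_pass) {
                /* In the real cache, flushing children can lower a
                 * parent's flush_dep_height, so the scan restarts at
                 * height zero (bounded by the pass counter) to pick
                 * such entries up.
                 */
                curr_height = 0;
                passes++;
            } /* end if */
            else
                curr_height++;
        } /* end while */
    }

    int
    main(void)
    {
        demo_entry_t entries[3] = {
            { 1, true },   /* parent: blocked until its child is flushed */
            { 0, true },   /* child */
            { 0, true }    /* independent entry */
        };

        demo_flush_all(entries, 3);
        return 0;
    }

The sketch omits the skip-list scan itself; the real code additionally restarts that scan whenever a pre_serialize or serialize callback modifies the slist, as tracked by the slist_change_in_* flags above.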
/*-------------------------------------------------------------------------
@@ -7742,6 +7837,7 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
HDassert(cache_ptr);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
HDassert(entry_ptr);
+ HDassert(entry_ptr->ring != H5C_RING_UNDEFINED);
/* If defined, initialize *entry_size_change_ptr to 0 */
if(entry_size_change_ptr != NULL)
@@ -8420,7 +8516,7 @@ H5C_load_entry(H5F_t * f,
MPI_Comm comm; /* File MPI Communicator */
int mpi_code; /* MPI error code */
#endif /* H5_HAVE_PARALLEL */
- void * ret_value; /* Return value */
+ void * ret_value = NULL; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
@@ -8822,6 +8918,8 @@ H5C_load_entry(H5F_t * f,
entry->flush_in_progress = FALSE;
entry->destroy_in_progress = FALSE;
+ entry->ring = H5C_RING_UNDEFINED;
+
/* Initialize flush dependency height fields */
entry->flush_dep_parent = NULL;
for(u = 0; u < H5C__NUM_FLUSH_DEP_HEIGHTS; u++)
@@ -10213,6 +10311,50 @@ H5C_retag_copied_metadata(H5C_t * cache_ptr, haddr_t metadata_tag)
FUNC_LEAVE_NOAPI_VOID
} /* H5C_retag_copied_metadata */
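Taken together, these hunks give the new ring field a simple lifecycle: H5C_load_entry() initializes entry->ring to H5C_RING_UNDEFINED, and the assertion added to H5C__flush_single_entry() above requires that a real ring has been assigned before the entry can be flushed. A minimal sketch of that invariant, assuming toy types in place of the real H5C_ring_t and H5C_cache_entry_t definitions:

    /* Toy model of the entry->ring lifecycle; the toy_* names are
     * hypothetical stand-ins, not the HDF5 definitions.
     */
    #include <assert.h>

    typedef enum {
        TOY_RING_UNDEFINED = 0,  /* value given at load time */
        TOY_RING_USER,           /* example of an assigned ring */
        TOY_RING_NTYPES
    } toy_ring_t;

    typedef struct {
        toy_ring_t ring;
    } toy_cache_entry_t;

    /* Mirrors the initialization added to H5C_load_entry() */
    static void
    toy_load(toy_cache_entry_t *entry)
    {
        entry->ring = TOY_RING_UNDEFINED;
    }

    /* Mirrors the new assertion in H5C__flush_single_entry() */
    static void
    toy_flush_single(toy_cache_entry_t *entry)
    {
        assert(entry->ring != TOY_RING_UNDEFINED);
    }

    int
    main(void)
    {
        toy_cache_entry_t entry;

        toy_load(&entry);
        entry.ring = TOY_RING_USER;  /* some later code path must assign a ring */
        toy_flush_single(&entry);

        return 0;
    }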
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_get_entry_ring
+ *
+ * Purpose: Given a file address, retrieve the ring for an entry at that
+ * address.
+ *
+ * On error, the value of *ring is not modified.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * 9/8/15
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_get_entry_ring(const H5F_t *f, haddr_t addr, H5C_ring_t *ring)
+{
+ H5C_t *cache_ptr; /* Pointer to cache */
+ H5C_cache_entry_t *entry_ptr; /* Pointer to cache entry at address */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(H5F_addr_defined(addr));
+
+ /* Locate the entry at the address */
+ H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
+ HDassert(entry_ptr);
+
+ /* Return the ring value */
+ *ring = entry_ptr->ring;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_get_entry_ring() */
+
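A hedged usage fragment for the new query; the caller context and error text below are illustrative assumptions, not code from this patch. Because *ring is left unmodified on failure, initializing it to H5C_RING_UNDEFINED lets the caller detect a stale value:

    /* Hypothetical caller; `f` and `addr` are assumed to come from the
     * surrounding metadata-cache code.
     */
    H5C_ring_t ring = H5C_RING_UNDEFINED;   /* unchanged if the call fails */

    if(H5C_get_entry_ring(f, addr, &ring) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to get entry ring")

    HDassert(ring > H5C_RING_UNDEFINED);
    HDassert(ring < H5C_RING_NTYPES);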
static herr_t
H5C__generate_image(H5F_t *f, H5C_t * cache_ptr, H5C_cache_entry_t *entry_ptr,
hid_t dxpl_id, int64_t *entry_size_change_ptr)