Diffstat (limited to 'src/H5C.c')
-rw-r--r--  src/H5C.c  | 1384
1 file changed, 880 insertions(+), 504 deletions(-)
diff --git a/src/H5C.c b/src/H5C.c
index 7d2d4ea..27b98d0 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -3147,6 +3147,10 @@ H5C_create(size_t max_cache_size,
cache_ptr->epoch_marker_ringbuf_last = 0;
cache_ptr->epoch_marker_ringbuf_size = 0;
+ /* Initialize all epoch marker entries' fields to zero/FALSE/NULL */
+ HDmemset(cache_ptr->epoch_markers, 0, sizeof(cache_ptr->epoch_markers));
+
+ /* Set non-zero/FALSE/NULL fields for epoch markers */
for ( i = 0; i < H5C__MAX_EPOCH_MARKERS; i++ )
{
(cache_ptr->epoch_marker_active)[i] = FALSE;
@@ -3155,27 +3159,7 @@ H5C_create(size_t max_cache_size,
H5C__H5C_CACHE_ENTRY_T_MAGIC;
#endif /* NDEBUG */
((cache_ptr->epoch_markers)[i]).addr = (haddr_t)i;
- ((cache_ptr->epoch_markers)[i]).size = (size_t)0;
((cache_ptr->epoch_markers)[i]).type = &epoch_marker_class;
- ((cache_ptr->epoch_markers)[i]).is_dirty = FALSE;
- ((cache_ptr->epoch_markers)[i]).dirtied = FALSE;
- ((cache_ptr->epoch_markers)[i]).is_protected = FALSE;
- ((cache_ptr->epoch_markers)[i]).is_read_only = FALSE;
- ((cache_ptr->epoch_markers)[i]).ro_ref_count = 0;
- ((cache_ptr->epoch_markers)[i]).is_pinned = FALSE;
- ((cache_ptr->epoch_markers)[i]).in_slist = FALSE;
- ((cache_ptr->epoch_markers)[i]).ht_next = NULL;
- ((cache_ptr->epoch_markers)[i]).ht_prev = NULL;
- ((cache_ptr->epoch_markers)[i]).next = NULL;
- ((cache_ptr->epoch_markers)[i]).prev = NULL;
- ((cache_ptr->epoch_markers)[i]).aux_next = NULL;
- ((cache_ptr->epoch_markers)[i]).aux_prev = NULL;
-#if H5C_COLLECT_CACHE_ENTRY_STATS
- ((cache_ptr->epoch_markers)[i]).accesses = 0;
- ((cache_ptr->epoch_markers)[i]).clears = 0;
- ((cache_ptr->epoch_markers)[i]).flushes = 0;
- ((cache_ptr->epoch_markers)[i]).pins = 0;
-#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
}
if ( H5C_reset_cache_hit_rate_stats(cache_ptr) != SUCCEED ) {
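The first hunk above replaces the long field-by-field initialization of the epoch markers with a single HDmemset() followed by setting only the non-zero fields. A minimal standalone sketch of that pattern, using a hypothetical marker struct rather than the real H5C_cache_entry_t:

#include <string.h>
#include <stddef.h>

#define N_MARKERS 10                      /* stand-in for H5C__MAX_EPOCH_MARKERS */

struct marker {                           /* hypothetical, much smaller than H5C_cache_entry_t */
    unsigned long  addr;
    size_t         size;
    const void    *type;
    int            is_dirty;
    struct marker *next;
    struct marker *prev;
};

static void init_markers(struct marker markers[], size_t n, const void *marker_class)
{
    size_t i;

    /* Zero every field of every marker in one call ... */
    memset(markers, 0, n * sizeof(markers[0]));

    /* ... then set only the fields that need non-zero values */
    for (i = 0; i < n; i++) {
        markers[i].addr = (unsigned long)i;
        markers[i].type = marker_class;
    }
}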
@@ -3824,222 +3808,244 @@ H5C_flush_cache(H5F_t * f,
( protected_entries == 0 ) &&
( flushed_entries_last_pass ) )
{
- flushed_entries_last_pass = FALSE;
- node_ptr = H5SL_first(cache_ptr->slist_ptr);
+ unsigned curr_flush_dep_height = 0;
+ unsigned flush_dep_passes = 0;
- if ( node_ptr != NULL ) {
+ flushed_entries_last_pass = FALSE;
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ /* Loop over all flush dependency heights of entries */
+ while((curr_flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS) &&
+ (cache_ptr->slist_len != 0) &&
+ (flush_dep_passes < H5C__MAX_PASSES_ON_FLUSH) )
+ {
+ hbool_t flushed_during_dep_loop = FALSE;
- if ( next_entry_ptr == NULL ) {
+ /* Start at beginning of skip list each time */
+ node_ptr = H5SL_first(cache_ptr->slist_ptr);
+ HDassert( node_ptr != NULL );
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "next_entry_ptr == NULL 1 ?!?!");
- }
-#ifndef NDEBUG
- HDassert( next_entry_ptr->magic ==
- H5C__H5C_CACHE_ENTRY_T_MAGIC );
-#endif /* NDEBUG */
- HDassert( next_entry_ptr->is_dirty );
+ /* Get cache entry for this node */
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ if ( NULL == next_entry_ptr )
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+ HDassert( next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC );
+ HDassert( next_entry_ptr->is_dirty );
HDassert( next_entry_ptr->in_slist );
- } else {
-
- next_entry_ptr = NULL;
-
- }
-
- HDassert( node_ptr != NULL );
-
#if H5C_DO_SANITY_CHECKS
- /* For sanity checking, try to verify that the skip list has
- * the expected size and number of entries at the end of each
- * internal while loop (see below).
- *
- * Doing this get a bit tricky, as depending on flags, we may
- * or may not flush all the entries in the slist.
- *
- * To make things more entertaining, with the advent of the
- * fractal heap, the entry flush callback can cause entries
- * to be dirtied, resized, and/or renamed.
- *
- * To deal with this, we first make note of the initial
- * skip list length and size:
- */
- initial_slist_len = cache_ptr->slist_len;
- initial_slist_size = cache_ptr->slist_size;
+ /* For sanity checking, try to verify that the skip list has
+ * the expected size and number of entries at the end of each
+ * internal while loop (see below).
+ *
+ * Doing this gets a bit tricky, as depending on flags, we may
+ * or may not flush all the entries in the slist.
+ *
+ * To make things more entertaining, with the advent of the
+ * fractal heap, the entry flush callback can cause entries
+ * to be dirtied, resized, and/or renamed.
+ *
+ * To deal with this, we first make note of the initial
+ * skip list length and size:
+ */
+ initial_slist_len = cache_ptr->slist_len;
+ initial_slist_size = cache_ptr->slist_size;
- /* We then zero counters that we use to track the number
- * and total size of entries flushed:
- */
- flushed_entries_count = 0;
- flushed_entries_size = 0;
-
- /* As mentioned above, there is the possibility that
- * entries will be dirtied, resized, and/or flushed during
- * our pass through the skip list. To capture the number
- * of entries added, and the skip list size delta,
- * zero the slist_len_increase and slist_size_increase of
- * the cache's instance of H5C_t. These fields will be
- * updated elsewhere to account for slist insertions and/or
- * dirty entry size changes.
- */
- cache_ptr->slist_len_increase = 0;
- cache_ptr->slist_size_increase = 0;
+ /* We then zero counters that we use to track the number
+ * and total size of entries flushed:
+ */
+ flushed_entries_count = 0;
+ flushed_entries_size = 0;
+
+ /* As mentioned above, there is the possibility that
+ * entries will be dirtied, resized, and/or flushed during
+ * our pass through the skip list. To capture the number
+ * of entries added, and the skip list size delta,
+ * zero the slist_len_increase and slist_size_increase of
+ * the cache's instance of H5C_t. These fields will be
+ * updated elsewhere to account for slist insertions and/or
+ * dirty entry size changes.
+ */
+ cache_ptr->slist_len_increase = 0;
+ cache_ptr->slist_size_increase = 0;
- /* at the end of the loop, use these values to compute the
- * expected slist length and size and compare this with the
- * value recorded in the cache's instance of H5C_t.
- */
+ /* at the end of the loop, use these values to compute the
+ * expected slist length and size and compare this with the
+ * value recorded in the cache's instance of H5C_t.
+ */
#endif /* H5C_DO_SANITY_CHECKS */
- while ( node_ptr != NULL )
- {
- entry_ptr = next_entry_ptr;
-
- /* With the advent of the fractal heap, it is possible
- * that the flush callback will dirty and/or resize
- * other entries in the cache. In particular, while
- * Quincey has promised me that this will never happen,
- * it is possible that the flush callback for an
- * entry may protect an entry that is not in the cache,
- * perhaps causing the cache to flush and possibly
- * evict the entry associated with node_ptr to make
- * space for the new entry.
- *
- * Thus we do a bit of extra sanity checking on entry_ptr,
- * and break out of this scan of the skip list if we
- * detect minor problems. We have a bit of leaway on the
- * number of passes though the skip list, so this shouldn't
- * be an issue in the flush in and of itself, as it should
- * be all but impossible for this to happen more than once
- * in any flush.
- *
- * Observe that that breaking out of the scan early
- * shouldn't break the sanity checks just after the end
- * of this while loop.
- *
- * If an entry has merely been marked clean and removed from
- * the s-list, we simply break out of the scan.
- *
- * If the entry has been evicted, we flag an error and
- * exit.
- */
+ while ( node_ptr != NULL )
+ {
+ entry_ptr = next_entry_ptr;
+
+ /* With the advent of the fractal heap, it is possible
+ * that the flush callback will dirty and/or resize
+ * other entries in the cache. In particular, while
+ * Quincey has promised me that this will never happen,
+ * it is possible that the flush callback for an
+ * entry may protect an entry that is not in the cache,
+ * perhaps causing the cache to flush and possibly
+ * evict the entry associated with node_ptr to make
+ * space for the new entry.
+ *
+ * Thus we do a bit of extra sanity checking on entry_ptr,
+ * and break out of this scan of the skip list if we
+ * detect minor problems. We have a bit of leeway on the
+ * number of passes through the skip list, so this shouldn't
+ * be an issue in the flush in and of itself, as it should
+ * be all but impossible for this to happen more than once
+ * in any flush.
+ *
+ * Observe that breaking out of the scan early
+ * shouldn't break the sanity checks just after the end
+ * of this while loop.
+ *
+ * If an entry has merely been marked clean and removed from
+ * the s-list, we simply break out of the scan.
+ *
+ * If the entry has been evicted, we flag an error and
+ * exit.
+ */
#ifndef NDEBUG
- if ( entry_ptr->magic != H5C__H5C_CACHE_ENTRY_T_MAGIC ) {
+ if ( entry_ptr->magic != H5C__H5C_CACHE_ENTRY_T_MAGIC ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "entry_ptr->magic invalid ?!?!");
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "entry_ptr->magic invalid ?!?!");
- } else
+ } else
#endif /* NDEBUG */
- if ( ( ! entry_ptr->is_dirty ) ||
- ( ! entry_ptr->in_slist ) ) {
+ if ( ( ! entry_ptr->is_dirty ) ||
+ ( ! entry_ptr->in_slist ) ) {
- /* the s-list has been modified out from under us.
- * set node_ptr to NULL and break out of the loop.
- */
- node_ptr = NULL;
- break;
- }
+ /* the s-list has been modified out from under us.
+ * set node_ptr to NULL and break out of the inner loop.
+ */
+ node_ptr = NULL;
+ goto end_of_inner_loop;
+ }
- /* increment node pointer now, before we delete its target
- * from the slist.
- */
- node_ptr = H5SL_next(node_ptr);
+ /* increment node pointer now, before we delete its target
+ * from the slist.
+ */
+ node_ptr = H5SL_next(node_ptr);
- if ( node_ptr != NULL ) {
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ if ( node_ptr != NULL ) {
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- if ( next_entry_ptr == NULL ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "next_entry_ptr == NULL 2 ?!?!");
+ if ( next_entry_ptr == NULL ) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "next_entry_ptr == NULL 2 ?!?!");
+ }
+ HDassert( next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC );
+ HDassert( next_entry_ptr->is_dirty );
+ HDassert( next_entry_ptr->in_slist );
+ } else {
+ next_entry_ptr = NULL;
}
-#ifndef NDEBUG
- HDassert( next_entry_ptr->magic ==
- H5C__H5C_CACHE_ENTRY_T_MAGIC );
-#endif /* NDEBUG */
- HDassert( next_entry_ptr->is_dirty );
- HDassert( next_entry_ptr->in_slist );
- } else {
- next_entry_ptr = NULL;
- }
- HDassert( entry_ptr != NULL );
- HDassert( entry_ptr->in_slist );
+ HDassert( entry_ptr != NULL );
+ HDassert( entry_ptr->in_slist );
- if ( ( ! flush_marked_entries ) ||
- ( entry_ptr->flush_marker ) ) {
+ if ( ( ! flush_marked_entries ) ||
+ ( entry_ptr->flush_marker ) ) {
- if ( entry_ptr->is_protected ) {
+ if ( entry_ptr->is_protected ) {
- /* we probably have major problems -- but lets flush
- * everything we can before we decide whether to flag
- * an error.
- */
- tried_to_flush_protected_entry = TRUE;
- protected_entries++;
-
- } else if ( entry_ptr->is_pinned ) {
- /* Test to see if we are can flush the entry now.
- * If we can, go ahead and flush. Note that we
- * aren't trying to do a destroy here, so that
- * is not an issue.
- */
- if ( TRUE ) { /* When we get to multithreaded cache,
- * we will need either locking code,
- * and/or a test to see if the entry
- * is in flushable condition here.
- */
+ /* we probably have major problems -- but lets flush
+ * everything we can before we decide whether to flag
+ * an error.
+ */
+ tried_to_flush_protected_entry = TRUE;
+ protected_entries++;
+
+ } else if ( entry_ptr->is_pinned ) {
+ /* Test to see if we can flush the entry now.
+ * If we can, go ahead and flush. Note that we
+ * aren't trying to do a destroy here, so that
+ * is not an issue.
+ */
+ if(entry_ptr->flush_dep_height == curr_flush_dep_height ) {
#if H5C_DO_SANITY_CHECKS
- flushed_entries_count++;
- flushed_entries_size += entry_ptr->size;
+ flushed_entries_count++;
+ flushed_entries_size += entry_ptr->size;
#endif /* H5C_DO_SANITY_CHECKS */
- status = H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- cache_ptr,
- NULL,
- entry_ptr->addr,
- flags,
- &first_flush,
- FALSE);
- if ( status < 0 ) {
-
- /* This shouldn't happen -- if it does, we are
- * toast so just scream and die.
- */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
- "dirty pinned entry flush failed.")
- }
- flushed_entries_last_pass = TRUE;
- }
- } else {
+ status = H5C_flush_single_entry(f,
+ primary_dxpl_id,
+ secondary_dxpl_id,
+ cache_ptr,
+ NULL,
+ entry_ptr->addr,
+ flags,
+ &first_flush,
+ FALSE);
+ if ( status < 0 ) {
+
+ /* This shouldn't happen -- if it does, we are
+ * toast so just scream and die.
+ */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "dirty pinned entry flush failed.")
+ }
+ flushed_during_dep_loop = TRUE;
+ } /* end if */
+ else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
+ /* This shouldn't happen -- if it does, just scream and die. */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
+ } else {
+ /* Test to see if we can flush the entry now.
+ * If we can, go ahead and flush. Note that we
+ * aren't trying to do a destroy here, so that
+ * is not an issue.
+ */
+ if(entry_ptr->flush_dep_height == curr_flush_dep_height ){
#if H5C_DO_SANITY_CHECKS
- flushed_entries_count++;
- flushed_entries_size += entry_ptr->size;
+ flushed_entries_count++;
+ flushed_entries_size += entry_ptr->size;
#endif /* H5C_DO_SANITY_CHECKS */
- status = H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- cache_ptr,
- NULL,
- entry_ptr->addr,
- flags,
- &first_flush,
- FALSE);
- if ( status < 0 ) {
+ status = H5C_flush_single_entry(f,
+ primary_dxpl_id,
+ secondary_dxpl_id,
+ cache_ptr,
+ NULL,
+ entry_ptr->addr,
+ flags,
+ &first_flush,
+ FALSE);
+ if ( status < 0 ) {
+
+ /* This shouldn't happen -- if it does, we are
+ * toast so just scream and die.
+ */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "Can't flush entry.")
+ }
+ flushed_during_dep_loop = TRUE;
+ } /* end if */
+ else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
+ /* This shouldn't happen -- if it does, just scream and die. */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
+ } /* end else */
+ } /* end if */
+ } /* while ( node_ptr != NULL ) */
+
+ /* Check for incrementing flush dependency height */
+ if(flushed_during_dep_loop) {
+ /* If we flushed an entry at this flush dependency height
+ * start over at the bottom level of the flush dependencies
+ */
+ curr_flush_dep_height = 0;
- /* This shouldn't happen -- if it does, we are
- * toast so just scream and die.
- */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
- "Can't flush entry.")
- }
- flushed_entries_last_pass = TRUE;
- }
- }
- } /* while ( node_ptr != NULL ) */
+ /* Make certain we don't get stuck in an infinite loop */
+ flush_dep_passes++;
+
+ /* Set flag for outer loop */
+ flushed_entries_last_pass = TRUE;
+ } /* end if */
+ else
+ curr_flush_dep_height++;
+
+ } /* while ( curr_flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS) */
+end_of_inner_loop:
#if H5C_DO_SANITY_CHECKS
/* Verify that the slist size and length are as expected. */
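The rewritten flush loop above iterates over flush dependency heights, restarting at height zero whenever anything is flushed and bailing out after a bounded number of passes. The toy program below sketches only that control flow under assumed names and bounds; it does not model the skip list, protected/pinned entries, or the real flush callbacks:

#include <stdio.h>

#define NUM_FLUSH_DEP_HEIGHTS 6           /* assumed bound, like H5C__NUM_FLUSH_DEP_HEIGHTS */
#define MAX_PASSES_ON_FLUSH   4           /* assumed pass limit, like H5C__MAX_PASSES_ON_FLUSH */
#define N_ENTRIES             5

/* Toy model: each dirty entry has only a flush dependency height. */
static unsigned heights[N_ENTRIES] = { 0, 2, 1, 0, 3 };
static int      dirty[N_ENTRIES]   = { 1, 1, 1, 1, 1 };

static unsigned dirty_remaining(void)
{
    unsigned n = 0, i;
    for (i = 0; i < N_ENTRIES; i++)
        n += (unsigned)dirty[i];
    return n;
}

/* "Flush" every dirty entry at the given height; report whether any was flushed. */
static int flush_entries_at_height(unsigned height)
{
    int flushed = 0;
    unsigned i;
    for (i = 0; i < N_ENTRIES; i++)
        if (dirty[i] && heights[i] == height) {
            dirty[i] = 0;
            flushed = 1;
        }
    return flushed;
}

int main(void)
{
    unsigned curr_height = 0, passes = 0;

    while (curr_height <= NUM_FLUSH_DEP_HEIGHTS &&
           dirty_remaining() != 0 &&
           passes < MAX_PASSES_ON_FLUSH) {
        if (flush_entries_at_height(curr_height)) {
            curr_height = 0;   /* something was flushed: restart at the bottom height */
            passes++;          /* and guard against an infinite loop */
        }
        else
            curr_height++;     /* nothing dirty at this height: move up */
    }
    printf("entries left unflushed: %u\n", dirty_remaining());
    return 0;
}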
@@ -4802,9 +4808,7 @@ H5C_insert_entry(H5F_t * f,
void * thing,
unsigned int flags)
{
- /* const char * fcn_name = "H5C_insert_entry()"; */
herr_t result;
- herr_t ret_value = SUCCEED; /* Return value */
hbool_t first_flush = TRUE;
hbool_t insert_pinned;
hbool_t set_flush_marker;
@@ -4812,6 +4816,8 @@ H5C_insert_entry(H5F_t * f,
size_t empty_space;
H5C_cache_entry_t * entry_ptr;
H5C_cache_entry_t * test_entry_ptr;
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(H5C_insert_entry, FAIL)
@@ -4900,6 +4906,12 @@ H5C_insert_entry(H5F_t * f,
entry_ptr->destroy_in_progress = FALSE;
entry_ptr->free_file_space_on_destroy = FALSE;
+ /* Initialize flush dependency height fields */
+ entry_ptr->flush_dep_parent = NULL;
+ for(u = 0; u < H5C__NUM_FLUSH_DEP_HEIGHTS; u++)
+ entry_ptr->child_flush_dep_height_rc[u] = 0;
+ entry_ptr->flush_dep_height = 0;
+
entry_ptr->ht_next = NULL;
entry_ptr->ht_prev = NULL;
@@ -7661,20 +7673,14 @@ H5C_stats__reset(H5C_t UNUSED * cache_ptr)
/*-------------------------------------------------------------------------
* Function: H5C_unpin_entry()
*
- * Purpose: Unpin a cache entry. The entry must be unprotected at
- * the time of call, and must be pinned.
+ * Purpose: Unpin a cache entry. The entry can be either protected or
+ * unprotected at the time of call, but must be pinned.
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: John Mainzer
* 3/22/06
*
- * Modifications:
- *
- * JRM -- 4/26/06
- * Modified routine to allow it to operate on protected
- * entries.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -8490,6 +8496,342 @@ done:
} /* H5C_validate_resize_config() */
+/*-------------------------------------------------------------------------
+ * Function: H5C_adjust_flush_dependency_rc()
+ *
+ * Purpose: "Atomically" adjust flush dependency ref. counts for an entry,
+ * as a result of a flush dependency child's height changing.
+ *
+ * Note: Entry will remain in flush dependency relationship with its
+ * child entry (i.e. it's not going to get unpinned as a result
+ * of this change), but change could trickle upward, if this
+ * entry's height changes and it has a flush dependency parent.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * 3/05/09
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+H5C_adjust_flush_dependency_rc(H5C_cache_entry_t * cache_entry,
+ unsigned old_child_height, unsigned new_child_height)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5C_adjust_flush_dependency_rc)
+
+ /* Sanity checks */
+ HDassert(cache_entry);
+ HDassert(cache_entry->is_pinned);
+ HDassert(cache_entry->flush_dep_height > 0);
+ HDassert(cache_entry->flush_dep_height < H5C__NUM_FLUSH_DEP_HEIGHTS);
+ HDassert(cache_entry->child_flush_dep_height_rc[old_child_height] > 0);
+ HDassert(old_child_height < H5C__NUM_FLUSH_DEP_HEIGHTS);
+ HDassert(old_child_height != new_child_height);
+ HDassert(new_child_height < H5C__NUM_FLUSH_DEP_HEIGHTS);
+
+ /* Adjust ref. counts for entry's flush dependency children heights */
+ cache_entry->child_flush_dep_height_rc[new_child_height]++;
+ cache_entry->child_flush_dep_height_rc[old_child_height]--;
+
+ /* Check for flush dependency height of entry increasing */
+ if((new_child_height + 1) > cache_entry->flush_dep_height) {
+
+ /* Check if entry has _its_ own parent flush dependency entry */
+ if(NULL != cache_entry->flush_dep_parent) {
+ /* Adjust flush dependency ref. counts on entry's parent */
+ H5C_adjust_flush_dependency_rc(cache_entry->flush_dep_parent, cache_entry->flush_dep_height, new_child_height + 1);
+ } /* end if */
+
+ /* Set new flush dependency height of entry */
+ cache_entry->flush_dep_height = new_child_height + 1;
+ } /* end if */
+ else {
+ /* If the child's flush dep. height decreased and the ref. count for the
+ * old child height dropped to zero, the parent's flush dependency
+ * height may have dropped as well.
+ */
+ if((new_child_height < old_child_height)
+ && ((old_child_height + 1) == cache_entry->flush_dep_height)
+ && (0 == cache_entry->child_flush_dep_height_rc[old_child_height])) {
+ int i; /* Local index variable */
+
+ /* Re-scan child flush dependency height ref. counts to determine
+ * this entry's height.
+ */
+#ifndef NDEBUG
+ for(i = (H5C__NUM_FLUSH_DEP_HEIGHTS - 1); i > (int)new_child_height; i--)
+ HDassert(0 == cache_entry->child_flush_dep_height_rc[i]);
+#endif /* NDEBUG */
+ for(i = (int)new_child_height; i >= 0; i--)
+ /* Check for child flush dependencies of this height */
+ if(cache_entry->child_flush_dep_height_rc[i] > 0)
+ break;
+
+ /* Sanity checks */
+ HDassert((unsigned)(i + 1) < cache_entry->flush_dep_height);
+
+ /* Check if entry has _its_ own parent flush dependency entry */
+ if(NULL != cache_entry->flush_dep_parent) {
+ /* Adjust flush dependency ref. counts on entry's parent */
+ H5C_adjust_flush_dependency_rc(cache_entry->flush_dep_parent, cache_entry->flush_dep_height, (unsigned)(i + 1));
+ } /* end if */
+
+ /* Set new flush dependency height of entry */
+ cache_entry->flush_dep_height = (unsigned)(i + 1);
+ } /* end if */
+ } /* end else */
+
+
+ /* Post-conditions, for successful operation */
+ HDassert(cache_entry->is_pinned);
+ HDassert(cache_entry->flush_dep_height > 0);
+ HDassert(cache_entry->flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS);
+ HDassert(cache_entry->child_flush_dep_height_rc[new_child_height] > 0);
+
+ FUNC_LEAVE_NOAPI_VOID
+} /* H5C_adjust_flush_dependency_rc() */
+
+
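The reverse scan in H5C_adjust_flush_dependency_rc() above amounts to recomputing an entry's flush dependency height from its per-height child reference counts. A small sketch of that rule, with the array bound and names assumed:

#define NUM_FLUSH_DEP_HEIGHTS 6   /* assumed bound */

/* A parent's flush dependency height is one more than the largest child
 * height that still has a nonzero reference count; with no children left
 * the entry is no longer a parent and its height drops back to zero. */
static unsigned parent_height_from_rc(const unsigned child_rc[NUM_FLUSH_DEP_HEIGHTS])
{
    int i;

    for (i = NUM_FLUSH_DEP_HEIGHTS - 1; i >= 0; i--)
        if (child_rc[i] > 0)
            return (unsigned)(i + 1);

    return 0;
}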
+/*-------------------------------------------------------------------------
+ * Function: H5C_create_flush_dependency()
+ *
+ * Purpose: Initiates a parent<->child entry flush dependency. The parent
+ * entry must be protected at the time of call, and must have all
+ * dependencies removed before the cache can shut down.
+ *
+ * Note: Flush dependencies in the cache indicate that a child entry
+ * must be flushed to the file before its parent. (This is
+ * currently used to implement Single-Writer/Multiple-Reader (SWMR)
+ * I/O access for data structures in the file).
+ *
+ * Each child entry can have only one parent entry, but parent
+ * entries can have >1 child entries. The flush dependency
+ * height of a parent entry is one greater than the max. flush
+ * dependency height of its children.
+ *
+ * Creating a flush dependency between two entries will also pin
+ * the parent entry. (The parent entry must _not_ be pinned
+ * through some other mechanism)
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * 3/05/09
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef NDEBUG
+herr_t
+H5C_create_flush_dependency(H5C_t * cache_ptr, void * parent_thing,
+ void * child_thing)
+#else
+herr_t
+H5C_create_flush_dependency(H5C_t UNUSED * cache_ptr, void * parent_thing,
+ void * child_thing)
+#endif
+{
+ H5C_cache_entry_t * parent_entry = (H5C_cache_entry_t *)parent_thing; /* Ptr to parent thing's entry */
+ H5C_cache_entry_t * child_entry = (H5C_cache_entry_t *)child_thing; /* Ptr to child thing's entry */
+#ifndef NDEBUG
+ unsigned prev_flush_dep_height = parent_entry->flush_dep_height; /* Previous flush height for parent entry */
+#endif /* NDEBUG */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5C_create_flush_dependency, FAIL)
+
+ /* Sanity checks */
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(parent_entry);
+ HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(parent_entry->flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS);
+ HDassert(H5F_addr_defined(parent_entry->addr));
+ HDassert(child_entry);
+ HDassert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(H5F_addr_defined(child_entry->addr));
+ HDassert(child_entry->flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS);
+
+ /* More sanity checks */
+ if(child_entry == parent_entry)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Child entry flush dependency parent can't be itself")
+ if(!parent_entry->is_protected)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Parent entry isn't protected")
+ if(NULL != child_entry->flush_dep_parent)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Child entry already has flush dependency parent")
+ {
+ H5C_cache_entry_t *tmp_entry = parent_entry; /* Temporary cache entry in flush dependency chain */
+ unsigned tmp_flush_height = 0; /* Number of flush dependency parents above the parent entry */
+
+ /* Find the top entry in the flush dependency list */
+ while(NULL != tmp_entry->flush_dep_parent) {
+ tmp_flush_height++;
+ tmp_entry = tmp_entry->flush_dep_parent;
+ } /* end while */
+
+ /* Check if we will make the dependency chain too long */
+ if((tmp_flush_height + child_entry->flush_dep_height + 1)
+ > H5C__NUM_FLUSH_DEP_HEIGHTS)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Combined flush dependency height too large")
+ }
+
+ /* Check for parent already pinned */
+ if(parent_entry->is_pinned) {
+ /* Verify that the parent entry was pinned through a flush dependency relationship */
+ if(0 == parent_entry->flush_dep_height)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Parent entry wasn't pinned through flush dependency")
+ } /* end if */
+ else {
+ /* Sanity check */
+ HDassert(parent_entry->flush_dep_height == 0);
+
+ /* Pin the parent entry */
+ parent_entry->is_pinned = TRUE;
+ H5C__UPDATE_STATS_FOR_PIN(cache_ptr, parent_entry)
+ } /* end else */
+
+ /* Increment ref. count for parent's flush dependency children heights */
+ parent_entry->child_flush_dep_height_rc[child_entry->flush_dep_height]++;
+
+ /* Check for increasing parent flush dependency height */
+ if((child_entry->flush_dep_height + 1) > parent_entry->flush_dep_height) {
+
+ /* Check if parent entry has _its_ own parent flush dependency entry */
+ if(NULL != parent_entry->flush_dep_parent) {
+ /* Adjust flush dependency ref. counts on parent entry's parent */
+ H5C_adjust_flush_dependency_rc(parent_entry->flush_dep_parent, parent_entry->flush_dep_height, (child_entry->flush_dep_height + 1));
+ } /* end if */
+
+ /* Increase flush dependency height of parent entry */
+ parent_entry->flush_dep_height = child_entry->flush_dep_height + 1;
+ } /* end if */
+
+ /* Set parent for child entry */
+ child_entry->flush_dep_parent = parent_entry;
+
+
+ /* Post-conditions, for successful operation */
+ HDassert(parent_entry->is_pinned);
+ HDassert(parent_entry->flush_dep_height > 0);
+ HDassert(parent_entry->flush_dep_height < H5C__NUM_FLUSH_DEP_HEIGHTS);
+ HDassert(prev_flush_dep_height <= parent_entry->flush_dep_height);
+ HDassert(parent_entry->child_flush_dep_height_rc[child_entry->flush_dep_height] > 0);
+ HDassert(NULL != child_entry->flush_dep_parent);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_create_flush_dependency() */
+
+
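The bookkeeping that H5C_create_flush_dependency() performs on the parent can be summarized with a hypothetical, heavily simplified entry struct; propagation of the height change to the parent's own parent (the H5C_adjust_flush_dependency_rc() call) is left out:

#define NUM_FLUSH_DEP_HEIGHTS 6           /* assumed bound */

/* Hypothetical stand-in for H5C_cache_entry_t: only the flush dependency
 * bookkeeping fields are modeled. */
struct dep_entry {
    int               is_pinned;
    unsigned          flush_dep_height;
    unsigned          child_rc[NUM_FLUSH_DEP_HEIGHTS];
    struct dep_entry *flush_dep_parent;
};

/* Record that 'child' must be flushed before 'parent'. */
static void create_dependency(struct dep_entry *parent, struct dep_entry *child)
{
    if (!parent->is_pinned)
        parent->is_pinned = 1;                        /* pin through the dependency */

    parent->child_rc[child->flush_dep_height]++;      /* one more child at this height */

    if (child->flush_dep_height + 1 > parent->flush_dep_height)
        parent->flush_dep_height = child->flush_dep_height + 1;

    child->flush_dep_parent = parent;
}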
+/*-------------------------------------------------------------------------
+ * Function: H5C_destroy_flush_dependency()
+ *
+ * Purpose: Terminates a parent<->child entry flush dependency. The
+ * parent entry must be pinned and have a positive flush
+ * dependency height (which could go to zero as a result of
+ * this operation).
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * 3/05/09
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_destroy_flush_dependency(H5C_t * cache_ptr, void *parent_thing,
+ void * child_thing)
+{
+ H5C_cache_entry_t * parent_entry = (H5C_cache_entry_t *)parent_thing; /* Ptr to parent entry */
+ H5C_cache_entry_t * child_entry = (H5C_cache_entry_t *)child_thing; /* Ptr to child entry */
+#ifndef NDEBUG
+ unsigned prev_flush_dep_height = parent_entry->flush_dep_height; /* Previous flush height for parent entry */
+#endif /* NDEBUG */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5C_destroy_flush_dependency, FAIL)
+
+ /* Sanity checks */
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(parent_entry);
+ HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(H5F_addr_defined(parent_entry->addr));
+ HDassert(parent_entry->flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS);
+ HDassert(child_entry);
+ HDassert(child_entry->flush_dep_parent != child_entry);
+ HDassert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(H5F_addr_defined(child_entry->addr));
+
+ /* Usage checks */
+ if(!parent_entry->is_pinned)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry isn't pinned")
+ if(0 == parent_entry->flush_dep_height)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry isn't a flush dependency parent")
+ if(NULL == child_entry->flush_dep_parent)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Child entry doesn't have a flush dependency parent")
+ if(0 == parent_entry->child_flush_dep_height_rc[child_entry->flush_dep_height])
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry flush dependency ref. count has no child entries of this height")
+ if(child_entry->flush_dep_parent != parent_entry)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry isn't flush dependency parent for child entry")
+
+ /* Decrement the ref. count for flush dependency height of children for parent entry */
+ parent_entry->child_flush_dep_height_rc[child_entry->flush_dep_height]--;
+
+ /* Check for flush dependency ref. count at this height going to zero and
+ * parent entry flush dependency height dropping
+ */
+ if(((child_entry->flush_dep_height + 1) == parent_entry->flush_dep_height) &&
+ 0 == parent_entry->child_flush_dep_height_rc[child_entry->flush_dep_height]) {
+ int i; /* Local index variable */
+
+ /* Reverse scan for new flush dependency height of parent */
+#ifndef NDEBUG
+ for(i = (H5C__NUM_FLUSH_DEP_HEIGHTS - 1); i > (int)child_entry->flush_dep_height; i--)
+ HDassert(0 == parent_entry->child_flush_dep_height_rc[i]);
+#endif /* NDEBUG */
+ for(i = (int)child_entry->flush_dep_height; i >= 0; i--)
+ /* Check for child flush dependencies of this height */
+ if(parent_entry->child_flush_dep_height_rc[i] > 0)
+ break;
+
+ /* Sanity check */
+ HDassert((unsigned)(i + 1) < parent_entry->flush_dep_height);
+
+ /* Check if parent entry is a child in another flush dependency relationship */
+ if(NULL != parent_entry->flush_dep_parent) {
+ /* Change flush dependency ref. counts of parent's parent */
+ H5C_adjust_flush_dependency_rc(parent_entry->flush_dep_parent, parent_entry->flush_dep_height, (unsigned)(i + 1));
+ } /* end if */
+
+ /* Decrease flush dependency height of parent entry */
+ parent_entry->flush_dep_height = (unsigned)(i + 1);
+
+ /* Check for height of parent dropping to zero (i.e. no longer a
+ * parent of _any_ child flush dependencies).
+ */
+ if(0 == parent_entry->flush_dep_height) {
+ if(!parent_entry->is_protected)
+ H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, parent_entry, FAIL)
+
+ /* Unpin parent entry */
+ parent_entry->is_pinned = FALSE;
+ H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, parent_entry)
+ } /* end if */
+ } /* end if */
+
+ /* Reset parent of child entry */
+ child_entry->flush_dep_parent = NULL;
+
+ /* Post-conditions, for successful operation */
+ HDassert(prev_flush_dep_height >= parent_entry->flush_dep_height);
+ HDassert(NULL == child_entry->flush_dep_parent);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_destroy_flush_dependency() */
+
+
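The matching teardown in H5C_destroy_flush_dependency() reverses that bookkeeping: decrement the per-height reference count, rescan for the new height when the top height empties, and unpin the parent once it has no children left. A sketch, continuing the hypothetical struct dep_entry from the previous example (grandparent propagation again omitted):

static void destroy_dependency(struct dep_entry *parent, struct dep_entry *child)
{
    int i;

    parent->child_rc[child->flush_dep_height]--;      /* one fewer child at this height */

    /* If that was the last child at the height that determined the parent's
     * height, rescan downward for the new largest occupied height. */
    if (child->flush_dep_height + 1 == parent->flush_dep_height &&
        parent->child_rc[child->flush_dep_height] == 0) {
        for (i = (int)child->flush_dep_height; i >= 0; i--)
            if (parent->child_rc[i] > 0)
                break;
        parent->flush_dep_height = (unsigned)(i + 1);

        if (parent->flush_dep_height == 0)
            parent->is_pinned = 0;                    /* no children left: unpin */
    }

    child->flush_dep_parent = NULL;
}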
/*************************************************************************/
/**************************** Private Functions: *************************/
/*************************************************************************/
@@ -9874,8 +10216,6 @@ H5C_flush_invalidate_cache(H5F_t * f,
herr_t ret_value = SUCCEED;
hbool_t done = FALSE;
hbool_t first_flush = TRUE;
- hbool_t first_pass = TRUE;
- hbool_t have_pinned_entries;
int32_t protected_entries = 0;
int32_t i;
int32_t cur_pel_len;
@@ -9952,347 +10292,376 @@ H5C_flush_invalidate_cache(H5F_t * f,
while ( ! done )
{
- first_pass = FALSE;
+ unsigned curr_flush_dep_height = 0;
+ unsigned flush_dep_passes = 0;
- have_pinned_entries = ( cur_pel_len > 0 );
+ /* Loop over all flush dependency heights of entries */
+ while((curr_flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS) &&
+ (cache_ptr->index_len > 0 ) &&
+ (flush_dep_passes < H5C__MAX_PASSES_ON_FLUSH) )
+ {
+ hbool_t flushed_during_dep_loop = FALSE;
- /* first, try to flush-destroy any dirty entries. Do this by
- * making a scan through the slist. Note that new dirty entries
- * may be created by the flush call backs. Thus it is possible
- * that the slist will not be empty after we finish the scan.
- */
+ /* first, try to flush-destroy any dirty entries. Do this by
+ * making a scan through the slist. Note that new dirty entries
+ * may be created by the flush call backs. Thus it is possible
+ * that the slist will not be empty after we finish the scan.
+ */
- if ( cache_ptr->slist_len == 0 ) {
+ if ( cache_ptr->slist_len == 0 ) {
- node_ptr = NULL;
- HDassert( cache_ptr->slist_size == 0 );
+ node_ptr = NULL;
+ HDassert( cache_ptr->slist_size == 0 );
- } else {
+ } else {
- node_ptr = H5SL_first(cache_ptr->slist_ptr);
+ node_ptr = H5SL_first(cache_ptr->slist_ptr);
- if ( node_ptr == NULL ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "slist_len != 0 && node_ptr == NULL");
- }
+ if ( node_ptr == NULL ) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "slist_len != 0 && node_ptr == NULL");
+ }
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- if ( next_entry_ptr == NULL ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "next_entry_ptr == NULL 1 ?!?!");
- }
-#ifndef NDEBUG
- HDassert( next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC );
-#endif /* NDEBUG */
- HDassert( next_entry_ptr->is_dirty );
- HDassert( next_entry_ptr->in_slist );
+ if ( next_entry_ptr == NULL ) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "next_entry_ptr == NULL 1 ?!?!");
+ }
+ HDassert( next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC );
+ HDassert( next_entry_ptr->is_dirty );
+ HDassert( next_entry_ptr->in_slist );
- }
+ }
#if H5C_DO_SANITY_CHECKS
- /* Depending on circumstances, H5C_flush_single_entry() will
- * remove dirty entries from the slist as it flushes them.
- * Thus for sanity checks we must make note of the initial
- * slist length and size before we do any flushes.
- */
- initial_slist_len = cache_ptr->slist_len;
- initial_slist_size = cache_ptr->slist_size;
-
- /* There is also the possibility that entries will be
- * dirtied, resized, and/or renamed as the result of
- * calls to the flush callbacks. We use the slist_len_increase
- * and slist_size_increase increase fields in struct H5C_t
- * to track these changes for purpose of sanity checking.
- * To this end, we must zero these fields before we start
- * the pass through the slist.
- */
- cache_ptr->slist_len_increase = 0;
- cache_ptr->slist_size_increase = 0;
+ /* Depending on circumstances, H5C_flush_single_entry() will
+ * remove dirty entries from the slist as it flushes them.
+ * Thus for sanity checks we must make note of the initial
+ * slist length and size before we do any flushes.
+ */
+ initial_slist_len = cache_ptr->slist_len;
+ initial_slist_size = cache_ptr->slist_size;
- /* Finally, reset the actual_slist_len and actual_slist_size
- * fields to zero, as these fields are used to accumulate
- * the slist lenght and size that we see as we scan through
- * the slist.
- */
- actual_slist_len = 0;
- actual_slist_size = 0;
-#endif /* H5C_DO_SANITY_CHECKS */
+ /* There is also the possibility that entries will be
+ * dirtied, resized, and/or renamed as the result of
+ * calls to the flush callbacks. We use the slist_len_increase
+ * and slist_size_increase increase fields in struct H5C_t
+ * to track these changes for purpose of sanity checking.
+ * To this end, we must zero these fields before we start
+ * the pass through the slist.
+ */
+ cache_ptr->slist_len_increase = 0;
+ cache_ptr->slist_size_increase = 0;
- while ( node_ptr != NULL )
- {
- entry_ptr = next_entry_ptr;
-
- /* With the advent of the fractal heap, it is possible
- * that the flush callback will dirty and/or resize
- * other entries in the cache. In particular, while
- * Quincey has promised me that this will never happen,
- * it is possible that the flush callback for an
- * entry may protect an entry that is not in the cache,
- * perhaps causing the cache to flush and possibly
- * evict the entry associated with node_ptr to make
- * space for the new entry.
- *
- * Thus we do a bit of extra sanity checking on entry_ptr,
- * and break out of this scan of the skip list if we
- * detect major problems. We have a bit of leaway on the
- * number of passes though the skip list, so this shouldn't
- * be an issue in the flush in and of itself, as it should
- * be all but impossible for this to happen more than once
- * in any flush.
- *
- * Observe that that breaking out of the scan early
- * shouldn't break the sanity checks just after the end
- * of this while loop.
- *
- * If an entry has merely been marked clean and removed from
- * the s-list, we simply break out of the scan.
- *
- * If the entry has been evicted, we flag an error and
- * exit.
+ /* Finally, reset the actual_slist_len and actual_slist_size
+ * fields to zero, as these fields are used to accumulate
+ * the slist length and size that we see as we scan through
+ * the slist.
*/
+ actual_slist_len = 0;
+ actual_slist_size = 0;
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ while ( node_ptr != NULL )
+ {
+ entry_ptr = next_entry_ptr;
+
+ /* With the advent of the fractal heap, it is possible
+ * that the flush callback will dirty and/or resize
+ * other entries in the cache. In particular, while
+ * Quincey has promised me that this will never happen,
+ * it is possible that the flush callback for an
+ * entry may protect an entry that is not in the cache,
+ * perhaps causing the cache to flush and possibly
+ * evict the entry associated with node_ptr to make
+ * space for the new entry.
+ *
+ * Thus we do a bit of extra sanity checking on entry_ptr,
+ * and break out of this scan of the skip list if we
+ * detect major problems. We have a bit of leeway on the
+ * number of passes through the skip list, so this shouldn't
+ * be an issue in the flush in and of itself, as it should
+ * be all but impossible for this to happen more than once
+ * in any flush.
+ *
+ * Observe that breaking out of the scan early
+ * shouldn't break the sanity checks just after the end
+ * of this while loop.
+ *
+ * If an entry has merely been marked clean and removed from
+ * the s-list, we simply break out of the scan.
+ *
+ * If the entry has been evicted, we flag an error and
+ * exit.
+ */
#ifndef NDEBUG
- if ( entry_ptr->magic != H5C__H5C_CACHE_ENTRY_T_MAGIC ) {
+ if ( entry_ptr->magic != H5C__H5C_CACHE_ENTRY_T_MAGIC ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "entry_ptr->magic is invalid ?!?!");
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "entry_ptr->magic is invalid ?!?!");
- } else
+ } else
#endif /* NDEBUG */
- if ( ( ! entry_ptr->is_dirty ) ||
- ( ! entry_ptr->in_slist ) ) {
+ if ( ( ! entry_ptr->is_dirty ) ||
+ ( ! entry_ptr->in_slist ) ) {
- /* the s-list has been modified out from under us.
- * break out of the loop.
- */
- break;
- }
+ /* the s-list has been modified out from under us.
+ * break out of the loop.
+ */
+ goto end_of_inner_loop;
+ }
- /* increment node pointer now, before we delete its target
- * from the slist.
- */
+ /* increment node pointer now, before we delete its target
+ * from the slist.
+ */
- node_ptr = H5SL_next(node_ptr);
- if ( node_ptr != NULL ) {
+ node_ptr = H5SL_next(node_ptr);
+ if ( node_ptr != NULL ) {
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- if ( next_entry_ptr == NULL ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "next_entry_ptr == NULL 2 ?!?!");
- }
-#ifndef NDEBUG
- HDassert( next_entry_ptr->magic ==
- H5C__H5C_CACHE_ENTRY_T_MAGIC );
-#endif /* NDEBUG */
- HDassert( next_entry_ptr->is_dirty );
- HDassert( next_entry_ptr->in_slist );
+ if ( next_entry_ptr == NULL ) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "next_entry_ptr == NULL 2 ?!?!");
+ }
+ HDassert( next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC );
+ HDassert( next_entry_ptr->is_dirty );
+ HDassert( next_entry_ptr->in_slist );
- } else {
+ } else {
- next_entry_ptr = NULL;
- }
+ next_entry_ptr = NULL;
+ }
- /* Note that we now remove nodes from the slist as we flush
- * the associated entries, instead of leaving them there
- * until we are done, and then destroying all nodes in
- * the slist.
- *
- * While this optimization used to be easy, with the possibility
- * of new entries being added to the slist in the midst of the
- * flush, we must keep the slist in cannonical form at all
- * times.
- */
+ /* Note that we now remove nodes from the slist as we flush
+ * the associated entries, instead of leaving them there
+ * until we are done, and then destroying all nodes in
+ * the slist.
+ *
+ * While this optimization used to be easy, with the possibility
+ * of new entries being added to the slist in the midst of the
+ * flush, we must keep the slist in canonical form at all
+ * times.
+ */
- HDassert( entry_ptr != NULL );
- HDassert( entry_ptr->in_slist );
+ HDassert( entry_ptr != NULL );
+ HDassert( entry_ptr->in_slist );
#if H5C_DO_SANITY_CHECKS
- /* update actual_slist_len & actual_slist_size before
- * the flush. Note that the entry will be removed
- * from the slist after the flush, and thus may be
- * resized by the flush callback. This is OK, as
- * we will catch the size delta in
- * cache_ptr->slist_size_increase.
- *
- * Note that we include pinned entries in this count, even
- * though we will not actually flush them.
- */
- actual_slist_len++;
- actual_slist_size += entry_ptr->size;
+ /* update actual_slist_len & actual_slist_size before
+ * the flush. Note that the entry will be removed
+ * from the slist after the flush, and thus may be
+ * resized by the flush callback. This is OK, as
+ * we will catch the size delta in
+ * cache_ptr->slist_size_increase.
+ *
+ * Note that we include pinned entries in this count, even
+ * though we will not actually flush them.
+ */
+ actual_slist_len++;
+ actual_slist_size += entry_ptr->size;
#endif /* H5C_DO_SANITY_CHECKS */
- if ( entry_ptr->is_protected ) {
-
- /* we have major problems -- but lets flush
- * everything we can before we flag an error.
- */
- protected_entries++;
+ if ( entry_ptr->is_protected ) {
- } else if ( entry_ptr->is_pinned ) {
+ /* we have major problems -- but lets flush
+ * everything we can before we flag an error.
+ */
+ protected_entries++;
- /* Test to see if we are can flush the entry now.
- * If we can, go ahead and flush, but don't tell
- * H5C_flush_single_entry() to destroy the entry
- * as pinned entries can't be evicted.
- */
- if ( TRUE ) { /* When we get to multithreaded cache,
- * we will need either locking code, and/or
- * a test to see if the entry is in flushable
- * condition here.
- */
+ } else if ( entry_ptr->is_pinned ) {
- status = H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- cache_ptr,
- NULL,
- entry_ptr->addr,
- H5C__NO_FLAGS_SET,
- &first_flush,
- FALSE);
- if ( status < 0 ) {
+ /* Test to see if we can flush the entry now.
+ * If we can, go ahead and flush, but don't tell
+ * H5C_flush_single_entry() to destroy the entry
+ * as pinned entries can't be evicted.
+ */
+ if(entry_ptr->flush_dep_height == curr_flush_dep_height ) {
+ status = H5C_flush_single_entry(f,
+ primary_dxpl_id,
+ secondary_dxpl_id,
+ cache_ptr,
+ NULL,
+ entry_ptr->addr,
+ H5C__NO_FLAGS_SET,
+ &first_flush,
+ FALSE);
+ if ( status < 0 ) {
- /* This shouldn't happen -- if it does, we are toast
- * so just scream and die.
- */
+ /* This shouldn't happen -- if it does, we are toast
+ * so just scream and die.
+ */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
- "dirty pinned entry flush failed.")
- }
- }
- } else {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "dirty pinned entry flush failed.")
+ } /* end if */
+ flushed_during_dep_loop = TRUE;
+ } /* end if */
+ else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
+ /* This shouldn't happen -- if it does, just scream and die. */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
+ } /* end if */
+ else {
+ if(entry_ptr->flush_dep_height == curr_flush_dep_height ){
- status = H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- cache_ptr,
- NULL,
- entry_ptr->addr,
- (cooked_flags |
- H5C__FLUSH_INVALIDATE_FLAG),
- &first_flush,
- TRUE);
- if ( status < 0 ) {
+ status = H5C_flush_single_entry(f,
+ primary_dxpl_id,
+ secondary_dxpl_id,
+ cache_ptr,
+ NULL,
+ entry_ptr->addr,
+ (cooked_flags |
+ H5C__FLUSH_INVALIDATE_FLAG),
+ &first_flush,
+ TRUE);
+ if ( status < 0 ) {
- /* This shouldn't happen -- if it does, we are toast so
- * just scream and die.
- */
+ /* This shouldn't happen -- if it does, we are toast so
+ * just scream and die.
+ */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
- "dirty entry flush destroy failed.")
- }
- }
- } /* end while loop scanning skip list */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "dirty entry flush destroy failed.")
+ } /* end if */
+ flushed_during_dep_loop = TRUE;
+ } /* end if */
+ else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
+ /* This shouldn't happen -- if it does, just scream and die. */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
+ } /* end else */
+ } /* end while loop scanning skip list */
#if H5C_DO_SANITY_CHECKS
- /* It is possible that entries were added to the slist during
- * the scan, either before or after scan pointer. The following
- * asserts take this into account.
- *
- * Don't bother with the sanity checks if node_ptr != NULL, as
- * in this case we broke out of the loop because it got changed
- * out from under us.
- */
+ /* It is possible that entries were added to the slist during
+ * the scan, either before or after scan pointer. The following
+ * asserts take this into account.
+ *
+ * Don't bother with the sanity checks if node_ptr != NULL, as
+ * in this case we broke out of the loop because it got changed
+ * out from under us.
+ */
- if ( node_ptr == NULL ) {
+ if ( node_ptr == NULL ) {
- HDassert( (actual_slist_len + cache_ptr->slist_len) ==
- (initial_slist_len + cache_ptr->slist_len_increase) );
- HDassert( (actual_slist_size + cache_ptr->slist_size) ==
- (initial_slist_size + cache_ptr->slist_size_increase) );
- }
+ HDassert( (actual_slist_len + cache_ptr->slist_len) ==
+ (initial_slist_len + cache_ptr->slist_len_increase) );
+ HDassert( (actual_slist_size + cache_ptr->slist_size) ==
+ (initial_slist_size + cache_ptr->slist_size_increase) );
+ }
#endif /* H5C_DO_SANITY_CHECKS */
- /* Since we are doing a destroy, we must make a pass through
- * the hash table and try to flush - destroy all entries that
- * remain.
- *
- * It used to be that all entries remaining in the cache at
- * this point had to be clean, but with the fractal heap mods
- * this may not be the case. If so, we will flush entries out
- * of increasing address order.
- *
- * Writes to disk are possible here.
- */
- for ( i = 0; i < H5C__HASH_TABLE_LEN; i++ )
- {
- next_entry_ptr = cache_ptr->index[i];
-
- while ( next_entry_ptr != NULL )
+ /* Since we are doing a destroy, we must make a pass through
+ * the hash table and try to flush - destroy all entries that
+ * remain.
+ *
+ * It used to be that all entries remaining in the cache at
+ * this point had to be clean, but with the fractal heap mods
+ * this may not be the case. If so, we will flush entries out
+ * of increasing address order.
+ *
+ * Writes to disk are possible here.
+ */
+ for ( i = 0; i < H5C__HASH_TABLE_LEN; i++ )
{
- entry_ptr = next_entry_ptr;
+ next_entry_ptr = cache_ptr->index[i];
- next_entry_ptr = entry_ptr->ht_next;
-#ifndef NDEBUG
- HDassert ( ( next_entry_ptr == NULL ) ||
- ( next_entry_ptr->magic ==
- H5C__H5C_CACHE_ENTRY_T_MAGIC ) );
-#endif /* NDEBUG */
- if ( entry_ptr->is_protected ) {
+ while ( next_entry_ptr != NULL )
+ {
+ entry_ptr = next_entry_ptr;
- /* we have major problems -- but lets flush and destroy
- * everything we can before we flag an error.
- */
- protected_entries++;
+ next_entry_ptr = entry_ptr->ht_next;
+ HDassert ( ( next_entry_ptr == NULL ) ||
+ ( next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC ) );
+ if ( entry_ptr->is_protected ) {
- if ( ! entry_ptr->in_slist ) {
+ /* we have major problems -- but lets flush and destroy
+ * everything we can before we flag an error.
+ */
+ protected_entries++;
- HDassert( !(entry_ptr->is_dirty) );
- }
- } else if ( ! ( entry_ptr->is_pinned ) ) {
+ if ( ! entry_ptr->in_slist ) {
- status = H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- cache_ptr,
- NULL,
- entry_ptr->addr,
- (cooked_flags |
- H5C__FLUSH_INVALIDATE_FLAG),
- &first_flush,
- TRUE);
- if ( status < 0 ) {
+ HDassert( !(entry_ptr->is_dirty) );
+ }
+ } else if ( ! ( entry_ptr->is_pinned ) ) {
- /* This shouldn't happen -- if it does, we are toast so
- * just scream and die.
+ /* Test to see if we can flush the entry now.
+ * If we can, go ahead and flush.
*/
+ if(entry_ptr->flush_dep_height == curr_flush_dep_height ){
+ status = H5C_flush_single_entry(f,
+ primary_dxpl_id,
+ secondary_dxpl_id,
+ cache_ptr,
+ NULL,
+ entry_ptr->addr,
+ (cooked_flags |
+ H5C__FLUSH_INVALIDATE_FLAG),
+ &first_flush,
+ TRUE);
+ if ( status < 0 ) {
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
- "Entry flush destroy failed.")
- }
- }
- /* We can't do anything if the entry is pinned. The
- * hope is that the entry will be unpinned as the
- * result of destroys of entries that reference it.
- *
- * We detect this by noting the change in the number
- * of pinned entries from pass to pass. If it stops
- * shrinking before it hits zero, we scream and die.
- */
- /* if the flush function on the entry we last evicted
- * loaded an entry into cache (as Quincey has promised me
- * it never will), and if the cache was full, it is
- * possible that *next_entry_ptr was flushed or evicted.
- *
- * Test to see if this happened here. Note that if this
- * test is triggred, we are accessing a deallocated piece
- * of dynamically allocated memory, so we just scream and
- * die.
- */
-#ifndef NDEBUG
- if ( ( next_entry_ptr != NULL ) &&
- ( next_entry_ptr->magic !=
- H5C__H5C_CACHE_ENTRY_T_MAGIC ) ) {
+ /* This shouldn't happen -- if it does, we are toast so
+ * just scream and die.
+ */
- /* Something horrible has happened to
- * *next_entry_ptr -- scream and die.
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "Entry flush destroy failed.")
+ }
+ flushed_during_dep_loop = TRUE;
+ } /* end if */
+ else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
+ /* This shouldn't happen -- if it does, just scream and die. */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
+ } /* end if */
+ /* We can't do anything if the entry is pinned. The
+ * hope is that the entry will be unpinned as the
+ * result of destroys of entries that reference it.
+ *
+ * We detect this by noting the change in the number
+ * of pinned entries from pass to pass. If it stops
+ * shrinking before it hits zero, we scream and die.
*/
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "next_entry_ptr->magic is invalid?!?!?.")
- }
+ /* if the flush function on the entry we last evicted
+ * loaded an entry into cache (as Quincey has promised me
+ * it never will), and if the cache was full, it is
+ * possible that *next_entry_ptr was flushed or evicted.
+ *
+ * Test to see if this happened here. Note that if this
+ * test is triggered, we are accessing a deallocated piece
+ * of dynamically allocated memory, so we just scream and
+ * die.
+ */
+#ifndef NDEBUG
+ if ( ( next_entry_ptr != NULL ) &&
+ ( next_entry_ptr->magic !=
+ H5C__H5C_CACHE_ENTRY_T_MAGIC ) ) {
+
+ /* Something horrible has happened to
+ * *next_entry_ptr -- scream and die.
+ */
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "next_entry_ptr->magic is invalid?!?!?.")
+ }
#endif /* NDEBUG */
- } /* end while loop scanning hash table bin */
- } /* end for loop scanning hash table */
+ } /* end while loop scanning hash table bin */
+ } /* end for loop scanning hash table */
+
+ /* Check for incrementing flush dependency height */
+ if(flushed_during_dep_loop) {
+ /* If we flushed an entry at this flush dependency height
+ * start over at the bottom level of the flush dependencies
+ */
+ curr_flush_dep_height = 0;
+
+ /* Make certain we don't get stuck in an infinite loop */
+ flush_dep_passes++;
+ } /* end if */
+ else
+ curr_flush_dep_height++;
+
+ } /* end while loop over flush dependency heights */
+end_of_inner_loop:
old_pel_len = cur_pel_len;
cur_pel_len = cache_ptr->pel_len;
@@ -11029,8 +11398,9 @@ H5C_load_entry(H5F_t * f,
#endif /* NDEBUG */
{
void * thing = NULL;
- void * ret_value = NULL;
H5C_cache_entry_t * entry_ptr = NULL;
+ unsigned u; /* Local index variable */
+ void * ret_value = NULL; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5C_load_entry)
@@ -11097,6 +11467,12 @@ H5C_load_entry(H5F_t * f,
HDassert( entry_ptr->size < H5C_MAX_ENTRY_SIZE );
+ /* Initialize flush dependency height fields */
+ entry_ptr->flush_dep_parent = NULL;
+ for(u = 0; u < H5C__NUM_FLUSH_DEP_HEIGHTS; u++)
+ entry_ptr->child_flush_dep_height_rc[u] = 0;
+ entry_ptr->flush_dep_height = 0;
+
entry_ptr->ht_next = NULL;
entry_ptr->ht_prev = NULL;