path: root/src/H5C.c
Diffstat (limited to 'src/H5C.c')
-rw-r--r--  src/H5C.c  2335
1 file changed, 1301 insertions, 1034 deletions
diff --git a/src/H5C.c b/src/H5C.c
index 991db94..ee4affa 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -74,7 +74,6 @@
/* Module Setup */
/****************/
-#define H5AC_PACKAGE /*suppress error about including H5ACpkg */
#define H5C_PACKAGE /*suppress error about including H5Cpkg */
#define H5F_PACKAGE /*suppress error about including H5Fpkg */
@@ -84,7 +83,7 @@
/***********/
#include "H5private.h" /* Generic Functions */
#ifdef H5_HAVE_PARALLEL
-#include "H5ACpkg.h" /* Metadata cache */
+#include "H5ACprivate.h" /* Metadata cache */
#endif /* H5_HAVE_PARALLEL */
#include "H5Cpkg.h" /* Cache */
#include "H5Eprivate.h" /* Error handling */
@@ -117,10 +116,6 @@
/* Local Prototypes */
/********************/
-/*
- * Private file-scope function declarations:
- */
-
static herr_t H5C__auto_adjust_cache_size(H5F_t * f,
hid_t dxpl_id,
hbool_t write_permitted);
@@ -152,6 +147,12 @@ static herr_t H5C_flush_invalidate_cache(const H5F_t * f,
hid_t dxpl_id,
unsigned flags);
+static herr_t H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id,
+ H5C_ring_t ring, unsigned flags);
+
+static herr_t H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring,
+ unsigned flags);
+
static void * H5C_load_entry(H5F_t * f,
hid_t dxpl_id,
const H5C_class_t * type,
@@ -188,6 +189,10 @@ static herr_t H5C_verify_len_eoa (H5F_t *f,
size_t *len,
htri_t actual);
+#if H5C_DO_TAGGING_SANITY_CHECKS
+static herr_t H5C_verify_tag(int id, haddr_t tag);
+#endif
+
#if H5C_DO_SLIST_SANITY_CHECKS
static hbool_t H5C_entry_in_skip_list(H5C_t * cache_ptr,
H5C_cache_entry_t *target_ptr);
@@ -204,6 +209,11 @@ static void H5C__assert_flush_dep_nocycle(H5C_cache_entry_t * entry,
H5C_cache_entry_t * base_entry);
#endif /* NDEBUG */
+#if 0 /* debugging routines */
+herr_t H5C_dump_cache(H5C_t * cache_ptr, const char * cache_name);
+herr_t H5C_dump_cache_skip_list(H5C_t * cache_ptr, char * calling_fcn);
+#endif /* debugging routines */
+
/*********************/
/* Package Variables */
/*********************/
@@ -246,7 +256,7 @@ H5FL_DEFINE_STATIC(haddr_t);
#define H5C__EPOCH_MARKER_TYPE H5C__MAX_NUM_TYPE_IDS
static herr_t H5C__epoch_marker_get_load_size(const void *image_ptr,
- const void *udata_ptr,
+ void *udata_ptr,
size_t *image_len_ptr,
size_t *actual_len,
hbool_t *compressed_ptr,
@@ -313,7 +323,7 @@ const H5C_class_t epoch_marker_class =
*
***************************************************************************/
static herr_t
-H5C__epoch_marker_get_load_size(const void H5_ATTR_UNUSED *image_ptr, const void H5_ATTR_UNUSED *udata_ptr,
+H5C__epoch_marker_get_load_size(const void H5_ATTR_UNUSED *image_ptr, void H5_ATTR_UNUSED *udata_ptr,
size_t H5_ATTR_UNUSED *image_len_ptr, size_t H5_ATTR_UNUSED *actual_len,
hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_len_ptr)
{
@@ -521,6 +531,8 @@ H5C_create(size_t max_cache_size,
cache_ptr->log_file_ptr = NULL;
+ cache_ptr->trace_file_ptr = NULL;
+
cache_ptr->aux_ptr = aux_ptr;
cache_ptr->max_type_id = max_type_id;
@@ -542,6 +554,16 @@ H5C_create(size_t max_cache_size,
cache_ptr->clean_index_size = (size_t)0;
cache_ptr->dirty_index_size = (size_t)0;
+ for(i = 0; i < H5C_RING_NTYPES; i++) {
+ cache_ptr->index_ring_len[i] = 0;
+ cache_ptr->index_ring_size[i] = (size_t)0;
+ cache_ptr->clean_index_ring_size[i] = (size_t)0;
+ cache_ptr->dirty_index_ring_size[i] = (size_t)0;
+
+ cache_ptr->slist_ring_len[i] = 0;
+ cache_ptr->slist_ring_size[i] = (size_t)0;
+ } /* end for */
+
/* Tagging Field Initializations */
cache_ptr->ignore_tags = FALSE;
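[Annotation: the per-ring arrays initialized just above shadow the cache's global index and slist bookkeeping. Every entry is assigned to exactly one ring, so the per-ring counters must always sum to the global totals; the sanity-check blocks this diff adds to H5C_flush_cache() and H5C_flush_invalidate_cache() assert exactly that. A minimal sketch of the invariant, using only field names that appear in this diff:]

    /* Sketch: per-ring index statistics must sum to the global ones.
     * H5C_RING_UNDEFINED's slots stay zero -- entries are never left
     * assigned to it -- so the sum starts at H5C_RING_USER.
     */
    static void
    check_ring_sums(const H5C_t *cache_ptr)
    {
        int     i;
        int32_t index_len  = 0;
        size_t  index_size = (size_t)0;

        for(i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
            index_len  += cache_ptr->index_ring_len[i];
            index_size += cache_ptr->index_ring_size[i];
        } /* end for */

        HDassert(cache_ptr->index_len  == index_len);
        HDassert(cache_ptr->index_size == index_size);
    }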
@@ -556,10 +578,8 @@ H5C_create(size_t max_cache_size,
cache_ptr->slist_size_increase = 0;
#endif /* H5C_DO_SANITY_CHECKS */
- for ( i = 0; i < H5C__HASH_TABLE_LEN; i++ )
- {
+ for(i = 0; i < H5C__HASH_TABLE_LEN; i++)
(cache_ptr->index)[i] = NULL;
- }
cache_ptr->entries_removed_counter = 0;
cache_ptr->last_entry_removed_ptr = NULL;
@@ -1129,53 +1149,84 @@ done:
* Programmer: John Mainzer
* 6/2/04
*
- * Changes: Modified function to test for slist changes in
- * pre_serialize and serialize callbacks, and re-start
- * scans through the slist when such changes occur.
+ * Changes: Modified function to test for slist changes in
+ * pre_serialize and serialize callbacks, and re-start
+ * scans through the slist when such changes occur.
*
- * This has been a potential problem for some time,
- * and there has been code in this function to deal
- * with elements of this issue. However, the shift
- * to the V3 cache in combination with the activities
- * of some of the cache clients (in particular the
- * free space manager and the fractal heap) have
- * made this re-work necessary.
+ * This has been a potential problem for some time,
+ * and there has been code in this function to deal
+ * with elements of this issue. However, the shift
+ * to the V3 cache in combination with the activities
+ * of some of the cache clients (in particular the
+ * free space manager and the fractal heap) have
+ * made this re-work necessary.
*
- * JRM -- 12/13/14
+ * JRM -- 12/13/14
+ *
+ * Modified function to support rings. Basic idea is that
+ * every entry in the cache is assigned to a ring. Entries
+ * in the outermost ring are flushed first, followed by
+ * those in the next outermost ring, and so on until the
+ * innermost ring is flushed. See header comment on
+ * H5C_ring_t in H5Cprivate.h for a more detailed
+ * discussion.
+ *
+ * JRM -- 8/30/15
*
*-------------------------------------------------------------------------
*/
herr_t
H5C_flush_cache(H5F_t *f, hid_t dxpl_id, unsigned flags)
{
- H5C_t * cache_ptr = f->shared->cache;
- herr_t ret_value = SUCCEED;
- hbool_t destroy;
- hbool_t flushed_entries_last_pass;
- hbool_t flush_marked_entries;
- hbool_t ignore_protected;
- hbool_t tried_to_flush_protected_entry = FALSE;
- hbool_t restart_slist_scan;
- int32_t protected_entries = 0;
- H5SL_node_t * node_ptr = NULL;
- H5C_cache_entry_t * entry_ptr = NULL;
- H5C_cache_entry_t * next_entry_ptr = NULL;
#if H5C_DO_SANITY_CHECKS
- int64_t flushed_entries_count = 0;
- int64_t flushed_entries_size = 0;
- int64_t initial_slist_len = 0;
- size_t initial_slist_size = 0;
- int64_t entry_size_change;
- int64_t * entry_size_change_ptr = &entry_size_change;
-#else /* H5C_DO_SANITY_CHECKS */
- int64_t * entry_size_change_ptr = NULL;
+ int i;
+ int32_t index_len = 0;
+ size_t index_size = (size_t)0;
+ size_t clean_index_size = (size_t)0;
+ size_t dirty_index_size = (size_t)0;
+ size_t slist_size = (size_t)0;
+ int32_t slist_len = 0;
#endif /* H5C_DO_SANITY_CHECKS */
+ H5C_ring_t ring;
+ H5C_t * cache_ptr;
+ hbool_t destroy;
+ hbool_t ignore_protected;
+ herr_t ret_value = SUCCEED;
FUNC_ENTER_NOAPI(FAIL)
- HDassert( cache_ptr );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- HDassert( cache_ptr->slist_ptr );
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_ptr);
+
+#if H5C_DO_SANITY_CHECKS
+ HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+
+ for(i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
+ index_len += cache_ptr->index_ring_len[i];
+ index_size += cache_ptr->index_ring_size[i];
+ clean_index_size += cache_ptr->clean_index_ring_size[i];
+ dirty_index_size += cache_ptr->dirty_index_ring_size[i];
+
+ slist_len += cache_ptr->slist_ring_len[i];
+ slist_size += cache_ptr->slist_ring_size[i];
+ } /* end for */
+
+ HDassert(cache_ptr->index_len == index_len);
+ HDassert(cache_ptr->index_size == index_size);
+ HDassert(cache_ptr->clean_index_size == clean_index_size);
+ HDassert(cache_ptr->dirty_index_size == dirty_index_size);
+ HDassert(cache_ptr->slist_len == slist_len);
+ HDassert(cache_ptr->slist_size == slist_size);
+#endif /* H5C_DO_SANITY_CHECKS */
#if H5C_DO_EXTREME_SANITY_CHECKS
if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
@@ -1186,13 +1237,6 @@ H5C_flush_cache(H5F_t *f, hid_t dxpl_id, unsigned flags)
ignore_protected = ( (flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0 );
destroy = ( (flags & H5C__FLUSH_INVALIDATE_FLAG) != 0 );
-
- /* note that flush_marked_entries is set to FALSE if destroy is TRUE */
- flush_marked_entries = ( ( (flags & H5C__FLUSH_MARKED_ENTRIES_FLAG) != 0 )
- &&
- ( ! destroy )
- );
-
HDassert( ! ( destroy && ignore_protected ) );
HDassert( ! ( cache_ptr->flush_in_progress ) );
@@ -1200,283 +1244,19 @@ H5C_flush_cache(H5F_t *f, hid_t dxpl_id, unsigned flags)
if(destroy) {
if(H5C_flush_invalidate_cache(f, dxpl_id, flags) < 0)
- /* This shouldn't happen -- if it does, we are toast so
- * just scream and die.
- */
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate failed.")
- } else {
- /* When we are only flushing marked entries, the slist will usually
- * still contain entries when we have flushed everything we should.
- * Thus we track whether we have flushed any entries in the last
- * pass, and terminate if we haven't.
- */
-
- flushed_entries_last_pass = TRUE;
-
- /* set the cache_ptr->slist_change_in_pre_serialize and
- * cache_ptr->slist_change_in_serialize to false.
- *
- * These flags are set to TRUE by H5C__flush_single_entry if the
- * slist is modified by a pre_serialize or serialize call respectively.
- * H5C_flush_cache uses these flags to detect any modifications
- * to the slist that might corrupt the scan of the slist -- and
- * restart the scan in this event.
+ } /* end if */
+ else {
+ /* flush each ring, starting from the outermost ring and
+ * working inward.
*/
- cache_ptr->slist_change_in_pre_serialize = FALSE;
- cache_ptr->slist_change_in_serialize = FALSE;
-
- while ( ( cache_ptr->slist_len != 0 ) &&
- ( protected_entries == 0 ) &&
- ( flushed_entries_last_pass ) )
- {
- flushed_entries_last_pass = FALSE;
-
-#if H5C_DO_SANITY_CHECKS
- /* For sanity checking, try to verify that the skip list has
- * the expected size and number of entries at the end of each
- * internal while loop (see below).
- *
- * Doing this gets a bit tricky, as depending on flags, we may
- * or may not flush all the entries in the slist.
- *
- * To make things more entertaining, with the advent of the
- * fractal heap, the entry serialize callback can cause entries
- * to be dirtied, resized, and/or moved. Also, the
- * pre_serialize callback can result in an entry being
- * removed from the cache via the take ownership flag.
- *
- * To deal with this, we first make note of the initial
- * skip list length and size:
- */
- initial_slist_len = cache_ptr->slist_len;
- initial_slist_size = cache_ptr->slist_size;
-
- /* We then zero counters that we use to track the number
- * and total size of entries flushed:
- */
- flushed_entries_count = 0;
- flushed_entries_size = 0;
-
- /* As mentioned above, there is the possibility that
- * entries will be dirtied, resized, flushed, or removed
- * from the cache via the take ownership flag during
- * our pass through the skip list. To capture the number
- * of entries added, and the skip list size delta,
- * zero the slist_len_increase and slist_size_increase of
- * the cache's instance of H5C_t. These fields will be
- * updated elsewhere to account for slist insertions and/or
- * dirty entry size changes.
- */
- cache_ptr->slist_len_increase = 0;
- cache_ptr->slist_size_increase = 0;
-
- /* at the end of the loop, use these values to compute the
- * expected slist length and size and compare this with the
- * value recorded in the cache's instance of H5C_t.
- */
-#endif /* H5C_DO_SANITY_CHECKS */
-
- restart_slist_scan = TRUE;
-
- while ( ( restart_slist_scan ) || ( node_ptr != NULL ) )
- {
- if ( restart_slist_scan )
- {
- restart_slist_scan = FALSE;
-
- /* Start at beginning of skip list */
- node_ptr = H5SL_first(cache_ptr->slist_ptr);
-
- if ( node_ptr == NULL )
- {
- /* the slist is empty -- break out of inner loop */
- break;
- }
- HDassert( node_ptr != NULL );
-
- /* Get cache entry for this node */
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
-
- if(NULL == next_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
-
- HDassert( next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC );
- HDassert( next_entry_ptr->is_dirty );
- HDassert( next_entry_ptr->in_slist );
- }
-
- entry_ptr = next_entry_ptr;
-
- /* With the advent of the fractal heap, the free space
- * manager, and the version 3 cache, it is possible
- * that the pre-serialize or serialize callback will
- * dirty, resize, or take ownership of other entries
- * in the cache.
- *
- * To deal with this, I have inserted code to detect any
- * change in the skip list not directly under the control
- * of this function. If such modifications are detected,
- * we must re-start the scan of the skip list to avoid
- * the possibility that the target of the next_entry_ptr
- * may have been flushed or deleted from the cache.
- *
- * To verify that all such possibilities have been dealt
- * with, we do a bit of extra sanity checking on
- * entry_ptr.
- */
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->in_slist);
- HDassert(entry_ptr->is_dirty);
-
- /* increment node pointer now, before we delete its target
- * from the slist.
- */
- node_ptr = H5SL_next(node_ptr);
-
- if ( node_ptr != NULL ) {
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- if ( NULL == next_entry_ptr )
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
- HDassert( next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC );
- HDassert( next_entry_ptr->is_dirty );
- HDassert( next_entry_ptr->in_slist );
- HDassert( entry_ptr != next_entry_ptr );
- } else {
- next_entry_ptr = NULL;
- }
-
- HDassert( entry_ptr != NULL );
- HDassert( entry_ptr->in_slist );
-
- if ( ( ( ! flush_marked_entries ) ||
- ( entry_ptr->flush_marker ) ) &&
- ( ( ! entry_ptr->flush_me_last ) ||
- ( cache_ptr->num_last_entries >=
- cache_ptr->slist_len ) ||
- ( flush_marked_entries &&
- ( entry_ptr->flush_marker ) ) ) &&
- ( ( entry_ptr->flush_dep_nchildren == 0 ) ||
- ( ( ! destroy ) &&
- ( entry_ptr->flush_dep_ndirty_children == 0 ) ) ) ) {
-
- if ( entry_ptr->is_protected ) {
- /* we probably have major problems -- but let's
- * flush everything we can before we decide
- * whether to flag an error.
- */
- tried_to_flush_protected_entry = TRUE;
- protected_entries++;
-
- } else if ( entry_ptr->is_pinned ) {
-#if H5C_DO_SANITY_CHECKS
- flushed_entries_count++;
- flushed_entries_size += (int64_t)entry_ptr->size;
- entry_size_change = 0;
-#endif /* H5C_DO_SANITY_CHECKS */
- if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flags, entry_size_change_ptr) < 0 )
- /* This shouldn't happen -- if it does, we are toast
- * so just scream and die.
- */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed.")
-
-#if H5C_DO_SANITY_CHECKS
- /* it is possible that the entry size changed
- * during flush -- update flushed_entries_size
- * to account for this.
- */
- flushed_entries_size += entry_size_change;
-#endif /* H5C_DO_SANITY_CHECKS */
-
- if ((cache_ptr->slist_change_in_serialize) ||
- (cache_ptr->slist_change_in_pre_serialize))
- {
- /* The slist has been modified by something
- * other than the simple removal of
- * the flushed entry after the flush.
- *
- * This has the potential to corrupt the
- * scan through the slist, so restart it.
- */
- restart_slist_scan = TRUE;
- cache_ptr->slist_change_in_pre_serialize = FALSE;
- cache_ptr->slist_change_in_serialize = FALSE;
-
- H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
- }
-
- flushed_entries_last_pass = TRUE;
- } /* end if */
- else {
-#if H5C_DO_SANITY_CHECKS
- flushed_entries_count++;
- flushed_entries_size += (int64_t)entry_ptr->size;
- entry_size_change = 0;
-#endif /* H5C_DO_SANITY_CHECKS */
- if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flags, entry_size_change_ptr) < 0)
- /* This shouldn't happen -- if it does, we are
- * toast so just scream and die.
- */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry.")
-#if H5C_DO_SANITY_CHECKS
- /* it is possible that the entry size changed
- * during flush -- update flushed_entries_size
- * to account for this.
- */
- flushed_entries_size += entry_size_change;
-#endif /* H5C_DO_SANITY_CHECKS */
- if ((cache_ptr->slist_change_in_serialize) ||
- (cache_ptr->slist_change_in_pre_serialize))
- {
- /* The slist has been modified by something
- * other than the simple removal of
- * the flushed entry after the flush.
- *
- * This has the potential to corrupt the
- * scan through the slist, so restart it.
- */
- restart_slist_scan = TRUE;
- cache_ptr->slist_change_in_pre_serialize = FALSE;
- cache_ptr->slist_change_in_serialize = FALSE;
-
- H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
- }
-
- flushed_entries_last_pass = TRUE;
- } /* end else */
- } /* end if */
- } /* while ( ( restart_slist_scan ) || ( node_ptr != NULL ) ) */
-
-#if H5C_DO_SANITY_CHECKS
- /* Verify that the slist size and length are as expected. */
-
- HDassert( (initial_slist_len + cache_ptr->slist_len_increase -
- flushed_entries_count) == cache_ptr->slist_len );
- HDassert( (size_t)((int64_t)initial_slist_size +
- cache_ptr->slist_size_increase -
- flushed_entries_size) == cache_ptr->slist_size );
-#endif /* H5C_DO_SANITY_CHECKS */
-
- } /* while */
-
- HDassert( protected_entries <= cache_ptr->pl_len );
-
- if ( ( ( cache_ptr->pl_len > 0 ) && ( !ignore_protected ) )
- ||
- ( tried_to_flush_protected_entry ) ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
- "cache has protected items")
- }
-
-#if H5C_DO_SANITY_CHECKS
- if ( ! flush_marked_entries ) {
-
- HDassert( cache_ptr->slist_len == 0 );
- HDassert( cache_ptr->slist_size == 0 );
- }
-#endif /* H5C_DO_SANITY_CHECKS */
-
- }
+ ring = H5C_RING_USER;
+ while(ring < H5C_RING_NTYPES) {
+ if(H5C_flush_ring(f, dxpl_id, ring, flags) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush ring failed.")
+ ring++;
+ } /* end while */
+ } /* end else */
done:
cache_ptr->flush_in_progress = FALSE;
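[Annotation: the ring loop in the else branch above relies only on an ordering contract for H5C_ring_t: values increase monotonically from the outermost ring to the innermost, with H5C_RING_UNDEFINED below them all and H5C_RING_NTYPES one past the end, so "flush outermost first" reduces to ring++. The authoritative definition lives in H5Cprivate.h, per the header comment; the enum below is a hedged stand-in with hypothetical member names, not the shipped type:]

    /* Hypothetical stand-in for H5C_ring_t -- see H5Cprivate.h for the
     * real definition.  Only the ordering matters to the flush loops.
     */
    typedef enum {
        MY_RING_UNDEFINED = 0,  /* entry not yet assigned to a ring */
        MY_RING_OUTERMOST,      /* e.g. ordinary client metadata    */
        MY_RING_MIDDLE,         /* e.g. free space manager metadata */
        MY_RING_INNERMOST,      /* e.g. superblock                  */
        MY_RING_NTYPES          /* sentinel: one past the last ring */
    } my_ring_t;

    static void
    flush_all_rings_sketch(void)
    {
        my_ring_t ring;

        /* walk outermost -> innermost, as H5C_flush_cache() and
         * H5C_flush_invalidate_cache() now do
         */
        for(ring = MY_RING_OUTERMOST; ring < MY_RING_NTYPES; ring++)
            ; /* flush every entry whose entry_ptr->ring == ring */
    }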
@@ -2057,6 +1837,7 @@ H5C_get_trace_file_ptr_from_entry(const H5C_cache_entry_t *entry_ptr)
FUNC_LEAVE_NOAPI(H5C_get_trace_file_ptr(entry_ptr->cache_ptr))
} /* H5C_get_trace_file_ptr_from_entry() */
+
/*-------------------------------------------------------------------------
* Function: H5C_set_up_logging
@@ -2086,7 +1867,7 @@ H5C_set_up_logging(H5C_t *cache_ptr, const char log_location[],
#ifdef H5_HAVE_PARALLEL
H5AC_aux_t *aux_ptr = NULL;
#endif /*H5_HAVE_PARALLEL*/
- char *file_name;
+ char *file_name = NULL;
size_t n_chars;
herr_t ret_value = SUCCEED; /* Return value */
@@ -2151,8 +1932,8 @@ H5C_set_up_logging(H5C_t *cache_ptr, const char log_location[],
cache_ptr->currently_logging = start_immediately;
done:
-
- HDfree(file_name);
+ if(file_name)
+ HDfree(file_name);
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_set_up_logging() */
@@ -2410,7 +2191,8 @@ H5C_insert_entry(H5F_t * f,
unsigned int flags)
{
H5C_t * cache_ptr;
- herr_t result;
+ H5P_genplist_t *dxpl;
+ H5AC_ring_t ring = H5C_RING_UNDEFINED;
hbool_t insert_pinned;
hbool_t flush_last;
#ifdef H5_HAVE_PARALLEL
@@ -2456,6 +2238,14 @@ H5C_insert_entry(H5F_t * f,
flush_collectively = ( (flags & H5C__FLUSH_COLLECTIVELY_FLAG) != 0 );
#endif /* H5_HAVE_PARALLEL */
+ /* Get the dataset transfer property list */
+ if(NULL == (dxpl = (H5P_genplist_t *)H5I_object_verify(dxpl_id, H5I_GENPROP_LST)))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, FAIL, "not a property list")
+
+ /* Get the ring type from the DXPL */
+ if((H5P_get(dxpl, H5AC_RING_NAME, &ring)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "unable to query ring value")
+
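[Annotation: both H5C_insert_entry() (here) and H5C_protect() (later in this diff) now read the target ring from the dataset transfer property list under H5AC_RING_NAME, so a caller selects a ring by setting that property before calling into the cache. A hedged sketch of such a caller -- H5AC_RING_NAME and the H5P_get()/H5I_object_verify() calls are taken from this diff, while the wrapper itself and the use of H5P_set() are illustrative assumptions:]

    /* Hypothetical wrapper: install a ring on the DXPL, make the
     * cache call, then restore the caller's original ring.
     */
    static herr_t
    call_with_ring_sketch(hid_t dxpl_id, H5AC_ring_t new_ring)
    {
        H5P_genplist_t *dxpl;
        H5AC_ring_t     orig_ring;

        if(NULL == (dxpl = (H5P_genplist_t *)H5I_object_verify(dxpl_id, H5I_GENPROP_LST)))
            return FAIL;

        /* save the caller's ring and install the new one */
        if(H5P_get(dxpl, H5AC_RING_NAME, &orig_ring) < 0)
            return FAIL;
        if(H5P_set(dxpl, H5AC_RING_NAME, &new_ring) < 0)
            return FAIL;

        /* ... call H5C_insert_entry() / H5C_protect() here ... */

        /* restore the original ring before returning */
        if(H5P_set(dxpl, H5AC_RING_NAME, &orig_ring) < 0)
            return FAIL;

        return SUCCEED;
    }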
entry_ptr = (H5C_cache_entry_t *)thing;
/* verify that the new entry isn't already in the hash table -- scream
@@ -2481,7 +2271,7 @@ H5C_insert_entry(H5F_t * f,
/* Apply tag to newly inserted entry */
if(H5C_tag_entry(cache_ptr, entry_ptr, dxpl_id) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "Cannot tag entry")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "Cannot tag metadata entry")
/* Set the entry's cork status */
if(H5C_cork(cache_ptr, entry_ptr->tag, H5C__GET_CORKED, &entry_ptr->is_corked) < 0)
@@ -2537,6 +2327,8 @@ H5C_insert_entry(H5F_t * f,
entry_ptr->flush_in_progress = FALSE;
entry_ptr->destroy_in_progress = FALSE;
+ entry_ptr->ring = ring;
+
/* Initialize flush dependency height fields */
entry_ptr->flush_dep_parent = NULL;
entry_ptr->flush_dep_nparents = 0;
@@ -2558,13 +2350,8 @@ H5C_insert_entry(H5F_t * f,
if ( ( cache_ptr->flash_size_increase_possible ) &&
( entry_ptr->size > cache_ptr->flash_size_increase_threshold ) ) {
- result = H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, \
- "H5C__flash_increase_cache_size failed.")
- }
+ if(H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C__flash_increase_cache_size failed.")
}
if(cache_ptr->index_size >= cache_ptr->max_cache_size)
@@ -2581,33 +2368,20 @@ H5C_insert_entry(H5F_t * f,
size_t space_needed;
- if ( empty_space <= entry_ptr->size ) {
-
+ if(empty_space <= entry_ptr->size)
cache_ptr->cache_full = TRUE;
- }
-
- if ( cache_ptr->check_write_permitted != NULL ) {
-
- result = (cache_ptr->check_write_permitted)(f, &write_permitted);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, \
- "Can't get write_permitted")
- }
- } else {
+ if(cache_ptr->check_write_permitted != NULL) {
+ if((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "Can't get write_permitted")
+ } /* end if */
+ else
write_permitted = cache_ptr->write_permitted;
- }
-
- HDassert( entry_ptr->size <= H5C_MAX_ENTRY_SIZE );
+ HDassert(entry_ptr->size <= H5C_MAX_ENTRY_SIZE);
space_needed = entry_ptr->size;
-
- if ( space_needed > cache_ptr->max_cache_size ) {
-
+ if(space_needed > cache_ptr->max_cache_size)
space_needed = cache_ptr->max_cache_size;
- }
/* Note that space_needed is just the amount of space that
* needed to insert the new entry without exceeding the cache
@@ -2634,16 +2408,8 @@ H5C_insert_entry(H5F_t * f,
* no point in worrying about the third.
*/
- result = H5C_make_space_in_cache(f,
- dxpl_id,
- space_needed,
- write_permitted);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, \
- "H5C_make_space_in_cache failed.")
- }
+ if(H5C_make_space_in_cache(f, dxpl_id, space_needed, write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C_make_space_in_cache failed.")
}
H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL)
@@ -2744,15 +2510,15 @@ H5C_mark_entry_dirty(void *thing)
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
if ( entry_ptr->is_protected ) {
- HDassert( ! ((entry_ptr)->is_read_only) );
+ HDassert( ! ((entry_ptr)->is_read_only) );
/* set the dirtied flag */
entry_ptr->dirtied = TRUE;
} else if ( entry_ptr->is_pinned ) {
- hbool_t was_pinned_unprotected_and_clean;
+ hbool_t was_pinned_unprotected_and_clean;
- was_pinned_unprotected_and_clean = ! ( entry_ptr->is_dirty );
+ was_pinned_unprotected_and_clean = ! ( entry_ptr->is_dirty );
/* mark the entry as dirty if it isn't already */
entry_ptr->is_dirty = TRUE;
@@ -2760,20 +2526,13 @@ H5C_mark_entry_dirty(void *thing)
/* Propagate the dirty flag up the flush dependency chain if appropriate
*/
- if ( was_pinned_unprotected_and_clean ) {
-
+ if(was_pinned_unprotected_and_clean) {
H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr);
- if ( ( entry_ptr->flush_dep_ndirty_children == 0) &&
- ( entry_ptr->flush_dep_nparents > 0 ) ) {
-
- if ( H5C__mark_flush_dep_dirty(entry_ptr) < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, \
- "Can't propagate flush dep dirty flag")
- }
- }
- }
+ if((entry_ptr->flush_dep_ndirty_children == 0) && (entry_ptr->flush_dep_nparents > 0))
+ if(H5C__mark_flush_dep_dirty(entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
+ } /* end if */
if ( ! (entry_ptr->in_slist) ) {
@@ -2813,9 +2572,9 @@ H5C_move_entry(H5C_t * cache_ptr,
haddr_t old_addr,
haddr_t new_addr)
{
- hbool_t was_dirty;
H5C_cache_entry_t * entry_ptr = NULL;
H5C_cache_entry_t * test_entry_ptr = NULL;
+ hbool_t was_dirty;
#if H5C_DO_SANITY_CHECKS
hbool_t removed_entry_from_slist = FALSE;
#endif /* H5C_DO_SANITY_CHECKS */
@@ -2927,31 +2686,28 @@ H5C_move_entry(H5C_t * cache_ptr,
if ( ( entry_ptr->flush_dep_ndirty_children == 0) &&
( entry_ptr->flush_dep_nparents > 0 ) ) {
- if ( H5C__mark_flush_dep_dirty(entry_ptr) < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, \
- "Can't propagate flush dep dirty flag")
- }
+ if ( H5C__mark_flush_dep_dirty(entry_ptr) < 0 )
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
}
}
}
H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL)
- H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
+ H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
#if H5C_DO_SANITY_CHECKS
- if ( removed_entry_from_slist ) {
+ if ( removed_entry_from_slist ) {
- /* we just removed the entry from the slist. Thus we
- * must touch up cache_ptr->slist_len_increase and
- * cache_ptr->slist_size_increase to keep from skewing
- * the sanity checks.
- */
+ /* we just removed the entry from the slist. Thus we
+ * must touch up cache_ptr->slist_len_increase and
+ * cache_ptr->slist_size_increase to keep from skewing
+ * the sanity checks.
+ */
cache_ptr->slist_len_increase -= 1;
cache_ptr->slist_size_increase -= (int64_t)(entry_ptr->size);
- }
+ }
#endif /* H5C_DO_SANITY_CHECKS */
@@ -3050,11 +2806,8 @@ H5C_resize_entry(void *thing, size_t new_size)
if ( ( entry_ptr->flush_dep_ndirty_children == 0) &&
( entry_ptr->flush_dep_nparents > 0 ) ) {
- if ( H5C__mark_flush_dep_dirty(entry_ptr) < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, \
- "Can't propagate flush dep dirty flag")
- }
+ if ( H5C__mark_flush_dep_dirty(entry_ptr) < 0 )
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
}
}
@@ -3306,6 +3059,8 @@ H5C_protect(H5F_t * f,
unsigned flags)
{
H5C_t * cache_ptr;
+ H5P_genplist_t *dxpl;
+ H5AC_ring_t ring = H5C_RING_UNDEFINED;
hbool_t hit;
hbool_t have_write_permitted = FALSE;
hbool_t read_only = FALSE;
@@ -3314,7 +3069,6 @@ H5C_protect(H5F_t * f,
hbool_t flush_collectively;
#endif /* H5_HAVE_PARALLEL */
hbool_t write_permitted;
- herr_t result;
size_t empty_space;
void * thing;
H5C_cache_entry_t * entry_ptr;
@@ -3349,15 +3103,48 @@ H5C_protect(H5F_t * f,
flush_collectively = ( (flags & H5C__FLUSH_COLLECTIVELY_FLAG) != 0 );
#endif /* H5_HAVE_PARALLEL */
+ /* Get the dataset transfer property list */
+ if(NULL == (dxpl = (H5P_genplist_t *)H5I_object_verify(dxpl_id, H5I_GENPROP_LST)))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, NULL, "not a property list")
+
+ /* Get the ring type from the DXPL */
+ if((H5P_get(dxpl, H5AC_RING_NAME, &ring)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "unable to query ring value")
+
/* first check to see if the target is in cache */
H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, NULL)
if ( entry_ptr != NULL ) {
+ if(entry_ptr->ring != ring)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "ring type mismatch occurred for cache entry\n");
/* Check for trying to load the wrong type of entry from an address */
if(entry_ptr->type != type)
HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, NULL, "incorrect cache entry type")
+#if H5C_DO_TAGGING_SANITY_CHECKS
+{
+ H5C_tag_t tag; /* Tag structure */
+
+ /* The entry is already in the cache, but make sure that the tag value
+ * being passed in via the dxpl is still legal. This ensures that, had
+ * the entry NOT been in the cache, tagging was still set up correctly
+ * and the entry would have received a legal tag value after being
+ * loaded from disk. */
+
+ /* Get the tag from the DXPL */
+ if((H5P_get(dxpl, "H5C_tag", &tag)) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "unable to query property value");
+
+ /* Verify tag value */
+ if(cache_ptr->ignore_tags != TRUE) {
+ /* Verify legal tag value */
+ if((H5C_verify_tag(entry_ptr->type->id, tag.value)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "tag verification failed");
+ } /* end if */
+}
+#endif
+
hit = TRUE;
thing = (void *)entry_ptr;
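[Annotation: the tagging check just above runs even on a cache hit -- the tag carried by the DXPL is re-verified so that broken tagging is caught whether or not the entry actually had to be loaded from disk. H5C_verify_tag() is declared near the top of this diff but its body is not shown in this hunk; the sketch below is only a plausible illustration of the kind of rule such a check enforces, with hypothetical stand-in constants:]

    /* Hypothetical sketch only -- the real H5C_verify_tag() and the
     * reserved tag constants live elsewhere in the library.
     */
    #define MY_SUPERBLOCK_ID   1            /* stand-in type id      */
    #define MY_SUPERBLOCK_TAG  ((haddr_t)1) /* stand-in reserved tag */

    static herr_t
    verify_tag_sketch(int id, haddr_t tag)
    {
        /* globally scoped metadata must carry its reserved tag ... */
        if(id == MY_SUPERBLOCK_ID && tag != MY_SUPERBLOCK_TAG)
            return FAIL;
        /* ... and ordinary entries must not use a reserved tag */
        if(id != MY_SUPERBLOCK_ID && tag == MY_SUPERBLOCK_TAG)
            return FAIL;
        return SUCCEED;
    }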
@@ -3372,13 +3159,15 @@ H5C_protect(H5F_t * f,
if ( thing == NULL ) {
HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't load entry")
- }
+ }
entry_ptr = (H5C_cache_entry_t *)thing;
+ entry_ptr->ring = ring;
+
/* Apply tag to newly protected entry */
if(H5C_tag_entry(cache_ptr, entry_ptr, dxpl_id) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, NULL, "Cannot tag entry")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, NULL, "Cannot tag metadata entry")
/* Set the entry's cork status */
if(H5C_cork(cache_ptr, entry_ptr->tag, H5C__GET_CORKED, &entry_ptr->is_corked) < 0)
@@ -3390,14 +3179,8 @@ H5C_protect(H5F_t * f,
if ( ( cache_ptr->flash_size_increase_possible ) &&
( entry_ptr->size > cache_ptr->flash_size_increase_threshold ) ) {
- result = H5C__flash_increase_cache_size(cache_ptr, 0,
- entry_ptr->size);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
- "H5C__flash_increase_cache_size failed.")
- }
+ if(H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__flash_increase_cache_size failed.")
}
if(cache_ptr->index_size >= cache_ptr->max_cache_size)
@@ -3423,20 +3206,11 @@ H5C_protect(H5F_t * f,
if(empty_space <= entry_ptr->size)
cache_ptr->cache_full = TRUE;
- if ( cache_ptr->check_write_permitted != NULL ) {
-
- result = (cache_ptr->check_write_permitted)(f, &write_permitted);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
- "Can't get write_permitted 1")
-
- } else {
-
+ if(cache_ptr->check_write_permitted != NULL) {
+ if((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Can't get write_permitted 1")
+ else
have_write_permitted = TRUE;
-
- }
} else {
write_permitted = cache_ptr->write_permitted;
@@ -3483,16 +3257,8 @@ H5C_protect(H5F_t * f,
* see no point in worrying about the fourth.
*/
- result = H5C_make_space_in_cache(f,
- dxpl_id,
- space_needed,
- write_permitted);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
- "H5C_make_space_in_cache failed 1.")
- }
+ if(H5C_make_space_in_cache(f, dxpl_id, space_needed, write_permitted) < 0 )
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C_make_space_in_cache failed 1.")
}
/* Insert the entry in the hash table. It can't be dirty yet, so
@@ -3586,19 +3352,10 @@ H5C_protect(H5F_t * f,
if ( ! have_write_permitted ) {
if ( cache_ptr->check_write_permitted != NULL ) {
-
- result = (cache_ptr->check_write_permitted)(f, &write_permitted);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
- "Can't get write_permitted 2")
-
- } else {
-
+ if((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Can't get write_permitted 2")
+ else
have_write_permitted = TRUE;
-
- }
} else {
write_permitted = cache_ptr->write_permitted;
@@ -3612,14 +3369,8 @@ H5C_protect(H5F_t * f,
( cache_ptr->cache_accesses >=
(cache_ptr->resize_ctl).epoch_length ) ) {
- result = H5C__auto_adjust_cache_size(f,
- dxpl_id,
- write_permitted);
- if ( result != SUCCEED ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
- "Cache auto-resize failed.")
- }
+ if(H5C__auto_adjust_cache_size(f, dxpl_id, write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Cache auto-resize failed.")
}
if ( cache_ptr->size_decreased ) {
@@ -3648,16 +3399,8 @@ H5C_protect(H5F_t * f,
if(cache_ptr->index_size > cache_ptr->max_cache_size)
cache_ptr->cache_full = TRUE;
- result = H5C_make_space_in_cache(f,
- dxpl_id,
- (size_t)0,
- write_permitted);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
- "H5C_make_space_in_cache failed 2.")
- }
+ if(H5C_make_space_in_cache(f, dxpl_id, (size_t)0, write_permitted) < 0 )
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C_make_space_in_cache failed 2.")
}
}
}
@@ -4119,7 +3862,7 @@ herr_t
H5C_set_trace_file_ptr(H5C_t * cache_ptr,
FILE * trace_file_ptr)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -4140,46 +3883,6 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5C_set_log_file_ptr
- *
- * Purpose: Set the log_file_ptr field for the cache.
- *
- * This field must either be NULL (which turns off metadata
- * cache logging), or be a pointer to an open file to which
- * log entries are to be written.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer
- * 1/20/06
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5C_set_log_file_ptr(H5C_t * cache_ptr,
- FILE * log_file_ptr)
-{
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- /* This would normally be an assert, but we need to use an HGOTO_ERROR
- * call to shut up the compiler.
- */
- if ( ( ! cache_ptr ) || ( cache_ptr->magic != H5C__H5C_T_MAGIC ) ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr")
- }
-
- cache_ptr->log_file_ptr = log_file_ptr;
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-
-} /* H5C_set_log_file_ptr() */
-
-
-/*-------------------------------------------------------------------------
* Function: H5C_stats
*
* Purpose: Prints statistics about the cache.
@@ -5318,11 +5021,8 @@ H5C_unprotect(H5F_t * f,
if ( ( entry_ptr->flush_dep_ndirty_children == 0) &&
( entry_ptr->flush_dep_nparents > 0 ) ) {
- if ( H5C__mark_flush_dep_dirty(entry_ptr) < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, \
- "Can't propagate flush dep dirty flag")
- }
+ if ( H5C__mark_flush_dep_dirty(entry_ptr) < 0 )
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
}
H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr)
@@ -5333,11 +5033,8 @@ H5C_unprotect(H5F_t * f,
if ( ( entry_ptr->flush_dep_ndirty_children == 0) &&
( entry_ptr->flush_dep_nparents > 0 ) ) {
- if ( H5C__mark_flush_dep_clean(entry_ptr) < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, \
- "Can't propagate flush dep dirty flag")
- }
+ if ( H5C__mark_flush_dep_clean(entry_ptr) < 0 )
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
}
}
@@ -6650,9 +6347,9 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
}
/* dirty corked entry is skipped */
- if(entry_ptr->is_corked && entry_ptr->is_dirty) {
+ if(entry_ptr->is_corked && entry_ptr->is_dirty)
corked = TRUE;
- } else if ( entry_ptr->is_dirty ) {
+ else if ( entry_ptr->is_dirty ) {
/* reset entries_removed_counter and
* last_entry_removed_ptr prior to the call to
@@ -6683,10 +6380,10 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
if ( prev_ptr != NULL ) {
- if(corked) { /* dirty corked entry is skipped */
+ if(corked) /* dirty corked entry is skipped */
entry_ptr = prev_ptr;
- } else if ( ( restart_scan )
+ else if ( ( restart_scan )
||
( prev_ptr->is_dirty != prev_is_dirty )
||
@@ -7233,58 +6930,153 @@ done:
* Programmer: John Mainzer
* 3/24/05
*
- * Changes: Modified function to test for slist changes in
- * pre_serialize and serialize callbacks, and re-start
- * scans through the slist when such changes occur.
- *
- * This has been a potential problem for some time,
- * and there has been code in this function to deal
- * with elements of this issue. However, the shift
- * to the V3 cache in combination with the activities
- * of some of the cache clients (in particular the
- * free space manager and the fractal heap) have
- * made this re-work necessary in H5C_flush_cache.
- *
- * At present, this issue doesn't seem to be causing problems
- * in H5C_flush_invalidate_cache(). However, it seems
- * prudent to port the H5C_flush_cache changes to this
- * function as well.
- *
- * JRM -- 12/14/14
- *
- * Added code to track entry size change during flush single
- * entry. This didn't used to be a problem, as the entry
- * was largely removed from the cache data structures before
- * the flush proper. However, re-entrant calls to the cache
- * in the parallel case required a re-factoring of the
- * H5C__flush_single_entry() function to keep entries fully
- * in the cache until after the pre-serialize and serialize
- * calls.
- * JRM -- 12/25/14
- *
*-------------------------------------------------------------------------
*/
static herr_t
H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
{
H5C_t * cache_ptr;
+ H5C_ring_t ring;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_ptr);
+
+#if H5C_DO_SANITY_CHECKS
+{
+ int32_t i;
+ int32_t index_len = 0;
+ int32_t slist_len = 0;
+ size_t index_size = (size_t)0;
+ size_t clean_index_size = (size_t)0;
+ size_t dirty_index_size = (size_t)0;
+ size_t slist_size = (size_t)0;
+
+ HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+
+ for(i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
+ index_len += cache_ptr->index_ring_len[i];
+ index_size += cache_ptr->index_ring_size[i];
+ clean_index_size += cache_ptr->clean_index_ring_size[i];
+ dirty_index_size += cache_ptr->dirty_index_ring_size[i];
+
+ slist_len += cache_ptr->slist_ring_len[i];
+ slist_size += cache_ptr->slist_ring_size[i];
+ } /* end for */
+
+ HDassert(cache_ptr->index_len == index_len);
+ HDassert(cache_ptr->index_size == index_size);
+ HDassert(cache_ptr->clean_index_size == clean_index_size);
+ HDassert(cache_ptr->dirty_index_size == dirty_index_size);
+ HDassert(cache_ptr->slist_len == slist_len);
+ HDassert(cache_ptr->slist_size == slist_size);
+}
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ /* remove ageout markers if present */
+ if(cache_ptr->epoch_markers_active > 0)
+ if(H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers.")
+
+ /* flush invalidate each ring, starting from the outermost ring and
+ * working inward.
+ */
+ ring = H5C_RING_USER;
+ while(ring < H5C_RING_NTYPES) {
+ if(H5C_flush_invalidate_ring(f, dxpl_id, ring, flags) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate ring failed.")
+ ring++;
+ } /* end while */
+
+ /* Invariants, after destroying all entries in the hash table */
+ if(!(flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG)) {
+ HDassert(cache_ptr->index_size == 0);
+ HDassert(cache_ptr->clean_index_size == 0);
+ HDassert(cache_ptr->dirty_index_size == 0);
+ HDassert(cache_ptr->slist_len == 0);
+ HDassert(cache_ptr->slist_size == 0);
+ HDassert(cache_ptr->pel_len == 0);
+ HDassert(cache_ptr->pel_size == 0);
+ HDassert(cache_ptr->pl_len == 0);
+ HDassert(cache_ptr->pl_size == 0);
+ HDassert(cache_ptr->LRU_list_len == 0);
+ HDassert(cache_ptr->LRU_list_size == 0);
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_flush_invalidate_cache() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_flush_invalidate_ring
+ *
+ * Purpose: Flush and destroy the entries contained in the target
+ * cache and ring.
+ *
+ * If the ring contains protected entries, the function will
+ * fail, as protected entries cannot be either flushed or
+ * destroyed. However all unprotected entries should be
+ * flushed and destroyed before the function returns failure.
+ *
+ * While pinned entries can usually be flushed, they cannot
+ * be destroyed. However, they should be unpinned when all
+ * the entries that reference them have been destroyed (thus
+ * reducing the pinned entry's reference count to 0, allowing
+ * it to be unpinned).
+ *
+ * If pinned entries are present, the function makes repeated
+ * passes through the cache, flushing all dirty entries
+ * (including the pinned dirty entries where permitted) and
+ * destroying all unpinned entries. This process is repeated
+ * until either the cache is empty, or the number of pinned
+ * entries stops decreasing on each pass.
+ *
+ * If flush dependencies appear in the target ring, the
+ * function makes repeated passes through the cache flushing
+ * entries in flush dependency order.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 9/1/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
+ unsigned flags)
+{
+ H5C_t * cache_ptr;
hbool_t restart_slist_scan;
int32_t protected_entries = 0;
int32_t i;
- int32_t cur_pel_len;
- int32_t old_pel_len;
+ int32_t cur_ring_pel_len;
+ int32_t old_ring_pel_len;
unsigned cooked_flags;
unsigned evict_flags;
H5SL_node_t * node_ptr = NULL;
H5C_cache_entry_t * entry_ptr = NULL;
H5C_cache_entry_t * next_entry_ptr = NULL;
#if H5C_DO_SANITY_CHECKS
- int64_t flushed_slist_len = 0;
+ int64_t flushed_slist_len = 0;
int64_t initial_slist_len = 0;
int64_t flushed_slist_size = 0;
size_t initial_slist_size = 0;
- int64_t entry_size_change;
- int64_t * entry_size_change_ptr = &entry_size_change;
+ int64_t entry_size_change;
+ int64_t * entry_size_change_ptr = &entry_size_change;
#else /* H5C_DO_SANITY_CHECKS */
int64_t * entry_size_change_ptr = NULL;
#endif /* H5C_DO_SANITY_CHECKS */
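[Annotation: the header comment above describes the repeated-pass protocol in force here -- pinned entries cannot be destroyed, so the function keeps making flush/destroy passes over the ring until the ring empties, and fails if a pass leaves the ring's pinned-entry count positive and no smaller than before. A condensed sketch of that termination logic, using this diff's field names plus a hypothetical helper:]

    /* count the pinned entries assigned to one ring, as the loops
     * over cache_ptr->pel_head_ptr below do inline
     */
    static int32_t
    count_pinned_in_ring(H5C_t *cache_ptr, H5C_ring_t ring)
    {
        int32_t n = 0;
        H5C_cache_entry_t *entry_ptr = cache_ptr->pel_head_ptr;

        while(entry_ptr != NULL) {
            if(entry_ptr->ring == ring)
                n++;
            entry_ptr = entry_ptr->next;
        } /* end while */

        return n;
    }

    /* hypothetical condensation of the pass/termination structure */
    static herr_t
    flush_invalidate_ring_sketch(H5C_t *cache_ptr, H5C_ring_t ring)
    {
        int32_t old_ring_pel_len;
        int32_t cur_ring_pel_len = count_pinned_in_ring(cache_ptr, ring);

        while(cache_ptr->index_ring_len[ring] > 0) {
            /* ... one flush/destroy pass over slist and hash table ... */

            old_ring_pel_len = cur_ring_pel_len;
            cur_ring_pel_len = count_pinned_in_ring(cache_ptr, ring);

            /* pinned count positive and not declining: scream and die */
            if(cur_ring_pel_len > 0 && cur_ring_pel_len >= old_ring_pel_len)
                return FAIL;
        } /* end while */

        return SUCCEED;
    }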
@@ -7298,24 +7090,22 @@ H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
HDassert(cache_ptr);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
HDassert(cache_ptr->slist_ptr);
+ HDassert(ring > H5C_RING_UNDEFINED);
+ HDassert(ring < H5C_RING_NTYPES);
+
+ HDassert(cache_ptr->epoch_markers_active == 0);
/* Filter out the flags that are not relevant to the flush/invalidate.
- * At present, only the H5C__FLUSH_CLEAR_ONLY_FLAG is kept.
*/
cooked_flags = flags & H5C__FLUSH_CLEAR_ONLY_FLAG;
evict_flags = flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG;
- /* remove ageout markers if present */
- if(cache_ptr->epoch_markers_active > 0)
- if(H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers.")
-
/* The flush procedure here is a bit strange.
*
* In the outer while loop we make at least one pass through the
- * cache, and then repeat until either all the pinned entries
- * unpin themselves, or until the number of pinned entries stops
- * declining. In this latter case, we scream and die.
+ * cache, and then repeat until either all the pinned entries in
+ * the ring unpin themselves, or until the number of pinned entries
+ * in the ring stops declining. In this latter case, we scream and die.
*
* Since the fractal heap can dirty, resize, and/or move entries
* in its flush callback, it is possible that the cache will still
@@ -7339,13 +7129,20 @@ H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
* to increasing address order, but there are no guarantees.
*/
- cur_pel_len = cache_ptr->pel_len;
- old_pel_len = cache_ptr->pel_len;
-
- while ( cache_ptr->index_len > 0 )
- {
- protected_entries = 0;
-
+ /* compute the number of pinned entries in this ring */
+ entry_ptr = cache_ptr->pel_head_ptr;
+ cur_ring_pel_len = 0;
+ while(entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring >= ring);
+ if(entry_ptr->ring == ring)
+ cur_ring_pel_len++;
+
+ entry_ptr = entry_ptr->next;
+ } /* end while */
+
+ old_ring_pel_len = cur_ring_pel_len;
+ while(cache_ptr->index_ring_len[ring] > 0) {
/* first, try to flush-destroy any dirty entries. Do this by
* making a scan through the slist. Note that new dirty entries
* may be created by the flush callbacks. Thus it is possible
@@ -7353,35 +7150,37 @@ H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
*/
#if H5C_DO_SANITY_CHECKS
- /* Depending on circumstances, H5C__flush_single_entry() will
- * remove dirty entries from the slist as it flushes them.
+ /* Depending on circumstances, H5C__flush_single_entry() will
+ * remove dirty entries from the slist as it flushes them.
* Thus for sanity checks we must make note of the initial
* slist length and size before we do any flushes.
*/
initial_slist_len = cache_ptr->slist_len;
- initial_slist_size = cache_ptr->slist_size;
+ initial_slist_size = cache_ptr->slist_size;
/* There is also the possibility that entries will be
- * dirtied, resized, and/or moved as the result of
- * calls to the flush callbacks. We use the slist_len_increase
- * and slist_size_increase fields in struct H5C_t
- * to track these changes for purposes of sanity checking.
+ * dirtied, resized, moved, and/or removed from the cache
+ * as the result of calls to the flush callbacks. We use
+ * the slist_len_increase and slist_size_increase
+ * fields in struct H5C_t to track these changes for purposes
+ * of sanity checking.
+ *
* To this end, we must zero these fields before we start
* the pass through the slist.
*/
cache_ptr->slist_len_increase = 0;
cache_ptr->slist_size_increase = 0;
- /* Finally, reset the flushed_slist_len and flushed_slist_size
+ /* Finally, reset the flushed_slist_len and flushed_slist_size
* fields to zero, as these fields are used to accumulate
* the slist length and size that we see as we scan through
* the slist.
*/
- flushed_slist_len = 0;
- flushed_slist_size = 0;
+ flushed_slist_len = 0;
+ flushed_slist_size = 0;
#endif /* H5C_DO_SANITY_CHECKS */
- /* set the cache_ptr->slist_change_in_pre_serialize and
+ /* set the cache_ptr->slist_change_in_pre_serialize and
* cache_ptr->slist_change_in_serialize to false.
*
* These flags are set to TRUE by H5C__flush_single_entry if the
@@ -7393,18 +7192,16 @@ H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
* the slist -- and restart the scan in this event.
*/
cache_ptr->slist_change_in_pre_serialize = FALSE;
- cache_ptr->slist_change_in_serialize = FALSE;
-
- /* this done, start the scan of the slist */
- restart_slist_scan = TRUE;
+ cache_ptr->slist_change_in_serialize = FALSE;
+ /* this done, start the scan of the slist */
+ restart_slist_scan = TRUE;
while(restart_slist_scan || (node_ptr != NULL)) {
if(restart_slist_scan) {
restart_slist_scan = FALSE;
/* Start at beginning of skip list */
node_ptr = H5SL_first(cache_ptr->slist_ptr);
-
if(node_ptr == NULL)
/* the slist is empty -- break out of inner loop */
break;
@@ -7417,24 +7214,26 @@ H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(next_entry_ptr->is_dirty);
HDassert(next_entry_ptr->in_slist);
+ HDassert(next_entry_ptr->ring >= ring);
} /* end if */
- entry_ptr = next_entry_ptr;
+ entry_ptr = next_entry_ptr;
- /* It is possible that entries will be dirtied, resized,
- * flushed, or removed from the cache via the take ownership
- * flag as the result of pre_serialize or serialized callbacks.
- *
+ /* It is possible that entries will be dirtied, resized,
+ * flushed, or removed from the cache via the take ownership
+ * flag as the result of pre_serialize or serialized callbacks.
+ *
* This in turn can corrupt the scan through the slist.
*
- * We test for slist modifications in the pre_serialize
- * and serialize callbacks, and restart the scan of the
+ * We test for slist modifications in the pre_serialize
+ * and serialize callbacks, and restart the scan of the
* slist if we find them. However, it is best we do some extra
- * sanity checking just in case.
- */
+ * sanity checking just in case.
+ */
HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(entry_ptr->in_slist);
HDassert(entry_ptr->is_dirty);
+ HDassert(entry_ptr->ring >= ring);
/* increment node pointer now, before we delete its target
* from the slist.
@@ -7447,6 +7246,7 @@ H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(next_entry_ptr->is_dirty);
HDassert(next_entry_ptr->in_slist);
+ HDassert(next_entry_ptr->ring >= ring);
HDassert(entry_ptr != next_entry_ptr);
} /* end if */
else
@@ -7465,11 +7265,11 @@ H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
HDassert(entry_ptr != NULL);
HDassert(entry_ptr->in_slist);
- if ( ( ( ! entry_ptr->flush_me_last ) ||
- ( ( entry_ptr->flush_me_last ) &&
- ( cache_ptr->num_last_entries >=
- cache_ptr->slist_len ) ) ) &&
- ( entry_ptr->flush_dep_nchildren == 0 ) ) {
+ if(((!entry_ptr->flush_me_last) ||
+ ((entry_ptr->flush_me_last) &&
+ (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
+ (entry_ptr->flush_dep_nchildren == 0) &&
+ (entry_ptr->ring == ring)) {
if(entry_ptr->is_protected) {
/* we have major problems -- but let's flush
* everything we can before we flag an error.
@@ -7478,87 +7278,83 @@ H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
} else if(entry_ptr->is_pinned) {
#if H5C_DO_SANITY_CHECKS
- /* update flushed_slist_len & flushed_slist_size
- * before the flush. Note that the entry will
- * be removed from the slist after the flush,
- * and thus may be resized by the flush callback.
- * This is OK, as we will catch the size delta in
- * cache_ptr->slist_size_increase.
- *
- */
- flushed_slist_len++;
- flushed_slist_size += (int64_t)entry_ptr->size;
- entry_size_change = 0;
+ /* update flushed_slist_len & flushed_slist_size
+ * before the flush. Note that the entry will
+ * be removed from the slist after the flush,
+ * and thus may be resized by the flush callback.
+ * This is OK, as we will catch the size delta in
+ * cache_ptr->slist_size_increase.
+ *
+ */
+ flushed_slist_len++;
+ flushed_slist_size += (int64_t)entry_ptr->size;
+ entry_size_change = 0;
#endif /* H5C_DO_SANITY_CHECKS */
- if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__NO_FLAGS_SET, entry_size_change_ptr) < 0)
- /* This shouldn't happen -- if it does, we are toast
- * so just scream and die.
- */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed.")
+ if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__NO_FLAGS_SET, entry_size_change_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed.")
#if H5C_DO_SANITY_CHECKS
- /* entry size may have changed during the flush.
- * Update flushed_slist_size to account for this.
+ /* entry size may have changed during the flush.
+ * Update flushed_slist_size to account for this.
*/
- flushed_slist_size += entry_size_change;
+ flushed_slist_size += entry_size_change;
#endif /* H5C_DO_SANITY_CHECKS */
- if ( ( cache_ptr->slist_change_in_serialize ) ||
- ( cache_ptr->slist_change_in_pre_serialize ) )
- {
- /* The slist has been modified by something
- * other than the simple removal of
- * the flushed entry after the flush.
- *
- * This has the potential to corrupt the
- * scan through the slist, so restart it.
- */
- restart_slist_scan = TRUE;
- cache_ptr->slist_change_in_pre_serialize = FALSE;
- cache_ptr->slist_change_in_serialize = FALSE;
- H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr);
- }
+ if((cache_ptr->slist_change_in_serialize) ||
+ (cache_ptr->slist_change_in_pre_serialize)) {
+ /* The slist has been modified by something
+ * other than the simple removal of
+ * the flushed entry after the flush.
+ *
+ * This has the potential to corrupt the
+ * scan through the slist, so restart it.
+ */
+ restart_slist_scan = TRUE;
+ cache_ptr->slist_change_in_pre_serialize = FALSE;
+ cache_ptr->slist_change_in_serialize = FALSE;
+ H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr);
+ } /* end if */
} /* end if */
else {
#if H5C_DO_SANITY_CHECKS
- /* update flushed_slist_len & flushed_slist_size
- * before the flush. Note that the entry will
- * be removed from the slist after the flush,
- * and thus may be resized by the flush callback.
- * This is OK, as we will catch the size delta in
- * cache_ptr->slist_size_increase.
- *
- */
- flushed_slist_len++;
- flushed_slist_size += (int64_t)entry_ptr->size;
- entry_size_change = 0;
+ /* update flushed_slist_len & flushed_slist_size
+ * before the flush. Note that the entry will
+ * be removed from the slist after the flush,
+ * and thus may be resized by the flush callback.
+ * This is OK, as we will catch the size delta in
+ * cache_ptr->slist_size_increase.
+ *
+ */
+ flushed_slist_len++;
+ flushed_slist_size += (int64_t)entry_ptr->size;
+ entry_size_change = 0;
#endif /* H5C_DO_SANITY_CHECKS */
- if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, (cooked_flags | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG), entry_size_change_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry flush destroy failed.")
+ if(H5C__flush_single_entry(f, dxpl_id, entry_ptr,
+ (cooked_flags | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG),
+ entry_size_change_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry flush destroy failed.")
#if H5C_DO_SANITY_CHECKS
- /* entry size may have changed during the flush.
- * Update flushed_slist_size to account for this.
- */
- flushed_slist_size += entry_size_change;
+ /* entry size may have changed during the flush.
+ * Update flushed_slist_size to account for this.
+ */
+ flushed_slist_size += entry_size_change;
#endif /* H5C_DO_SANITY_CHECKS */
- if ((cache_ptr->slist_change_in_serialize) ||
- (cache_ptr->slist_change_in_pre_serialize))
- {
- /* The slist has been modified by something
- * other than the simple removal of
- * the flushed entry after the flush.
- *
+ if((cache_ptr->slist_change_in_serialize) ||
+ (cache_ptr->slist_change_in_pre_serialize)) {
+ /* The slist has been modified by something
+ * other than the simple removal of
+ * the flushed entry after the flush.
+ *
* This has the potential to corrupt the
* scan through the slist, so restart it.
*/
- restart_slist_scan = TRUE;
- cache_ptr->slist_change_in_pre_serialize = FALSE;
+ restart_slist_scan = TRUE;
+ cache_ptr->slist_change_in_pre_serialize = FALSE;
cache_ptr->slist_change_in_serialize = FALSE;
H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
- }
-
+ } /* end if */
} /* end else */
} /* end if */
} /* end while loop scanning skip list */
@@ -7597,42 +7393,44 @@ H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
while(next_entry_ptr != NULL) {
entry_ptr = next_entry_ptr;
- HDassert( entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC );
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring >= ring);
next_entry_ptr = entry_ptr->ht_next;
HDassert((next_entry_ptr == NULL) ||
(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC));
- if ( ( ( ! entry_ptr->flush_me_last ) ||
- ( ( entry_ptr->flush_me_last ) &&
- ( cache_ptr->num_last_entries >=
- cache_ptr->slist_len ) ) ) &&
- ( entry_ptr->flush_dep_nchildren == 0 ) ) {
+ if(((!entry_ptr->flush_me_last) ||
+ ((entry_ptr->flush_me_last) &&
+ (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
+ (entry_ptr->flush_dep_nchildren == 0) &&
+ (entry_ptr->ring == ring)) {
if(entry_ptr->is_protected) {
- /* we have major problems -- but let's flush and destroy
- * everything we can before we flag an error.
+ /* we have major problems -- but let's flush and
+ * destroy everything we can before we flag an
+ * error.
*/
protected_entries++;
-
if(!entry_ptr->in_slist)
HDassert(!(entry_ptr->is_dirty));
} else if(!(entry_ptr->is_pinned)) {
- /* if *entry_ptr is dirty, it is possible
- * that one or more other entries may be
- * either removed from the cache, loaded
+
+ /* if *entry_ptr is dirty, it is possible
+ * that one or more other entries may be
+ * either removed from the cache, loaded
* into the cache, or moved to a new location
* in the file as a side effect of the flush.
*
- * If this happens, and one of the target
- * entries happens to be the next entry in
- * the hash bucket, we could find ourselves
- * either find ourselves either scanning a
- * non-existant entry, scanning through a
+ * If this happens, and one of the target
+ * entries happens to be the next entry in
+ * the hash bucket, we could find ourselves
+                     * either scanning a non-existent entry,
+                     * scanning through a
* different bucket, or skipping an entry.
*
- * Neither of these are good, so restart the
- * the scan at the head of the hash bucket
+                     * None of these is good, so restart the
+                     * scan at the head of the hash bucket
* after the flush if *entry_ptr was dirty,
* on the off chance that the next entry was
* a target.
@@ -7643,10 +7441,12 @@ H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
*/
hbool_t entry_was_dirty;
- entry_was_dirty = entry_ptr->is_dirty;
+ entry_was_dirty = entry_ptr->is_dirty;
- if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, (cooked_flags | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG), NULL) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed.")
+ if(H5C__flush_single_entry(f, dxpl_id, entry_ptr,
+ (cooked_flags | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG),
+ NULL) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed.")
if(entry_was_dirty) {
/* update stats for hash bucket scan
@@ -7658,6 +7458,7 @@ H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
} /* end if */
} /* end if */
} /* end if */
+
/* We can't do anything if the entry is pinned. The
* hope is that the entry will be unpinned as the
* result of destroys of entries that reference it.
@@ -7666,7 +7467,7 @@ H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
* of pinned entries from pass to pass. If it stops
* shrinking before it hits zero, we scream and die.
*/
- /* if the serialize function on the entry we last evicted
+ /* if the serialize function on the entry we last evicted
* loaded an entry into cache (as Quincey has promised me
* it never will), and if the cache was full, it is
* possible that *next_entry_ptr was flushed or evicted.
@@ -7675,9 +7476,9 @@ H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
             * test is triggered, we are accessing a deallocated piece
* of dynamically allocated memory, so we just scream and
* die.
- *
- * Update: The code to restart the scan after flushes
- * of dirty entries should make it impossible
+ *
+ * Update: The code to restart the scan after flushes
+ * of dirty entries should make it impossible
* to satisfy the following test. Leave it in
* in case I am wrong.
* -- JRM
@@ -7690,61 +7491,393 @@ H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
} /* end while loop scanning hash table bin */
} /* end for loop scanning hash table */
- old_pel_len = cur_pel_len;
- cur_pel_len = cache_ptr->pel_len;
-
- if ( ( cur_pel_len > 0 ) && ( cur_pel_len >= old_pel_len ) ) {
+ old_ring_pel_len = cur_ring_pel_len;
+ entry_ptr = cache_ptr->pel_head_ptr;
+ cur_ring_pel_len = 0;
+ while(entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring >= ring);
- if(evict_flags) HGOTO_DONE(TRUE)
+ if(entry_ptr->ring == ring)
+ cur_ring_pel_len++;
- /* The number of pinned entries is positive, and it is not
- * declining. Scream and die.
- */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Pinned entry count not decreasing, cur_pel_len = %d, old_pel_len = %d", (int)cur_pel_len, (int)old_pel_len)
- }
- HDassert( protected_entries == cache_ptr->pl_len );
- if ( ( protected_entries > 0 ) &&
- ( protected_entries == cache_ptr->index_len ) ) {
+ entry_ptr = entry_ptr->next;
+ } /* end while */
- /* Only protected entries are left in the cache. Scream and die.
- */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
- "Only protected entries left in cache, protected_entries = %d", (int)protected_entries)
+ if((cur_ring_pel_len > 0) && (cur_ring_pel_len >= old_ring_pel_len)) {
+ /* Don't error if allowed to have pinned entries remaining */
+ if(evict_flags)
+ HGOTO_DONE(TRUE)
- }
+ /* The number of pinned entries in the ring is positive, and
+ * it is not declining. Scream and die.
+ */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Pinned entry count not decreasing, cur_ring_pel_len = %d, old_ring_pel_len = %d, ring = %d", (int)cur_ring_pel_len, (int)old_ring_pel_len, (int)ring)
+ } /* end if */
+ HDassert(protected_entries == cache_ptr->pl_len);
+ if((protected_entries > 0) && (protected_entries == cache_ptr->index_len))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Only protected entries left in cache, protected_entries = %d", (int)protected_entries)
} /* main while loop */
- /* Invariants, after destroying all entries in the hash table */
- HDassert(cache_ptr->index_size == 0);
- HDassert(cache_ptr->clean_index_size == 0);
- HDassert(cache_ptr->dirty_index_size == 0);
- HDassert(cache_ptr->slist_len == 0);
- HDassert(cache_ptr->slist_size == 0);
- HDassert(cache_ptr->pel_len == 0);
- HDassert(cache_ptr->pel_size == 0);
- HDassert(cache_ptr->pl_len == 0);
- HDassert(cache_ptr->pl_size == 0);
- HDassert(cache_ptr->LRU_list_len == 0);
- HDassert(cache_ptr->LRU_list_size == 0);
+ /* Invariants, after destroying all entries in the ring */
+ for(i = (int)H5C_RING_UNDEFINED; i <= (int)ring; i++) {
+ HDassert(cache_ptr->index_ring_len[i] == 0);
+ HDassert(cache_ptr->index_ring_size[i] == (size_t)0);
+ HDassert(cache_ptr->clean_index_ring_size[i] == (size_t)0);
+ HDassert(cache_ptr->dirty_index_ring_size[i] == (size_t)0);
+ HDassert(cache_ptr->slist_ring_len[i] == 0);
+ HDassert(cache_ptr->slist_ring_size[i] == (size_t)0);
+ } /* end for */
HDassert(protected_entries <= cache_ptr->pl_len);
if(protected_entries > 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Cache has protected entries.")
+ else if(cur_ring_pel_len > 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't unpin all pinned entries in ring.")
- else if(cur_pel_len > 0) {
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_flush_invalidate_ring() */
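[Editor's note: for context, a hedged sketch of how a caller might drive ring-by-ring invalidation. The ring names and the helper below are invented for illustration -- the real values live in H5C_ring_t -- but the ordering mirrors the HDassert(entry_ptr->ring >= ring) checks above: rings are processed in increasing order, so the metadata everything else depends on is invalidated last.]

#include <stdio.h>

enum ring { RING_USER, RING_FSM, RING_SBE, RING_SB, RING_NTYPES };

static int flush_invalidate_ring(enum ring r)
{
    printf("flush+invalidate ring %d\n", (int)r);
    return 0;                          /* 0 == success in this sketch */
}

int main(void)
{
    /* Rings are processed in increasing order; a ring may only be
     * invalidated once all lower-numbered rings are already empty.
     */
    for (int r = RING_USER; r < RING_NTYPES; r++)
        if (flush_invalidate_ring((enum ring)r) != 0)
            return 1;
    return 0;
}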
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't unpin all pinned entries.")
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_flush_ring
+ *
+ * Purpose: Flush the entries contained in the specified cache and
+ * ring. All entries in rings outside the specified ring
+ * must have been flushed on entry.
+ *
+ * If the cache contains protected entries in the specified
+ * ring, the function will fail, as protected entries cannot
+ * be flushed. However all unprotected entries in the target
+ * ring should be flushed before the function returns failure.
+ *
+ * If flush dependencies appear in the target ring, the
+ * function makes repeated passes through the slist flushing
+ * entries in flush dependency order.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 9/1/15
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring, unsigned flags)
+{
+ H5C_t * cache_ptr = f->shared->cache;
+ hbool_t destroy;
+ hbool_t flushed_entries_last_pass;
+ hbool_t flush_marked_entries;
+ hbool_t ignore_protected;
+ hbool_t tried_to_flush_protected_entry = FALSE;
+ hbool_t restart_slist_scan;
+ int32_t protected_entries = 0;
+ H5SL_node_t * node_ptr = NULL;
+ H5C_cache_entry_t * entry_ptr = NULL;
+ H5C_cache_entry_t * next_entry_ptr = NULL;
+#if H5C_DO_SANITY_CHECKS
+ int64_t flushed_entries_count = 0;
+ int64_t flushed_entries_size = 0;
+ int64_t initial_slist_len = 0;
+ size_t initial_slist_size = 0;
+ int64_t entry_size_change;
+ int64_t * entry_size_change_ptr = &entry_size_change;
+#else /* H5C_DO_SANITY_CHECKS */
+ int64_t * entry_size_change_ptr = NULL;
+#endif /* H5C_DO_SANITY_CHECKS */
+ int i;
+ herr_t ret_value = SUCCEED;
- }
+ FUNC_ENTER_NOAPI(FAIL)
-done:
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_ptr);
+ HDassert((flags & H5C__FLUSH_INVALIDATE_FLAG) == 0);
+ HDassert(ring > H5C_RING_UNDEFINED);
+ HDassert(ring < H5C_RING_NTYPES);
- FUNC_LEAVE_NOAPI(ret_value)
+#if H5C_DO_EXTREME_SANITY_CHECKS
+    if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+            (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
+            (H5C_validate_lru_list(cache_ptr) < 0))
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry.\n")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-} /* H5C_flush_invalidate_cache() */
+    ignore_protected = ((flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0);
+    destroy = ((flags & H5C__FLUSH_INVALIDATE_FLAG) != 0);
+    flush_marked_entries = ((flags & H5C__FLUSH_MARKED_ENTRIES_FLAG) != 0);
+
+ if(!flush_marked_entries)
+ for(i = (int)H5C_RING_UNDEFINED; i < (int)ring; i++)
+ HDassert(cache_ptr->slist_ring_len[i] == 0);
+
+ HDassert(cache_ptr->flush_in_progress);
+
+ /* When we are only flushing marked entries, the slist will usually
+ * still contain entries when we have flushed everything we should.
+ * Thus we track whether we have flushed any entries in the last
+ * pass, and terminate if we haven't.
+ */
+ flushed_entries_last_pass = TRUE;
+
+ /* set the cache_ptr->slist_change_in_pre_serialize and
+ * cache_ptr->slist_change_in_serialize to false.
+ *
+ * These flags are set to TRUE by H5C__flush_single_entry if the
+ * slist is modified by a pre_serialize or serialize call respectively.
+ * H5C_flush_cache uses these flags to detect any modifications
+ * to the slist that might corrupt the scan of the slist -- and
+ * restart the scan in this event.
+ */
+ cache_ptr->slist_change_in_pre_serialize = FALSE;
+ cache_ptr->slist_change_in_serialize = FALSE;
+
+ while((cache_ptr->slist_ring_len[ring] > 0) &&
+ (protected_entries == 0) &&
+ (flushed_entries_last_pass)) {
+ flushed_entries_last_pass = FALSE;
+
+#if H5C_DO_SANITY_CHECKS
+ /* For sanity checking, try to verify that the skip list has
+ * the expected size and number of entries at the end of each
+ * internal while loop (see below).
+ *
+     * Doing this gets a bit tricky, as depending on flags, we may
+ * or may not flush all the entries in the slist.
+ *
+ * To make things more entertaining, with the advent of the
+ * fractal heap, the entry serialize callback can cause entries
+ * to be dirtied, resized, and/or moved. Also, the
+ * pre_serialize callback can result in an entry being
+ * removed from the cache via the take ownership flag.
+ *
+ * To deal with this, we first make note of the initial
+ * skip list length and size:
+ */
+ initial_slist_len = cache_ptr->slist_len;
+ initial_slist_size = cache_ptr->slist_size;
+
+ /* We then zero counters that we use to track the number
+ * and total size of entries flushed:
+ */
+ flushed_entries_count = 0;
+ flushed_entries_size = 0;
+
+ /* As mentioned above, there is the possibility that
+ * entries will be dirtied, resized, flushed, or removed
+ * from the cache via the take ownership flag during
+ * our pass through the skip list. To capture the number
+ * of entries added, and the skip list size delta,
+ * zero the slist_len_increase and slist_size_increase of
+ * the cache's instance of H5C_t. These fields will be
+ * updated elsewhere to account for slist insertions and/or
+ * dirty entry size changes.
+ */
+ cache_ptr->slist_len_increase = 0;
+ cache_ptr->slist_size_increase = 0;
+
+ /* at the end of the loop, use these values to compute the
+ * expected slist length and size and compare this with the
+ * value recorded in the cache's instance of H5C_t.
+ */
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ restart_slist_scan = TRUE;
+
+        while((restart_slist_scan) || (node_ptr != NULL)) {
+ if(restart_slist_scan) {
+ restart_slist_scan = FALSE;
+
+ /* Start at beginning of skip list */
+ node_ptr = H5SL_first(cache_ptr->slist_ptr);
+
+ if(node_ptr == NULL)
+ /* the slist is empty -- break out of inner loop */
+ break;
+
+ /* Get cache entry for this node */
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+
+ if(NULL == next_entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+
+ HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(next_entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->in_slist);
+ } /* end if */
+
+ entry_ptr = next_entry_ptr;
+
+ /* With the advent of the fractal heap, the free space
+ * manager, and the version 3 cache, it is possible
+ * that the pre-serialize or serialize callback will
+ * dirty, resize, or take ownership of other entries
+ * in the cache.
+ *
+ * To deal with this, I have inserted code to detect any
+ * change in the skip list not directly under the control
+ * of this function. If such modifications are detected,
+ * we must re-start the scan of the skip list to avoid
+ * the possibility that the target of the next_entry_ptr
+ * may have been flushed or deleted from the cache.
+ *
+ * To verify that all such possibilities have been dealt
+ * with, we do a bit of extra sanity checking on
+ * entry_ptr.
+ */
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->in_slist);
+ HDassert(entry_ptr->is_dirty);
+ if(!flush_marked_entries || entry_ptr->flush_marker)
+ HDassert(entry_ptr->ring >= ring);
+
+ /* increment node pointer now, before we delete its target
+ * from the slist.
+ */
+ node_ptr = H5SL_next(node_ptr);
+ if(node_ptr != NULL) {
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ if(NULL == next_entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+
+ HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(next_entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->in_slist);
+ if(!flush_marked_entries || next_entry_ptr->flush_marker)
+ HDassert(next_entry_ptr->ring >= ring);
+
+ HDassert(entry_ptr != next_entry_ptr);
+ } /* end if */
+ else
+ next_entry_ptr = NULL;
+
+ HDassert(entry_ptr != NULL);
+ HDassert(entry_ptr->in_slist);
+
+ if(((!flush_marked_entries) || (entry_ptr->flush_marker)) &&
+ ((!entry_ptr->flush_me_last) ||
+ (entry_ptr->flush_me_last &&
+ ((cache_ptr->num_last_entries >= cache_ptr->slist_len) ||
+ (flush_marked_entries && entry_ptr->flush_marker)))) &&
+                ((entry_ptr->flush_dep_nchildren == 0) ||
+                    ((!destroy) &&
+                    (entry_ptr->flush_dep_ndirty_children == 0))) &&
+ (entry_ptr->ring == ring)) {
+ if(entry_ptr->is_protected) {
+                /* we probably have major problems -- but let's
+ * flush everything we can before we decide
+ * whether to flag an error.
+ */
+ tried_to_flush_protected_entry = TRUE;
+ protected_entries++;
+ } /* end if */
+ else if(entry_ptr->is_pinned) {
+
+#if H5C_DO_SANITY_CHECKS
+ flushed_entries_count++;
+ flushed_entries_size += (int64_t)entry_ptr->size;
+ entry_size_change = 0;
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flags, entry_size_change_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed.")
+
+#if H5C_DO_SANITY_CHECKS
+ /* it is possible that the entry size changed
+ * during flush -- update flushed_entries_size
+ * to account for this.
+ */
+ flushed_entries_size += entry_size_change;
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ if((cache_ptr->slist_change_in_serialize) ||
+ (cache_ptr->slist_change_in_pre_serialize)) {
+ /* The slist has been modified by something
+                     * other than the simple removal of
+                     * the flushed entry after the flush.
+ *
+ * This has the potential to corrupt the
+ * scan through the slist, so restart it.
+ */
+ restart_slist_scan = TRUE;
+ cache_ptr->slist_change_in_pre_serialize = FALSE;
+ cache_ptr->slist_change_in_serialize = FALSE;
+
+ H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
+ } /* end if */
+
+ flushed_entries_last_pass = TRUE;
+ } /* end else-if */
+ else {
+#if H5C_DO_SANITY_CHECKS
+ flushed_entries_count++;
+ flushed_entries_size += (int64_t)entry_ptr->size;
+ entry_size_change = 0;
+#endif /* H5C_DO_SANITY_CHECKS */
+ if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flags, entry_size_change_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry.")
+
+#if H5C_DO_SANITY_CHECKS
+ /* it is possible that the entry size changed
+ * during flush -- update flushed_entries_size
+ * to account for this.
+ */
+ flushed_entries_size += entry_size_change;
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ if((cache_ptr->slist_change_in_serialize) ||
+ (cache_ptr->slist_change_in_pre_serialize)) {
+ /* The slist has been modified by something
+                     * other than the simple removal of
+                     * the flushed entry after the flush.
+ *
+ * This has the potential to corrupt the
+ * scan through the slist, so restart it.
+ */
+ restart_slist_scan = TRUE;
+ cache_ptr->slist_change_in_pre_serialize = FALSE;
+ cache_ptr->slist_change_in_serialize = FALSE;
+
+ H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
+ } /* end if */
+
+ flushed_entries_last_pass = TRUE;
+ } /* end else */
+ } /* end if */
+ } /* while ( ( restart_slist_scan ) || ( node_ptr != NULL ) ) */
+
+#if H5C_DO_SANITY_CHECKS
+ /* Verify that the slist size and length are as expected. */
+ HDassert((initial_slist_len + cache_ptr->slist_len_increase -
+ flushed_entries_count) == cache_ptr->slist_len);
+ HDassert((size_t)((int64_t)initial_slist_size +
+ cache_ptr->slist_size_increase -
+ flushed_entries_size) == cache_ptr->slist_size);
+#endif /* H5C_DO_SANITY_CHECKS */
+ } /* while */
+
+ HDassert(protected_entries <= cache_ptr->pl_len);
+
+ if(((cache_ptr->pl_len > 0) && (!ignore_protected)) || (tried_to_flush_protected_entry))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "cache has protected items")
+
+#if H5C_DO_SANITY_CHECKS
+ if(!flush_marked_entries) {
+ HDassert(cache_ptr->slist_ring_len[ring] == 0);
+ HDassert(cache_ptr->slist_ring_size[ring] == 0);
+ } /* end if */
+#endif /* H5C_DO_SANITY_CHECKS */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_flush_ring() */
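[Editor's note: the sanity-check arithmetic in H5C_flush_ring reduces to one invariant -- final slist length equals initial length, plus insertions made by serialize callbacks, minus entries flushed this pass. A tiny self-contained check of that bookkeeping, with illustrative names rather than the real H5C_t fields:]

#include <assert.h>
#include <stdint.h>

struct slist_stats {
    int64_t len;            /* skip list length after the pass */
    int64_t len_increase;   /* net insertions made by callbacks */
};

static void check_pass(const struct slist_stats *s,
                       int64_t initial_len, int64_t flushed)
{
    /* mirrors: initial_slist_len + slist_len_increase
     *          - flushed_entries_count == slist_len
     */
    assert(initial_len + s->len_increase - flushed == s->len);
}

int main(void)
{
    /* 10 entries at the start of the pass; callbacks dirtied 2 more;
     * the pass flushed 5 -- so 7 must remain.
     */
    struct slist_stats s = { .len = 7, .len_increase = 2 };
    check_pass(&s, 10, 5);
    return 0;
}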
/*-------------------------------------------------------------------------
@@ -7760,7 +7893,7 @@ done:
* error.
*
* If the H5C__FLUSH_INVALIDATE_FLAG flag is set, the entry will
- * be cleared and not flushed, and the call can't be part of a
+ * be cleared and not flushed, and the call can't be part of a
* sequence of flushes.
*
* If the caller knows the address of the skip list node at
@@ -7778,33 +7911,33 @@ done:
*
* Programmer: John Mainzer, 5/5/04
*
- * Changes: Refactored function to remove the type_ptr parameter.
+ * Changes: Refactored function to remove the type_ptr parameter.
*
- * JRM -- 8/7/14
+ * JRM -- 8/7/14
*
* Added code to check for slist changes in pre_serialize and
- * serialize calls, and set
- * cache_ptr->slist_change_in_pre_serialize and
- * cache_ptr->slist_change_in_serialize as appropriate.
+ * serialize calls, and set
+ * cache_ptr->slist_change_in_pre_serialize and
+ * cache_ptr->slist_change_in_serialize as appropriate.
*
* JRM -- 12/13/14
*
- * Refactored function to delay all modifications of the
- * metadata cache data structures until after any calls
- * to the pre-serialize or serialize callbacks.
+ * Refactored function to delay all modifications of the
+ * metadata cache data structures until after any calls
+ * to the pre-serialize or serialize callbacks.
*
- * Need to do this, as some pre-serialize or serialize
- * calls result in calls to the metadata cache and
- * modifications to its data structures. Thus, at the
- * time of any such call, the target entry flags and
- * the metadata cache must all be consistant.
+ * Need to do this, as some pre-serialize or serialize
+ * calls result in calls to the metadata cache and
+ * modifications to its data structures. Thus, at the
+ * time of any such call, the target entry flags and
+ *                     the metadata cache must all be consistent.
*
- * Also added the entry_size_change_ptr parameter, which
- * allows the function to report back any change in the size
- * of the entry during the flush. Such size changes may
- * occur during the pre-serialize callback.
+ * Also added the entry_size_change_ptr parameter, which
+ * allows the function to report back any change in the size
+ * of the entry during the flush. Such size changes may
+ * occur during the pre-serialize callback.
*
- * JRM -- 12/24/14
+ * JRM -- 12/24/14
*
*-------------------------------------------------------------------------
*/
@@ -7815,16 +7948,16 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
H5C_t * cache_ptr; /* Cache for file */
hbool_t destroy; /* external flag */
hbool_t clear_only; /* external flag */
- hbool_t free_file_space; /* external flag */
+ hbool_t free_file_space; /* external flag */
hbool_t take_ownership; /* external flag */
hbool_t del_from_slist_on_destroy; /* external flag */
- hbool_t write_entry; /* internal flag */
- hbool_t destroy_entry; /* internal flag */
+ hbool_t write_entry; /* internal flag */
+ hbool_t destroy_entry; /* internal flag */
hbool_t was_dirty;
- haddr_t new_addr = HADDR_UNDEF;
- haddr_t old_addr = HADDR_UNDEF;
- size_t new_len = 0;
- size_t new_compressed_len = 0;
+ haddr_t new_addr = HADDR_UNDEF;
+ haddr_t old_addr = HADDR_UNDEF;
+ size_t new_len = 0;
+ size_t new_compressed_len = 0;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -7834,6 +7967,7 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
HDassert(cache_ptr);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
HDassert(entry_ptr);
+ HDassert(entry_ptr->ring != H5C_RING_UNDEFINED);
/* If defined, initialize *entry_size_change_ptr to 0 */
if(entry_size_change_ptr != NULL)
@@ -8237,7 +8371,7 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
/* only log a clear if the entry was dirty */
if(was_dirty) {
H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr)
- } /* end if */
+ } /* end if */
} else if(write_entry) {
HDassert(was_dirty);
@@ -8279,34 +8413,30 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr)
- if(entry_ptr->in_slist && del_from_slist_on_destroy)
+ if(entry_ptr->in_slist && del_from_slist_on_destroy)
H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr)
- H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, FAIL)
+ H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, FAIL)
/* Destroy parent flush dependencies and free flush dependency parent
* array, if necessary */
- HDassert( entry_ptr->flush_dep_nchildren == 0 );
- if ( entry_ptr->flush_dep_parent_nalloc > 0 ) {
-
- while ( entry_ptr->flush_dep_nparents > 0 ) {
-
+ HDassert(entry_ptr->flush_dep_nchildren == 0);
+ if(entry_ptr->flush_dep_parent_nalloc > 0) {
+ while(entry_ptr->flush_dep_nparents > 0) {
            /* Since the flush_dep_parent array will be resized every time
             * a flush dependency is destroyed, we do not have to
* iterate over all indices. Instead, always destroy the
* last dependency so we can skip the memmove() in
* H5C_destroy_flush_dependency(). */
- if ( H5C_destroy_flush_dependency(entry_ptr->flush_dep_parent[entry_ptr->flush_dep_nparents - 1],
- entry_ptr) < 0 ) {
+ if(H5C_destroy_flush_dependency(entry_ptr->flush_dep_parent[entry_ptr->flush_dep_nparents - 1], entry_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Can't remove flush dependency")
- }
- }
+ } /* end while */
/* H5C_destroy_flush_dependency should have freed the parent
* array when it became empty */
- HDassert( ! (entry_ptr->flush_dep_parent) );
- HDassert( entry_ptr->flush_dep_parent_nalloc == 0 );
- }
+ HDassert(!(entry_ptr->flush_dep_parent));
+ HDassert(entry_ptr->flush_dep_parent_nalloc == 0);
+ } /* end if */
}
else {
HDassert(clear_only || write_entry);
@@ -8339,18 +8469,13 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
/* Propagate the clean flag up the flush dependency chain if
* appropriate */
- if ( was_dirty ) {
-
- HDassert( entry_ptr->flush_dep_ndirty_children == 0 );
-
- if ( entry_ptr->flush_dep_nparents > 0 ) {
-
- if ( H5C__mark_flush_dep_clean(entry_ptr) < 0 ) {
+ if(was_dirty) {
+ HDassert(entry_ptr->flush_dep_ndirty_children == 0);
+ if(entry_ptr->flush_dep_nparents > 0)
+ if(H5C__mark_flush_dep_clean(entry_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep clean flag")
- }
- }
- }
+ } /* end if */
}
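[Editor's note: the clean-flag propagation above follows the usual flush-dependency rule -- a parent is flushable only once its dirty-child count reaches zero. A minimal sketch of that rule with an invented node type, not the real H5C_cache_entry_t:]

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct dep {
    bool is_dirty;
    struct dep **parents;          /* flush dependency parents */
    size_t nparents;
    size_t ndirty_children;        /* parent is flushable at zero */
};

static void mark_clean(struct dep *e)
{
    assert(e->is_dirty);
    e->is_dirty = false;
    /* propagate: each parent has one fewer dirty child */
    for (size_t i = 0; i < e->nparents; i++) {
        assert(e->parents[i]->ndirty_children > 0);
        e->parents[i]->ndirty_children--;
    }
}

int main(void)
{
    struct dep parent = { .is_dirty = true, .ndirty_children = 1 };
    struct dep *ps[] = { &parent };
    struct dep child = { .is_dirty = true, .parents = ps, .nparents = 1 };

    mark_clean(&child);
    assert(parent.ndirty_children == 0);   /* parent now flushable */
    return 0;
}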
@@ -8500,7 +8625,8 @@ done:
*
* Return: FAIL if error is detected, SUCCEED otherwise.
*
- * Programmer:
+ * Programmer: Vailin Choi
+ * 9/6/15
*
*-------------------------------------------------------------------------
*/
@@ -8581,14 +8707,14 @@ H5C_load_entry(H5F_t * f,
haddr_t addr,
void * udata)
{
- hbool_t dirty = FALSE; /* Flag indicating whether thing was dirtied during deserialize */
- hbool_t compressed = FALSE; /* flag indicating whether thing */
- /* will be run through filters on */
+ hbool_t dirty = FALSE; /* Flag indicating whether thing was dirtied during deserialize */
+ hbool_t compressed = FALSE; /* flag indicating whether thing */
+ /* will be run through filters on */
/* on read and write. Usually FALSE */
- /* set to true if appropriate. */
- size_t compressed_size = 0; /* entry compressed size if */
+ /* set to true if appropriate. */
+ size_t compressed_size = 0; /* entry compressed size if */
/* known -- otherwise uncompressed. */
- /* Zero indicates compression not */
+ /* Zero indicates compression not */
/* enabled. */
uint8_t * image = NULL; /* Buffer for disk image */
void * thing = NULL; /* Pointer to thing loaded */
@@ -8697,10 +8823,8 @@ H5C_load_entry(H5F_t * f,
#endif /* modified code */ /* JRM */
}
/* Allocate the buffer for reading the on-disk entry image */
- if(NULL == (image = H5MM_malloc(len + H5C_IMAGE_EXTRA_SPACE)))
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, \
- "memory allocation failed for on disk image buffer.")
+ if(NULL == (image = (uint8_t *)H5MM_malloc(len + H5C_IMAGE_EXTRA_SPACE)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for on disk image buffer.")
#if H5C_DO_MEMORY_SANITY_CHECKS
HDmemcpy(image + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
@@ -8731,7 +8855,7 @@ H5C_load_entry(H5F_t * f,
if(actual_len != len) {
if(NULL == (new_image = H5MM_realloc(image, len + H5C_IMAGE_EXTRA_SPACE)))
HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()")
- image = new_image;
+ image = (uint8_t *)new_image;
}
if(H5F_block_read(f, type->mem_type, addr, len, dxpl_id, image) < 0)
@@ -8774,7 +8898,7 @@ H5C_load_entry(H5F_t * f,
if(NULL == (new_image = H5MM_realloc(image, actual_len + H5C_IMAGE_EXTRA_SPACE)))
HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()")
- image = new_image;
+ image = (uint8_t *)new_image;
#if H5C_DO_MEMORY_SANITY_CHECKS
HDmemcpy(image + actual_len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
@@ -8788,8 +8912,7 @@ H5C_load_entry(H5F_t * f,
HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "Can't read image")
}
} else { /* throw an error */
- HGOTO_ERROR(H5E_CACHE, H5E_UNSUPPORTED, NULL,
- "size of non-speculative, non-compressed object changed")
+ HGOTO_ERROR(H5E_CACHE, H5E_UNSUPPORTED, NULL, "size of non-speculative, non-compressed object changed")
}
} /* end if (actual_len != len) */
@@ -8811,7 +8934,7 @@ H5C_load_entry(H5F_t * f,
if(retries) { /* Does not track 0 retry */
if(H5F_track_metadata_read_retries(f, (unsigned)type->mem_type, retries) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "cannot track read tries = %u ", retries)
- }
+ } /* end if */
len = actual_len;
} /* end if !H5C__CLASS_SKIP_READS */
@@ -8830,7 +8953,7 @@ H5C_load_entry(H5F_t * f,
*
* In the following assert:
*
- * HDassert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6 ) );
+ * HDassert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6 ) );
*
* note that type ids 5 & 6 are associated with object headers in the
* metadata cache.
@@ -8847,12 +8970,12 @@ H5C_load_entry(H5F_t * f,
entry->addr = addr;
entry->size = len;
HDassert(entry->size < H5C_MAX_ENTRY_SIZE);
- entry->compressed = compressed;
- entry->compressed_size = compressed_size;
+ entry->compressed = compressed;
+ entry->compressed_size = compressed_size;
entry->image_ptr = image;
entry->image_up_to_date = TRUE;
entry->type = type;
- entry->is_dirty = dirty;
+ entry->is_dirty = dirty;
entry->dirtied = FALSE;
entry->is_protected = FALSE;
entry->is_read_only = FALSE;
@@ -8867,6 +8990,8 @@ H5C_load_entry(H5F_t * f,
entry->flush_in_progress = FALSE;
entry->destroy_in_progress = FALSE;
+ entry->ring = H5C_RING_UNDEFINED;
+
/* Initialize flush dependency height fields */
entry->flush_dep_parent = NULL;
entry->flush_dep_nparents = 0;
@@ -8887,7 +9012,7 @@ H5C_load_entry(H5F_t * f,
ret_value = thing;
done:
- /* Cleanup on error */
+ /* Cleanup on error */
if(NULL == ret_value) {
/* Release resources */
@@ -8897,7 +9022,7 @@ done:
"free_icr callback failed")
if(image)
- image = H5MM_xfree(image);
+ image = (uint8_t *)H5MM_xfree(image);
} /* end if */
FUNC_LEAVE_NOAPI(ret_value)
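[Editor's note: the speculative-read handling in H5C_load_entry above follows a common shape -- read a guessed length, ask the type callback for the actual on-disk length, then grow the buffer and re-read if the guess was short. A standalone sketch of that shape with stubbed I/O and invented names, not the real H5F/H5MM calls:]

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Stub I/O for the sketch: every object is really 32 bytes on disk */
static int read_block(uint64_t addr, size_t len, uint8_t *buf)
{ (void)addr; memset(buf, 0, len); return 0; }

static size_t actual_image_len(const uint8_t *buf, size_t len)
{ (void)buf; (void)len; return 32; }

static uint8_t *load_image(uint64_t addr, size_t guess, size_t *len_out)
{
    uint8_t *image = malloc(guess);
    size_t len = guess;

    if (image == NULL)
        return NULL;
    if (read_block(addr, len, image) < 0)
        goto fail;

    len = actual_image_len(image, len);
    if (len != guess) {
        uint8_t *bigger = realloc(image, len);

        if (bigger == NULL)
            goto fail;
        image = bigger;
        /* re-read now that the true length is known */
        if (read_block(addr, len, image) < 0)
            goto fail;
    }
    *len_out = len;
    return image;

fail:
    free(image);
    return NULL;
}

int main(void)
{
    size_t len = 0;
    uint8_t *image = load_image(0x1234, 16, &len);

    free(image);
    return (image != NULL && len == 32) ? 0 : 1;
}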
@@ -8937,45 +9062,45 @@ done:
*
* Programmer: John Mainzer, 5/14/04
*
- * Changes: Modified function to skip over entries with the
- * flush_in_progress flag set. If this is not done,
- * an infinite recursion is possible if the cache is
- * full, and the pre-serialize or serialize routine
- * attempts to load another entry.
+ * Changes: Modified function to skip over entries with the
+ * flush_in_progress flag set. If this is not done,
+ * an infinite recursion is possible if the cache is
+ * full, and the pre-serialize or serialize routine
+ * attempts to load another entry.
*
* This error was exposed by a re-factor of the
* H5C__flush_single_entry() routine. However, it was
* a potential bug from the moment that entries were
* allowed to load other entries on flush.
*
- * In passing, note that the primary and secondary dxpls
- * mentioned in the comment above have been replaced by
- * a single dxpl at some point, and thus the discussion
- * above is somewhat obsolete. Date of this change is
- * unkown.
+ * In passing, note that the primary and secondary dxpls
+ * mentioned in the comment above have been replaced by
+ * a single dxpl at some point, and thus the discussion
+ * above is somewhat obsolete. Date of this change is
+ *                     unknown.
*
- * JRM -- 12/26/14
+ * JRM -- 12/26/14
*
- * Modified function to detect deletions of entries
- * during a scan of the LRU, and where appropriate,
- * restart the scan to avoid proceeding with a next
- * entry that is no longer in the cache.
+ * Modified function to detect deletions of entries
+ * during a scan of the LRU, and where appropriate,
+ * restart the scan to avoid proceeding with a next
+ * entry that is no longer in the cache.
*
- * Note the absence of checks after flushes of clean
- * entries. As a second entry can only be removed by
- * by a call to the pre_serialize or serialize callback
- * of the first, and as these callbacks will not be called
- * on clean entries, no checks are needed.
+ * Note the absence of checks after flushes of clean
+ *                     entries. As a second entry can only be removed
+ *                     by a call to the pre_serialize or serialize callback
+ * of the first, and as these callbacks will not be called
+ * on clean entries, no checks are needed.
*
- * JRM -- 4/6/15
+ * JRM -- 4/6/15
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5C_make_space_in_cache(H5F_t * f,
- hid_t dxpl_id,
- size_t space_needed,
- hbool_t write_permitted)
+H5C_make_space_in_cache(H5F_t * f,
+ hid_t dxpl_id,
+ size_t space_needed,
+ hbool_t write_permitted)
{
H5C_t * cache_ptr = f->shared->cache;
#if H5C_COLLECT_CACHE_STATS
@@ -8987,7 +9112,7 @@ H5C_make_space_in_cache(H5F_t * f,
size_t empty_space;
hbool_t prev_is_dirty = FALSE;
hbool_t didnt_flush_entry = FALSE;
- hbool_t restart_scan;
+ hbool_t restart_scan;
H5C_cache_entry_t * entry_ptr;
H5C_cache_entry_t * prev_ptr;
H5C_cache_entry_t * next_ptr;
@@ -9004,7 +9129,7 @@ H5C_make_space_in_cache(H5F_t * f,
if ( write_permitted ) {
- restart_scan = FALSE;
+ restart_scan = FALSE;
initial_list_len = cache_ptr->LRU_list_len;
entry_ptr = cache_ptr->LRU_tail_ptr;
@@ -9018,7 +9143,7 @@ H5C_make_space_in_cache(H5F_t * f,
}
- while ( ( ( (cache_ptr->index_size + space_needed)
+ while ( ( ( (cache_ptr->index_size + space_needed)
>
cache_ptr->max_cache_size
)
@@ -9110,13 +9235,13 @@ H5C_make_space_in_cache(H5F_t * f,
total_entries_scanned++;
#endif /* H5C_COLLECT_CACHE_STATS */
- } else {
+ } else {
- /* Skip epoch markers and entries that are in the process
+ /* Skip epoch markers and entries that are in the process
* of being flushed.
*/
didnt_flush_entry = TRUE;
- }
+ }
if ( prev_ptr != NULL ) {
@@ -9127,7 +9252,7 @@ H5C_make_space_in_cache(H5F_t * f,
* Hence no need for sanity checks, as we haven't
* flushed anything. Thus just set entry_ptr to prev_ptr
* and go on.
- */
+ */
entry_ptr = prev_ptr;
} else if ( ( restart_scan )
@@ -9143,7 +9268,7 @@ H5C_make_space_in_cache(H5F_t * f,
/* something has happened to the LRU -- start over
* from the tail.
*/
- restart_scan = FALSE;
+ restart_scan = FALSE;
entry_ptr = cache_ptr->LRU_tail_ptr;
H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
@@ -9238,14 +9363,14 @@ H5C_make_space_in_cache(H5F_t * f,
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
/* we are scanning the clean LRU, so the serialize function
- * will not be called on any entry -- thus there is no
- * concern about the list being modified out from under
- * this function.
- */
+ * will not be called on any entry -- thus there is no
+ * concern about the list being modified out from under
+ * this function.
+ */
entry_ptr = prev_ptr;
entries_examined++;
- }
+ }
}
done:
@@ -9292,90 +9417,90 @@ H5C_validate_lru_list(H5C_t * cache_ptr)
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
if ( ( ( cache_ptr->LRU_head_ptr == NULL )
- ||
- ( cache_ptr->LRU_tail_ptr == NULL )
- )
- &&
- ( cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr )
+ ||
+ ( cache_ptr->LRU_tail_ptr == NULL )
+ )
+ &&
+ ( cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr )
) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 1 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 1 failed")
}
if ( ( cache_ptr->LRU_list_len < 0 ) || ( cache_ptr->LRU_list_size < 0 ) ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
}
if ( ( cache_ptr->LRU_list_len == 1 )
- &&
- ( ( cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr )
- ||
- ( cache_ptr->LRU_head_ptr == NULL )
- ||
- ( cache_ptr->LRU_head_ptr->size != cache_ptr->LRU_list_size )
- )
+ &&
+ ( ( cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr )
+ ||
+ ( cache_ptr->LRU_head_ptr == NULL )
+ ||
+ ( cache_ptr->LRU_head_ptr->size != cache_ptr->LRU_list_size )
+ )
) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")
}
if ( ( cache_ptr->LRU_list_len >= 1 )
- &&
- ( ( cache_ptr->LRU_head_ptr == NULL )
- ||
- ( cache_ptr->LRU_head_ptr->prev != NULL )
- ||
- ( cache_ptr->LRU_tail_ptr == NULL )
- ||
- ( cache_ptr->LRU_tail_ptr->next != NULL )
- )
+ &&
+ ( ( cache_ptr->LRU_head_ptr == NULL )
+ ||
+ ( cache_ptr->LRU_head_ptr->prev != NULL )
+ ||
+ ( cache_ptr->LRU_tail_ptr == NULL )
+ ||
+ ( cache_ptr->LRU_tail_ptr->next != NULL )
+ )
) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")
}
entry_ptr = cache_ptr->LRU_head_ptr;
while ( entry_ptr != NULL )
{
- if ( ( entry_ptr != cache_ptr->LRU_head_ptr ) &&
- ( ( entry_ptr->prev == NULL ) ||
- ( entry_ptr->prev->next != entry_ptr ) ) ) {
+ if ( ( entry_ptr != cache_ptr->LRU_head_ptr ) &&
+ ( ( entry_ptr->prev == NULL ) ||
+ ( entry_ptr->prev->next != entry_ptr ) ) ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")
- }
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")
+ }
- if ( ( entry_ptr != cache_ptr->LRU_tail_ptr ) &&
- ( ( entry_ptr->next == NULL ) ||
- ( entry_ptr->next->prev != entry_ptr ) ) ) {
+ if ( ( entry_ptr != cache_ptr->LRU_tail_ptr ) &&
+ ( ( entry_ptr->next == NULL ) ||
+ ( entry_ptr->next->prev != entry_ptr ) ) ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
- }
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
+ }
- if ( ( entry_ptr->is_pinned ) ||
- ( entry_ptr->pinned_from_client ) ||
- ( entry_ptr->pinned_from_cache ) ) {
+ if ( ( entry_ptr->is_pinned ) ||
+ ( entry_ptr->pinned_from_client ) ||
+ ( entry_ptr->pinned_from_cache ) ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")
- }
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")
+ }
- len++;
- size += entry_ptr->size;
- entry_ptr = entry_ptr->next;
+ len++;
+ size += entry_ptr->size;
+ entry_ptr = entry_ptr->next;
}
if ( ( cache_ptr->LRU_list_len != len ) ||
- ( cache_ptr->LRU_list_size != size ) ) {
+ ( cache_ptr->LRU_list_size != size ) ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 8 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 8 failed")
}
done:
if ( ret_value != SUCCEED ) {
- HDassert(0);
+ HDassert(0);
}
FUNC_LEAVE_NOAPI(ret_value)
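[Editor's note: this validator and the two that follow (pinned and protected lists) share one skeleton -- head/tail consistency, prev/next symmetry on a forward walk, and recomputed length/size totals compared against the cached counters. A generic sketch of that skeleton with a hypothetical node type:]

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct dnode { struct dnode *prev, *next; size_t size; };

static bool validate_list(struct dnode *head, struct dnode *tail,
                          size_t expect_len, size_t expect_size)
{
    /* Check 1: head and tail are both NULL or both non-NULL */
    if ((head == NULL || tail == NULL) && head != tail)
        return false;
    /* Check 4: the list ends are properly terminated */
    if (head != NULL && (head->prev != NULL || tail->next != NULL))
        return false;

    size_t len = 0, size = 0;
    for (struct dnode *p = head; p != NULL; p = p->next) {
        /* Checks 5/6: prev/next pointers must be symmetric */
        if (p != head && (p->prev == NULL || p->prev->next != p))
            return false;
        if (p != tail && (p->next == NULL || p->next->prev != p))
            return false;
        len++;
        size += p->size;
    }
    /* Checks 8/9: recomputed totals match the cached counters */
    return len == expect_len && size == expect_size;
}

int main(void)
{
    struct dnode b = { .size = 8 };
    struct dnode a = { .next = &b, .size = 4 };
    b.prev = &a;
    assert(validate_list(&a, &b, 2, 12));
    return 0;
}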
@@ -9422,94 +9547,94 @@ H5C_validate_pinned_entry_list(H5C_t * cache_ptr)
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
if ( ( ( cache_ptr->pel_head_ptr == NULL )
- ||
- ( cache_ptr->pel_tail_ptr == NULL )
- )
- &&
- ( cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr )
+ ||
+ ( cache_ptr->pel_tail_ptr == NULL )
+ )
+ &&
+ ( cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr )
) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 1 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 1 failed")
}
if ( ( cache_ptr->pel_len < 0 ) || ( cache_ptr->pel_size < 0 ) ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
}
if ( ( cache_ptr->pel_len == 1 )
- &&
- ( ( cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr )
- ||
- ( cache_ptr->pel_head_ptr == NULL )
- ||
- ( cache_ptr->pel_head_ptr->size != cache_ptr->pel_size )
- )
+ &&
+ ( ( cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr )
+ ||
+ ( cache_ptr->pel_head_ptr == NULL )
+ ||
+ ( cache_ptr->pel_head_ptr->size != cache_ptr->pel_size )
+ )
) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")
}
if ( ( cache_ptr->pel_len >= 1 )
- &&
- ( ( cache_ptr->pel_head_ptr == NULL )
- ||
- ( cache_ptr->pel_head_ptr->prev != NULL )
- ||
- ( cache_ptr->pel_tail_ptr == NULL )
- ||
- ( cache_ptr->pel_tail_ptr->next != NULL )
- )
+ &&
+ ( ( cache_ptr->pel_head_ptr == NULL )
+ ||
+ ( cache_ptr->pel_head_ptr->prev != NULL )
+ ||
+ ( cache_ptr->pel_tail_ptr == NULL )
+ ||
+ ( cache_ptr->pel_tail_ptr->next != NULL )
+ )
) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")
}
entry_ptr = cache_ptr->pel_head_ptr;
while ( entry_ptr != NULL )
{
- if ( ( entry_ptr != cache_ptr->pel_head_ptr ) &&
- ( ( entry_ptr->prev == NULL ) ||
- ( entry_ptr->prev->next != entry_ptr ) ) ) {
+ if ( ( entry_ptr != cache_ptr->pel_head_ptr ) &&
+ ( ( entry_ptr->prev == NULL ) ||
+ ( entry_ptr->prev->next != entry_ptr ) ) ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")
- }
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")
+ }
- if ( ( entry_ptr != cache_ptr->pel_tail_ptr ) &&
- ( ( entry_ptr->next == NULL ) ||
- ( entry_ptr->next->prev != entry_ptr ) ) ) {
+ if ( ( entry_ptr != cache_ptr->pel_tail_ptr ) &&
+ ( ( entry_ptr->next == NULL ) ||
+ ( entry_ptr->next->prev != entry_ptr ) ) ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
- }
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
+ }
- if ( ! entry_ptr->is_pinned ) {
+ if ( ! entry_ptr->is_pinned ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")
- }
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")
+ }
- if ( ! ( ( entry_ptr->pinned_from_client ) ||
- ( entry_ptr->pinned_from_cache ) ) ) {
+ if ( ! ( ( entry_ptr->pinned_from_client ) ||
+ ( entry_ptr->pinned_from_cache ) ) ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 8 failed")
- }
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 8 failed")
+ }
- len++;
- size += entry_ptr->size;
- entry_ptr = entry_ptr->next;
+ len++;
+ size += entry_ptr->size;
+ entry_ptr = entry_ptr->next;
}
if ( ( cache_ptr->pel_len != len ) ||
- ( cache_ptr->pel_size != size ) ) {
+ ( cache_ptr->pel_size != size ) ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 9 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 9 failed")
}
done:
if ( ret_value != SUCCEED ) {
- HDassert(0);
+ HDassert(0);
}
FUNC_LEAVE_NOAPI(ret_value)
@@ -9556,94 +9681,94 @@ H5C_validate_protected_entry_list(H5C_t * cache_ptr)
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
if ( ( ( cache_ptr->pl_head_ptr == NULL )
- ||
- ( cache_ptr->pl_tail_ptr == NULL )
- )
- &&
- ( cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr )
+ ||
+ ( cache_ptr->pl_tail_ptr == NULL )
+ )
+ &&
+ ( cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr )
) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 1 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 1 failed")
}
if ( ( cache_ptr->pl_len < 0 ) || ( cache_ptr->pl_size < 0 ) ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
}
if ( ( cache_ptr->pl_len == 1 )
- &&
- ( ( cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr )
- ||
- ( cache_ptr->pl_head_ptr == NULL )
- ||
- ( cache_ptr->pl_head_ptr->size != cache_ptr->pl_size )
- )
+ &&
+ ( ( cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr )
+ ||
+ ( cache_ptr->pl_head_ptr == NULL )
+ ||
+ ( cache_ptr->pl_head_ptr->size != cache_ptr->pl_size )
+ )
) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")
}
if ( ( cache_ptr->pl_len >= 1 )
- &&
- ( ( cache_ptr->pl_head_ptr == NULL )
- ||
- ( cache_ptr->pl_head_ptr->prev != NULL )
- ||
- ( cache_ptr->pl_tail_ptr == NULL )
- ||
- ( cache_ptr->pl_tail_ptr->next != NULL )
- )
+ &&
+ ( ( cache_ptr->pl_head_ptr == NULL )
+ ||
+ ( cache_ptr->pl_head_ptr->prev != NULL )
+ ||
+ ( cache_ptr->pl_tail_ptr == NULL )
+ ||
+ ( cache_ptr->pl_tail_ptr->next != NULL )
+ )
) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")
}
entry_ptr = cache_ptr->pl_head_ptr;
while ( entry_ptr != NULL )
{
- if ( ( entry_ptr != cache_ptr->pl_head_ptr ) &&
- ( ( entry_ptr->prev == NULL ) ||
- ( entry_ptr->prev->next != entry_ptr ) ) ) {
+ if ( ( entry_ptr != cache_ptr->pl_head_ptr ) &&
+ ( ( entry_ptr->prev == NULL ) ||
+ ( entry_ptr->prev->next != entry_ptr ) ) ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")
- }
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")
+ }
- if ( ( entry_ptr != cache_ptr->pl_tail_ptr ) &&
- ( ( entry_ptr->next == NULL ) ||
- ( entry_ptr->next->prev != entry_ptr ) ) ) {
+ if ( ( entry_ptr != cache_ptr->pl_tail_ptr ) &&
+ ( ( entry_ptr->next == NULL ) ||
+ ( entry_ptr->next->prev != entry_ptr ) ) ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
- }
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
+ }
- if ( ! entry_ptr->is_protected ) {
+ if ( ! entry_ptr->is_protected ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")
- }
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")
+ }
- if ( ( entry_ptr->is_read_only ) &&
- ( entry_ptr->ro_ref_count <= 0 ) ) {
+ if ( ( entry_ptr->is_read_only ) &&
+ ( entry_ptr->ro_ref_count <= 0 ) ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 8 failed")
- }
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 8 failed")
+ }
- len++;
- size += entry_ptr->size;
- entry_ptr = entry_ptr->next;
+ len++;
+ size += entry_ptr->size;
+ entry_ptr = entry_ptr->next;
}
if ( ( cache_ptr->pl_len != len ) ||
- ( cache_ptr->pl_size != size ) ) {
+ ( cache_ptr->pl_size != size ) ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 9 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 9 failed")
}
done:
if ( ret_value != SUCCEED ) {
- HDassert(0);
+ HDassert(0);
}
FUNC_LEAVE_NOAPI(ret_value)
@@ -9689,14 +9814,14 @@ H5C_entry_in_skip_list(H5C_t * cache_ptr, H5C_cache_entry_t *target_ptr)
while ( ( node_ptr != NULL ) && ( ! in_slist ) )
{
- entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
HDassert( entry_ptr );
HDassert( entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC );
- HDassert( entry_ptr->is_dirty );
- HDassert( entry_ptr->in_slist );
+ HDassert( entry_ptr->is_dirty );
+ HDassert( entry_ptr->in_slist );
- if ( entry_ptr == target_ptr ) {
+ if ( entry_ptr == target_ptr ) {
in_slist = TRUE;
@@ -9756,7 +9881,7 @@ H5C_entry_in_skip_list(H5C_t * cache_ptr, H5C_cache_entry_t *target_ptr)
#ifndef NDEBUG
herr_t
H5C_get_entry_ptr_from_addr(const H5F_t *f,
- haddr_t addr,
+ haddr_t addr,
void ** entry_ptr_ptr)
{
H5C_t * cache_ptr;
@@ -9780,21 +9905,21 @@ H5C_get_entry_ptr_from_addr(const H5F_t *f,
*/
if ( ( cache_ptr == NULL ) || ( cache_ptr->magic != H5C__H5C_T_MAGIC ) ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry.")
}
H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
if ( entry_ptr == NULL ) {
- /* the entry doesn't exist in the cache -- report this
- * and quit.
- */
- *entry_ptr_ptr = NULL;
+ /* the entry doesn't exist in the cache -- report this
+ * and quit.
+ */
+ *entry_ptr_ptr = NULL;
} else {
- *entry_ptr_ptr = entry_ptr;
+ *entry_ptr_ptr = entry_ptr;
/* increment call counter */
(cache_ptr->get_entry_ptr_from_addr_counter)++;
@@ -9841,10 +9966,10 @@ done:
#ifndef NDEBUG
herr_t
H5C_verify_entry_type(const H5F_t *f,
- haddr_t addr,
- const H5C_class_t * expected_type,
- hbool_t * in_cache_ptr,
- hbool_t * type_ok_ptr)
+ haddr_t addr,
+ const H5C_class_t * expected_type,
+ hbool_t * in_cache_ptr,
+ hbool_t * type_ok_ptr)
{
H5C_t * cache_ptr;
H5C_cache_entry_t * entry_ptr = NULL;
@@ -9868,21 +9993,21 @@ H5C_verify_entry_type(const H5F_t *f,
*/
if ( ( cache_ptr == NULL ) || ( cache_ptr->magic != H5C__H5C_T_MAGIC ) ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry.")
}
H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
if ( entry_ptr == NULL ) {
- /* the entry doesn't exist in the cache -- report this
- * and quit.
- */
- *in_cache_ptr = FALSE;
+ /* the entry doesn't exist in the cache -- report this
+ * and quit.
+ */
+ *in_cache_ptr = FALSE;
} else {
- *in_cache_ptr = TRUE;
+ *in_cache_ptr = TRUE;
*type_ok_ptr = (expected_type == entry_ptr->type);
}
@@ -9908,7 +10033,7 @@ done:
* before coming into the cache, any external functions that
* use the internal library functions (i.e., tests) should
* use this function if they don't plan on setting up proper
- * tags.
+ * metadata tags.
*
* Return: FAIL if error is detected, SUCCEED otherwise.
*
@@ -9967,12 +10092,32 @@ H5C_tag_entry(H5C_t * cache_ptr, H5C_cache_entry_t * entry_ptr, hid_t dxpl_id)
/* Get the dataset transfer property list */
if(NULL == (dxpl = (H5P_genplist_t *)H5I_object_verify(dxpl_id, H5I_GENPROP_LST)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a property list")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a property list")
/* Get the tag from the DXPL */
if((H5P_get(dxpl, "H5C_tag", &tag)) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to query property value")
+ if(cache_ptr->ignore_tags != TRUE) {
+#if H5C_DO_TAGGING_SANITY_CHECKS
+ /* Perform some sanity checks to ensure that a correct tag is being applied */
+ if(H5C_verify_tag(entry_ptr->type->id, tag.value) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "tag verification failed")
+#endif
+ } else {
+ /* if we're ignoring tags, it's because we're running
+ tests on internal functions and may not have inserted a tag
+ value into a given dxpl_id before creating some metadata. Thus,
+ in this case only, if a tag value has not been set, we can
+ arbitrarily set it to something for the sake of passing the tests.
+ If the tag value is set, then we'll just let it get assigned without
+ additional checking for correctness. */
+ if(!tag.value) {
+ tag.value = H5AC__IGNORE_TAG;
+ tag.globality = H5C_GLOBALITY_NONE;
+ } /* end if */
+ } /* end if */
+
/* Apply the tag to the entry */
entry_ptr->tag = tag.value;
@@ -9983,6 +10128,7 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_tag_entry */
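[Editor's note: the ignore_tags branch above amounts to -- verify tags in normal operation, but in test mode substitute a benign default for untagged metadata rather than failing. A small sketch of that decision with invented types and sentinel values (the real H5AC__IGNORE_TAG constant lives in the H5AC headers):]

#include <stdbool.h>
#include <stdint.h>

#define IGNORE_TAG 0xFFFFFFFFu         /* invented sentinel value */

struct tag { uint32_t value; };

/* Stub: a "strict" check that simply rejects missing tags */
static int verify_tag_strict(uint32_t value)
{ return value != 0 ? 0 : -1; }

static int apply_tag(struct tag *t, bool ignore_tags)
{
    if (!ignore_tags)
        return verify_tag_strict(t->value);   /* normal operation */

    /* test mode: tolerate a missing tag by substituting a default */
    if (t->value == 0)
        t->value = IGNORE_TAG;
    return 0;
}

int main(void)
{
    struct tag t = { 0 };

    if (apply_tag(&t, false) == 0)     /* strict mode must reject this */
        return 1;
    return apply_tag(&t, true);        /* test mode substitutes a default */
}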
+
/*-------------------------------------------------------------------------
*
* Function: H5C_evict_tagged_entries
@@ -10000,13 +10146,12 @@ herr_t
H5C_evict_tagged_entries(H5F_t * f, hid_t dxpl_id, haddr_t tag)
{
/* Variable Declarations */
- herr_t status;
+ H5C_t *cache_ptr = NULL;
H5C_cache_entry_t * entry_ptr = NULL;
H5C_cache_entry_t * next_entry_ptr = NULL;
- int i;
hbool_t evicted_entries_last_pass;
hbool_t pinned_entries_need_evicted;
- H5C_t *cache_ptr = NULL;
+ int i;
herr_t ret_value = SUCCEED;
/* Function Enter Macro */
@@ -10078,7 +10223,7 @@ H5C_evict_tagged_entries(H5F_t * f, hid_t dxpl_id, haddr_t tag)
} /* end for */
    /* Keep doing this until we have stopped evicting entries */
- } while ((evicted_entries_last_pass == TRUE));
+ } while (evicted_entries_last_pass == TRUE);
    /* If we stop evicting entries and pinned entries still need to be
       evicted, then we have a problem. */
@@ -10177,12 +10322,89 @@ H5C_flush_marked_entries(H5F_t * f, hid_t dxpl_id)
/* Flush all marked entries */
if(H5C_flush_cache(f, dxpl_id, H5C__FLUSH_MARKED_ENTRIES_FLAG | H5C__FLUSH_IGNORE_PROTECTED_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush cache")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush cache")
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_flush_marked_entries */
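[Editor's note: H5C_flush_marked_entries is the second half of a mark-then-sweep flow -- entries carrying the tag are first given flush markers, then the cache is flushed with the marked-entries flag. A self-contained sketch of that two-phase pattern with an invented entry type:]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct entry { uint64_t tag; bool dirty, marked; };

static void mark_tagged(struct entry *e, int n, uint64_t tag)
{
    for (int i = 0; i < n; i++)
        if (e[i].tag == tag && e[i].dirty)
            e[i].marked = true;
}

static void flush_marked(struct entry *e, int n)
{
    for (int i = 0; i < n; i++)
        if (e[i].marked) {
            e[i].dirty = false;        /* stand-in for the real flush */
            e[i].marked = false;
        }
}

int main(void)
{
    struct entry es[3] = {
        { .tag = 0xA, .dirty = true },
        { .tag = 0xB, .dirty = true },
        { .tag = 0xA, .dirty = false },
    };
    mark_tagged(es, 3, 0xA);
    flush_marked(es, 3);
    printf("entry 0 dirty: %d, entry 1 dirty: %d\n",
           es[0].dirty, es[1].dirty);  /* prints 0, 1 */
    return 0;
}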
+#if H5C_DO_TAGGING_SANITY_CHECKS
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_verify_tag
+ *
+ * Purpose:     Performs sanity checking on an entry type/tag pair.
+ *
+ * Return: SUCCEED or FAIL.
+ *
+ * Programmer: Mike McGreevy
+ * January 14, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C_verify_tag(int id, haddr_t tag)
+{
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ /* Perform some sanity checks on tag value. Certain entry
+ * types require certain tag values, so check that these
+ * constraints are met. */
+ if(tag == H5AC__IGNORE_TAG)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "cannot ignore a tag while doing verification.")
+ else if(tag == H5AC__INVALID_TAG)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "no metadata tag provided")
+ else {
+
+ /* Superblock */
+ if((id == H5AC_SUPERBLOCK_ID) || (id == H5AC_DRVRINFO_ID)) {
+ if(tag != H5AC__SUPERBLOCK_TAG)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "superblock not tagged with H5AC__SUPERBLOCK_TAG")
+ }
+ else {
+ if(tag == H5AC__SUPERBLOCK_TAG)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "H5AC__SUPERBLOCK_TAG applied to non-superblock entry")
+ }
+
+ /* Free Space Manager */
+ if((id == H5AC_FSPACE_HDR_ID) || (id == H5AC_FSPACE_SINFO_ID)) {
+ if(tag != H5AC__FREESPACE_TAG)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "freespace entry not tagged with H5AC__FREESPACE_TAG")
+ }
+ else {
+ if(tag == H5AC__FREESPACE_TAG)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "H5AC__FREESPACE_TAG applied to non-freespace entry")
+ }
+
+ /* SOHM */
+ if((id == H5AC_SOHM_TABLE_ID) || (id == H5AC_SOHM_LIST_ID)) {
+ if(tag != H5AC__SOHM_TAG)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "sohm entry not tagged with H5AC__SOHM_TAG")
+ }
+
+ /* Global Heap */
+ if(id == H5AC_GHEAP_ID) {
+ if(tag != H5AC__GLOBALHEAP_TAG)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "global heap not tagged with H5AC__GLOBALHEAP_TAG")
+ }
+ else {
+ if(tag == H5AC__GLOBALHEAP_TAG)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "H5AC__GLOBALHEAP_TAG applied to non-globalheap entry")
+ }
+ } /* end else */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_verify_tag */
+#endif
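[Editor's note: as written, H5C_verify_tag is an if/else chain. One alternative, sketched below with invented ids and tag values (the real H5AC_*_ID and H5AC__*_TAG constants live in the H5AC headers), is a table of rules checked in one loop; this is only a design sketch, not the library's implementation.]

#include <stddef.h>
#include <stdint.h>

typedef uint64_t tag_t;

struct tag_rule { int type_id; tag_t required_tag; };

/* One row per entry type that owns a dedicated tag (values invented) */
static const struct tag_rule rules[] = {
    { 1 /* superblock  */, 0x1000 },
    { 2 /* freespace   */, 0x2000 },
    { 3 /* global heap */, 0x3000 },
};

static int verify_tag(int id, tag_t tag)
{
    for (size_t i = 0; i < sizeof(rules) / sizeof(rules[0]); i++) {
        if (id == rules[i].type_id && tag != rules[i].required_tag)
            return -1;   /* type must carry its dedicated tag */
        if (id != rules[i].type_id && tag == rules[i].required_tag)
            return -1;   /* dedicated tag on the wrong type */
    }
    return 0;
}

int main(void)
{
    return (verify_tag(1, 0x1000) == 0 &&   /* correct pairing */
            verify_tag(2, 0x1000) != 0)     /* superblock tag misused */
           ? 0 : 1;
}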
+
/*-------------------------------------------------------------------------
*
@@ -10214,7 +10436,7 @@ H5C_flush_tagged_entries(H5F_t * f, hid_t dxpl_id, haddr_t tag)
cache_ptr = f->shared->cache;
/* Mark all entries with specified tag */
- if(H5C_mark_tagged_entries(cache_ptr, tag, FALSE) < 0 )
+ if(H5C_mark_tagged_entries(cache_ptr, tag, FALSE) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't mark tagged entries")
/* Flush all marked entries */
@@ -10266,6 +10488,50 @@ H5C_retag_entries(H5C_t * cache_ptr, haddr_t src_tag, haddr_t dest_tag)
/*-------------------------------------------------------------------------
+ * Function: H5C_get_entry_ring
+ *
+ * Purpose: Given a file address, retrieve the ring for an entry at that
+ * address.
+ *
+ * On error, the value of *ring is not modified.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * 9/8/15
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_get_entry_ring(const H5F_t *f, haddr_t addr, H5C_ring_t *ring)
+{
+ H5C_t *cache_ptr; /* Pointer to cache */
+ H5C_cache_entry_t *entry_ptr; /* Pointer to cache entry at address */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(H5F_addr_defined(addr));
+
+ /* Locate the entry at the address */
+ H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
+ HDassert(entry_ptr);
+
+ /* Return the ring value */
+ *ring = entry_ptr->ring;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_get_entry_ring() */
+
+
+/*-------------------------------------------------------------------------
*
* Function: H5C_cork
*
@@ -10291,8 +10557,6 @@ herr_t
H5C_cork(H5C_t * cache_ptr, haddr_t obj_addr, unsigned action, hbool_t *corked)
{
haddr_t *ptr; /* Points to an address */
- haddr_t *addr_ptr = NULL; /* Points to an address */
- hbool_t is_corked; /* Cork status for an entry */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
@@ -10305,8 +10569,23 @@ H5C_cork(H5C_t * cache_ptr, haddr_t obj_addr, unsigned action, hbool_t *corked)
/* Search the list of corked object addresses in the cache */
ptr = (haddr_t *)H5SL_search(cache_ptr->cork_list_ptr, &obj_addr);
- switch(action) {
- case H5C__SET_CORK:
+ if(H5C__GET_CORKED == action) {
+ HDassert(corked);
+ if(ptr != NULL && *ptr == obj_addr)
+ *corked = TRUE;
+ else
+ *corked = FALSE;
+ } /* end if */
+ else {
+ hbool_t is_corked; /* Cork status for an entry */
+
+ /* Sanity check */
+ HDassert(H5C__SET_CORK == action || H5C__UNCORK == action);
+
+ /* Perform appropriate action */
+ if(H5C__SET_CORK == action) {
+ haddr_t *addr_ptr = NULL; /* Points to an address */
+
if(ptr != NULL && *ptr == obj_addr)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't cork an already corked object")
@@ -10316,15 +10595,15 @@ H5C_cork(H5C_t * cache_ptr, haddr_t obj_addr, unsigned action, hbool_t *corked)
/* Insert into the list */
*addr_ptr = obj_addr;
- if(H5SL_insert(cache_ptr->cork_list_ptr, addr_ptr, addr_ptr) < 0)
+ if(H5SL_insert(cache_ptr->cork_list_ptr, addr_ptr, addr_ptr) < 0) {
+ addr_ptr = H5FL_FREE(haddr_t, addr_ptr);
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't insert address into cork list")
+ } /* end if */
/* Set the entry's cork status */
is_corked = TRUE;
-
- break;
-
- case H5C__UNCORK:
+ } /* end if */
+ else {
if(ptr == NULL)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't uncork an object that is not corked ")
@@ -10338,25 +10617,12 @@ H5C_cork(H5C_t * cache_ptr, haddr_t obj_addr, unsigned action, hbool_t *corked)
/* Set the entry's cork status */
is_corked = FALSE;
- break;
-
- case H5C__GET_CORKED:
- HDassert(corked);
- if(ptr != NULL && *ptr == obj_addr)
- *corked = TRUE;
- else
- *corked = FALSE;
- break;
-
- default: /* should be unreachable */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown cork action")
- break;
- } /* end switch */
+ } /* end else */
- if(action != H5C__GET_CORKED)
        /* Mark existing cache entries tagged with obj_addr with the cork status */
if(H5C_mark_tagged_entries_cork(cache_ptr, obj_addr, is_corked) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown cork action")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't mark cork status on entry")
+ } /* end else */
done:
FUNC_LEAVE_NOAPI(ret_value)
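[Editor's note: H5C_cork maintains a skip list of corked object addresses with insert (cork), remove (uncork), and membership (get-corked) operations, and the error cases handled above -- double cork and uncork-of-uncorked both fail. A flat-array sketch of that bookkeeping, standing in for the skip list:]

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define MAX_CORKED 64
static uint64_t corked[MAX_CORKED];
static int ncorked;

static int find(uint64_t addr)
{
    for (int i = 0; i < ncorked; i++)
        if (corked[i] == addr)
            return i;
    return -1;
}

static int cork(uint64_t addr)       /* fails if already corked */
{
    if (find(addr) >= 0 || ncorked == MAX_CORKED)
        return -1;
    corked[ncorked++] = addr;
    return 0;
}

static int uncork(uint64_t addr)     /* fails if not corked */
{
    int i = find(addr);

    if (i < 0)
        return -1;
    corked[i] = corked[--ncorked];   /* swap-remove */
    return 0;
}

static bool is_corked(uint64_t addr) { return find(addr) >= 0; }

int main(void)
{
    assert(cork(0x1000) == 0);
    assert(is_corked(0x1000));
    assert(cork(0x1000) < 0);        /* double cork is an error */
    assert(uncork(0x1000) == 0);
    assert(!is_corked(0x1000));
    return 0;
}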
@@ -10544,3 +10810,4 @@ H5C__assert_flush_dep_nocycle(H5C_cache_entry_t * entry,
FUNC_LEAVE_NOAPI_VOID
} /* H5C__assert_flush_dep_nocycle() */
#endif /* NDEBUG */
+