author     Quincey Koziol <koziol@hdfgroup.org>    2015-09-09 06:33:03 (GMT)
committer  Quincey Koziol <koziol@hdfgroup.org>    2015-09-09 06:33:03 (GMT)
commit     26baf1a805b27e2ca34edf313a85f4a6d4081da1 (patch)
tree       7404adc256ca658eb84b691bcf0a82e62589757d
parent     2132f544596c2e12e86cebf4c1cc6b1d38ffa80d (diff)
[svn-r27710] Description:
    Bring the "metadata rings" code from its branch (mdc_rings_v2) to the
    trunk. (This change will support the page buffering feature.)

    Tested on: MacOSX/64 10.10.5 (amazon) w/ serial & parallel
    (h5committest forthcoming)
-rw-r--r--  src/H5AC.c            110
-rw-r--r--  src/H5ACprivate.h      18
-rw-r--r--  src/H5C.c            1308
-rw-r--r--  src/H5Cpkg.h          455
-rw-r--r--  src/H5Cprivate.h       77
-rw-r--r--  src/H5FScache.c        16
-rw-r--r--  src/H5Fsuper.c         74
-rw-r--r--  src/H5Fsuper_cache.c   10
-rw-r--r--  src/H5MF.c             80
-rw-r--r--  src/H5Pdxpl.c          12
-rw-r--r--  src/H5SM.c             25
-rw-r--r--  test/cache_common.c     4
-rw-r--r--  testpar/t_cache.c       2
13 files changed, 1520 insertions, 671 deletions
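For orientation before reading the diff: the patch assigns every cache entry to a "ring" and flushes the rings from outermost to innermost. The standalone sketch below illustrates that ordering only; it is not code from the patch. The enum mirrors the H5AC_RING_* aliases added in H5ACprivate.h further down, and the per-ring meanings in the comments are inferred from the alias names (a lower value is assumed to mean an outer ring, flushed first).

#include <stdio.h>

/* Mirrors the ring values aliased in H5ACprivate.h below (assumed
 * ordering: lower value == outer ring, flushed first). */
typedef enum {
    RING_UNDEFINED = 0, /* H5AC_RING_INV */
    RING_USER,          /* H5AC_RING_US  -- user metadata, outermost */
    RING_FSM,           /* H5AC_RING_FSM -- free space managers */
    RING_SBE,           /* H5AC_RING_SBE -- superblock extension */
    RING_SB,            /* H5AC_RING_SB  -- superblock, innermost */
    RING_NTYPES
} demo_ring_t;

int main(void)
{
    int ring;

    /* Same traversal as the new H5C_flush_cache(): start at the
     * outermost defined ring and work inward. */
    for(ring = RING_USER; ring < RING_NTYPES; ring++)
        printf("flush ring %d\n", ring);
    return 0;
}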
diff --git a/src/H5AC.c b/src/H5AC.c
index a94a727..525f78b 100644
--- a/src/H5AC.c
+++ b/src/H5AC.c
@@ -2321,3 +2321,113 @@ H5AC_retag_copied_metadata(const H5F_t *f, haddr_t metadata_tag)
FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5AC_retag_copied_metadata */
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC_get_entry_ring
+ *
+ * Purpose: Given a file address, retrieve the ring for an entry at that
+ * address.
+ *
+ * On error, the value of *ring is not modified.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * 9/8/15
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC_get_entry_ring(const H5F_t *f, haddr_t addr, H5AC_ring_t *ring)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity check */
+ HDassert(f);
+ HDassert(H5F_addr_defined(addr));
+ HDassert(ring);
+
+ /* Retrieve the ring value for the entry at address */
+ if(H5C_get_entry_ring(f, addr, ring) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "Can't retrieve ring for entry")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_get_entry_ring() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC_set_ring
+ *
+ * Purpose: Routine to set the ring on a DXPL (for passing through
+ * to the metadata cache).
+ *
+ * Return: Success: Non-negative
+ * Failure: Negative
+ *
+ * Programmer: Quincey Koziol
+ * Tuesday, September 8, 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC_set_ring(hid_t dxpl_id, H5AC_ring_t ring, H5P_genplist_t **dxpl,
+ H5AC_ring_t *orig_ring)
+{
+ herr_t ret_value = SUCCEED; /* return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(dxpl);
+ HDassert(orig_ring);
+
+ /* Set the ring type in the DXPL */
+ if(NULL == ((*dxpl) = (H5P_genplist_t *)H5I_object_verify(dxpl_id, H5I_GENPROP_LST)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a property list")
+ if((H5P_get((*dxpl), H5AC_RING_NAME, orig_ring)) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to get original ring value")
+ if((H5P_set((*dxpl), H5AC_RING_NAME, &ring)) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "unable to set ring value")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5AC_set_ring() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC_reset_ring
+ *
+ * Purpose: Routine to reset the original ring on a DXPL (after passing
+ * through to the metadata cache).
+ *
+ * Return: Success: Non-negative
+ * Failure: Negative
+ *
+ * Programmer: Quincey Koziol
+ * Tuesday, September 8, 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC_reset_ring(H5P_genplist_t *dxpl, H5AC_ring_t orig_ring)
+{
+ herr_t ret_value = SUCCEED; /* return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Reset the ring in the DXPL, if it's been changed */
+ if(orig_ring) {
+ /* Sanity check */
+ HDassert(dxpl);
+
+ if((H5P_set(dxpl, H5AC_RING_NAME, &orig_ring)) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "unable to set property value")
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5AC_reset_ring() */
+
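The intended call pattern for the two new DXPL helpers above is save/set/operate/restore. The fragment below is a hypothetical caller, not part of this patch, written against the signatures just added; the function name and the elided "work" step are placeholders. Note that H5AC_reset_ring() is safe to call unconditionally from the done block, since it only acts when the original ring was actually changed.

/* Hypothetical caller: route the metadata dirtied by a free-space
 * operation into the FSM ring, restoring the DXPL's previous ring
 * on the way out. */
herr_t
example_fsm_operation(H5F_t *f, hid_t dxpl_id)
{
    H5P_genplist_t *dxpl = NULL;            /* DXPL for setting ring */
    H5AC_ring_t orig_ring = H5AC_RING_INV;  /* Original ring value */
    herr_t ret_value = SUCCEED;             /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    /* Set the ring type in the DXPL, saving the original value */
    if(H5AC_set_ring(dxpl_id, H5AC_RING_FSM, &dxpl, &orig_ring) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "unable to set ring value")

    /* ... protect/insert/unprotect free space metadata here, using 'f'
     * and dxpl_id; H5C_protect() and H5C_insert_entry() pick the ring
     * up from the DXPL ... */

done:
    /* Restore the original ring (no-op if it was never changed) */
    if(H5AC_reset_ring(dxpl, orig_ring) < 0)
        HDONE_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "unable to reset ring value")

    FUNC_LEAVE_NOAPI(ret_value)
} /* example_fsm_operation() */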
diff --git a/src/H5ACprivate.h b/src/H5ACprivate.h
index 584ce9d..fded15c 100644
--- a/src/H5ACprivate.h
+++ b/src/H5ACprivate.h
@@ -132,6 +132,15 @@ typedef enum {
#define H5AC__SERIALIZE_MOVED_FLAG H5C__SERIALIZE_MOVED_FLAG
#define H5AC__SERIALIZE_COMPRESSED_FLAG H5C__SERIALIZE_COMPRESSED_FLAG
+/* Aliases for the "ring" type and values */
+typedef H5C_ring_t H5AC_ring_t;
+#define H5AC_RING_INV H5C_RING_UNDEFINED
+#define H5AC_RING_US H5C_RING_USER
+#define H5AC_RING_FSM H5C_RING_FSM
+#define H5AC_RING_SBE H5C_RING_SBE
+#define H5AC_RING_SB H5C_RING_SB
+#define H5AC_RING_NTYPES H5C_RING_NTYPES
+
/* Aliases for 'notify action' type & values */
typedef H5C_notify_action_t H5AC_notify_action_t;
#define H5AC_NOTIFY_ACTION_AFTER_INSERT H5C_NOTIFY_ACTION_AFTER_INSERT
@@ -185,6 +194,8 @@ typedef H5C_t H5AC_t;
#define H5AC_METADATA_TAG_SIZE sizeof(haddr_t)
#define H5AC_METADATA_TAG_DEF H5AC__INVALID_TAG
+#define H5AC_RING_NAME "H5AC_ring_type"
+
/* Dataset transfer property list for flush calls */
/* (Collective set, "block before metadata write" set and "library internal" set) */
/* (Global variable declaration, definition is in H5AC.c) */
@@ -350,9 +361,16 @@ H5_DLL herr_t H5AC_set_cache_auto_resize_config(H5AC_t *cache_ptr,
H5_DLL herr_t H5AC_validate_config(H5AC_cache_config_t *config_ptr);
H5_DLL herr_t H5AC_close_trace_file(H5AC_t *cache_ptr);
H5_DLL herr_t H5AC_open_trace_file(H5AC_t *cache_ptr, const char *trace_file_name);
+
+/* Tag & Ring routines */
H5_DLL herr_t H5AC_tag(hid_t dxpl_id, haddr_t metadata_tag, haddr_t *prev_tag);
H5_DLL herr_t H5AC_retag_copied_metadata(const H5F_t *f, haddr_t metadata_tag);
H5_DLL herr_t H5AC_ignore_tags(const H5F_t *f);
+H5_DLL herr_t H5AC_get_entry_ring(const H5F_t *f, haddr_t addr, H5AC_ring_t *ring);
+H5_DLL herr_t H5AC_set_ring(hid_t dxpl_id, H5AC_ring_t ring, H5P_genplist_t **dxpl,
+ H5AC_ring_t *orig_ring);
+H5_DLL herr_t H5AC_reset_ring(H5P_genplist_t *dxpl, H5AC_ring_t orig_ring);
+
#ifdef H5_HAVE_PARALLEL
H5_DLL herr_t H5AC_add_candidate(H5AC_t * cache_ptr, haddr_t addr);
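H5AC_RING_NAME names the DXPL property the ring travels on; the cache reads it back in H5C_protect() and H5C_insert_entry() (see the H5C.c hunks below). A hypothetical sanity-check helper built on the new query routine might look like the sketch below; the function name and its use are illustrative only, relying just on the H5AC_get_entry_ring() signature declared above.

/* Hypothetical debugging aid: verify that the entry at 'addr' was
 * cached in the ring the caller expects. */
herr_t
check_entry_ring(const H5F_t *f, haddr_t addr, H5AC_ring_t expected)
{
    H5AC_ring_t ring = H5AC_RING_INV;   /* Ring of entry at 'addr' */
    herr_t ret_value = SUCCEED;         /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    /* On error, H5AC_get_entry_ring() leaves '*ring' unmodified, hence
     * the H5AC_RING_INV initialization above. */
    if(H5AC_get_entry_ring(f, addr, &ring) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't retrieve ring for entry")
    if(ring != expected)
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry cached in unexpected ring")

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* check_entry_ring() */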
diff --git a/src/H5C.c b/src/H5C.c
index b360e9d..cd04f7e 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -147,6 +147,12 @@ static herr_t H5C_flush_invalidate_cache(const H5F_t * f,
hid_t dxpl_id,
unsigned flags);
+static herr_t H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id,
+ H5C_ring_t ring, unsigned flags);
+
+static herr_t H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring,
+ unsigned flags);
+
static void * H5C_load_entry(H5F_t * f,
hid_t dxpl_id,
const H5C_class_t * type,
@@ -496,6 +502,16 @@ H5C_create(size_t max_cache_size,
cache_ptr->clean_index_size = (size_t)0;
cache_ptr->dirty_index_size = (size_t)0;
+ for(i = 0; i < H5C_RING_NTYPES; i++) {
+ cache_ptr->index_ring_len[i] = 0;
+ cache_ptr->index_ring_size[i] = (size_t)0;
+ cache_ptr->clean_index_ring_size[i] = (size_t)0;
+ cache_ptr->dirty_index_ring_size[i] = (size_t)0;
+
+ cache_ptr->slist_ring_len[i] = 0;
+ cache_ptr->slist_ring_size[i] = (size_t)0;
+ } /* end for */
+
/* Tagging Field Initializations */
cache_ptr->ignore_tags = FALSE;
@@ -510,10 +526,8 @@ H5C_create(size_t max_cache_size,
cache_ptr->slist_size_increase = 0;
#endif /* H5C_DO_SANITY_CHECKS */
- for ( i = 0; i < H5C__HASH_TABLE_LEN; i++ )
- {
+ for(i = 0; i < H5C__HASH_TABLE_LEN; i++)
(cache_ptr->index)[i] = NULL;
- }
cache_ptr->entries_removed_counter = 0;
cache_ptr->last_entry_removed_ptr = NULL;
@@ -1029,40 +1043,70 @@ done:
*
* JRM -- 12/13/14
*
+ * Modified function to support rings. Basic idea is that
+ * every entry in the cache is assigned to a ring. Entries
+ * in the outermost ring are flushed first, followed by
+ * those in the next outermost ring, and so on until the
+ * innermost ring is flushed. See header comment on
+ * H5C_ring_t in H5Cprivate.h for a more detailed
+ * discussion.
+ *
+ * JRM -- 8/30/15
+ *
*-------------------------------------------------------------------------
*/
herr_t
H5C_flush_cache(H5F_t *f, hid_t dxpl_id, unsigned flags)
{
- H5C_t * cache_ptr = f->shared->cache;
- herr_t ret_value = SUCCEED;
- hbool_t destroy;
- hbool_t flushed_entries_last_pass;
- hbool_t flush_marked_entries;
- hbool_t ignore_protected;
- hbool_t tried_to_flush_protected_entry = FALSE;
- hbool_t restart_slist_scan;
- int32_t passes = 0;
- int32_t protected_entries = 0;
- H5SL_node_t * node_ptr = NULL;
- H5C_cache_entry_t * entry_ptr = NULL;
- H5C_cache_entry_t * next_entry_ptr = NULL;
#if H5C_DO_SANITY_CHECKS
- int64_t flushed_entries_count = 0;
- int64_t flushed_entries_size = 0;
- int64_t initial_slist_len = 0;
- size_t initial_slist_size = 0;
- int64_t entry_size_change;
- int64_t * entry_size_change_ptr = &entry_size_change;
-#else /* H5C_DO_SANITY_CHECKS */
- int64_t * entry_size_change_ptr = NULL;
+ int i;
+ int32_t index_len = 0;
+ size_t index_size = (size_t)0;
+ size_t clean_index_size = (size_t)0;
+ size_t dirty_index_size = (size_t)0;
+ size_t slist_size = (size_t)0;
+ int32_t slist_len = 0;
#endif /* H5C_DO_SANITY_CHECKS */
+ H5C_ring_t ring;
+ H5C_t * cache_ptr;
+ hbool_t destroy;
+ hbool_t ignore_protected;
+ herr_t ret_value = SUCCEED;
FUNC_ENTER_NOAPI(FAIL)
- HDassert( cache_ptr );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- HDassert( cache_ptr->slist_ptr );
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_ptr);
+
+#if H5C_DO_SANITY_CHECKS
+ HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+
+ for(i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
+ index_len += cache_ptr->index_ring_len[i];
+ index_size += cache_ptr->index_ring_size[i];
+ clean_index_size += cache_ptr->clean_index_ring_size[i];
+ dirty_index_size += cache_ptr->dirty_index_ring_size[i];
+
+ slist_len += cache_ptr->slist_ring_len[i];
+ slist_size += cache_ptr->slist_ring_size[i];
+ } /* end for */
+
+ HDassert(cache_ptr->index_len == index_len);
+ HDassert(cache_ptr->index_size == index_size);
+ HDassert(cache_ptr->clean_index_size == clean_index_size);
+ HDassert(cache_ptr->dirty_index_size == dirty_index_size);
+ HDassert(cache_ptr->slist_len == slist_len);
+ HDassert(cache_ptr->slist_size == slist_size);
+#endif /* H5C_DO_SANITY_CHECKS */
#if H5C_DO_EXTREME_SANITY_CHECKS
if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
@@ -1073,13 +1117,6 @@ H5C_flush_cache(H5F_t *f, hid_t dxpl_id, unsigned flags)
ignore_protected = ( (flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0 );
destroy = ( (flags & H5C__FLUSH_INVALIDATE_FLAG) != 0 );
-
- /* note that flush_marked_entries is set to FALSE if destroy is TRUE */
- flush_marked_entries = ( ( (flags & H5C__FLUSH_MARKED_ENTRIES_FLAG) != 0 )
- &&
- ( ! destroy )
- );
-
HDassert( ! ( destroy && ignore_protected ) );
HDassert( ! ( cache_ptr->flush_in_progress ) );
@@ -1088,337 +1125,18 @@ H5C_flush_cache(H5F_t *f, hid_t dxpl_id, unsigned flags)
if(destroy) {
if(H5C_flush_invalidate_cache(f, dxpl_id, flags) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate failed.")
- } else {
- /* When we are only flushing marked entries, the slist will usually
- * still contain entries when we have flushed everything we should.
- * Thus we track whether we have flushed any entries in the last
- * pass, and terminate if we haven't.
- */
-
- flushed_entries_last_pass = TRUE;
-
- /* set the cache_ptr->slist_change_in_pre_serialize and
- * cache_ptr->slist_change_in_serialize to false.
- *
- * These flags are set to TRUE by H5C__flush_single_entry if the
- * slist is modified by a pre_serialize or serialize call respectively.
- * H5C_flush_cache uses these flags to detect any modifications
- * to the slist that might corrupt the scan of the slist -- and
- * restart the scan in this event.
+ } /* end if */
+ else {
+ /* flush each ring, starting from the outermost ring and
+ * working inward.
*/
- cache_ptr->slist_change_in_pre_serialize = FALSE;
- cache_ptr->slist_change_in_serialize = FALSE;
-
- while ( ( passes < H5C__MAX_PASSES_ON_FLUSH ) &&
- ( cache_ptr->slist_len != 0 ) &&
- ( protected_entries == 0 ) &&
- ( flushed_entries_last_pass ) )
- {
- unsigned curr_flush_dep_height = 0;
- unsigned flush_dep_passes = 0;
-
- flushed_entries_last_pass = FALSE;
-
- /* Loop over all flush dependency heights of entries */
- while((curr_flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS) &&
- (cache_ptr->slist_len != 0) &&
- (flush_dep_passes < H5C__MAX_PASSES_ON_FLUSH) )
- {
- hbool_t flushed_during_dep_loop = FALSE;
-
-#if H5C_DO_SANITY_CHECKS
- /* For sanity checking, try to verify that the skip list has
- * the expected size and number of entries at the end of each
- * internal while loop (see below).
- *
- * Doing this get a bit tricky, as depending on flags, we may
- * or may not flush all the entries in the slist.
- *
- * To make things more entertaining, with the advent of the
- * fractal heap, the entry serialize callback can cause entries
- * to be dirtied, resized, and/or moved. Also, the
- * pre_serialize callback can result in an entry being
- * removed from the cache via the take ownership flag.
- *
- * To deal with this, we first make note of the initial
- * skip list length and size:
- */
- initial_slist_len = cache_ptr->slist_len;
- initial_slist_size = cache_ptr->slist_size;
-
- /* We then zero counters that we use to track the number
- * and total size of entries flushed:
- */
- flushed_entries_count = 0;
- flushed_entries_size = 0;
-
- /* As mentioned above, there is the possibility that
- * entries will be dirtied, resized, flushed, or removed
- * from the cache via the take ownership flag during
- * our pass through the skip list. To capture the number
- * of entries added, and the skip list size delta,
- * zero the slist_len_increase and slist_size_increase of
- * the cache's instance of H5C_t. These fields will be
- * updated elsewhere to account for slist insertions and/or
- * dirty entry size changes.
- */
- cache_ptr->slist_len_increase = 0;
- cache_ptr->slist_size_increase = 0;
-
- /* at the end of the loop, use these values to compute the
- * expected slist length and size and compare this with the
- * value recorded in the cache's instance of H5C_t.
- */
-#endif /* H5C_DO_SANITY_CHECKS */
-
- restart_slist_scan = TRUE;
-
- while ( ( restart_slist_scan ) || ( node_ptr != NULL ) )
- {
- if ( restart_slist_scan )
- {
- restart_slist_scan = FALSE;
-
- /* Start at beginning of skip list */
- node_ptr = H5SL_first(cache_ptr->slist_ptr);
-
- if ( node_ptr == NULL )
- {
- /* the slist is empty -- break out of inner loop */
- break;
- }
- HDassert( node_ptr != NULL );
-
- /* Get cache entry for this node */
- next_entry_ptr =
- (H5C_cache_entry_t *)H5SL_item(node_ptr);
-
- if(NULL == next_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "next_entry_ptr == NULL ?!?!")
-
- HDassert( next_entry_ptr->magic == \
- H5C__H5C_CACHE_ENTRY_T_MAGIC );
- HDassert( next_entry_ptr->is_dirty );
- HDassert( next_entry_ptr->in_slist );
- }
-
- entry_ptr = next_entry_ptr;
-
- /* With the advent of the fractal heap, the free space
- * manager, and the version 3 cache, it is possible
- * that the pre-serialize or serialize callback will
- * dirty, resize, or take ownership of other entries
- * in the cache.
- *
- * To deal with this, I have inserted code to detect any
- * change in the skip list not directly under the control
- * of this function. If such modifications are detected,
- * we must re-start the scan of the skip list to avoid
- * the possibility that the target of the next_entry_ptr
- * may have been flushed or deleted from the cache.
- *
- * To verify that all such possibilities have been dealt
- * with, we do a bit of extra sanity checking on
- * entry_ptr.
- */
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->in_slist);
- HDassert(entry_ptr->is_dirty);
-
- /* increment node pointer now, before we delete its target
- * from the slist.
- */
- node_ptr = H5SL_next(node_ptr);
-
- if ( node_ptr != NULL ) {
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- if ( NULL == next_entry_ptr )
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
- HDassert( next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC );
- HDassert( next_entry_ptr->is_dirty );
- HDassert( next_entry_ptr->in_slist );
- HDassert( entry_ptr != next_entry_ptr );
- } else {
- next_entry_ptr = NULL;
- }
-
- HDassert( entry_ptr != NULL );
- HDassert( entry_ptr->in_slist );
-
- if ( ( ( ! flush_marked_entries ) ||
- ( entry_ptr->flush_marker ) ) &&
- ( ( ! entry_ptr->flush_me_last ) ||
- ( ( entry_ptr->flush_me_last ) &&
- ( cache_ptr->num_last_entries >=
- cache_ptr->slist_len ) ) ) ) {
-
- if ( entry_ptr->is_protected ) {
-
- /* we probably have major problems -- but lets
- * flush everything we can before we decide
- * whether to flag an error.
- */
- tried_to_flush_protected_entry = TRUE;
- protected_entries++;
-
- } else if ( entry_ptr->is_pinned ) {
- /* Test to see if we are can flush the entry now.
- * If we can, go ahead and flush. Note that we
- * aren't trying to do a destroy here, so that
- * is not an issue.
- */
- if(entry_ptr->flush_dep_height == curr_flush_dep_height ) {
-#if H5C_DO_SANITY_CHECKS
- flushed_entries_count++;
- flushed_entries_size += (int64_t)entry_ptr->size;
- entry_size_change = 0;
-#endif /* H5C_DO_SANITY_CHECKS */
- if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flags, entry_size_change_ptr) < 0 )
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed.")
-
-#if H5C_DO_SANITY_CHECKS
- /* it is possible that the entry size changed
- * during flush -- update flushed_entries_size
- * to account for this.
- */
- flushed_entries_size += entry_size_change;
-#endif /* H5C_DO_SANITY_CHECKS */
-
- flushed_during_dep_loop = TRUE;
-
- if ((cache_ptr->slist_change_in_serialize) ||
- (cache_ptr->slist_change_in_pre_serialize))
- {
- /* The slist has been modified by something
- * other than the simple removal of the
- * of the flushed entry after the flush.
- *
- * This has the potential to corrupt the
- * scan through the slist, so restart it.
- */
- restart_slist_scan = TRUE;
- cache_ptr->slist_change_in_pre_serialize
- = FALSE;
- cache_ptr->slist_change_in_serialize
- = FALSE;
-
- H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
- }
- } /* end if */
- else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
- /* This shouldn't happen -- if it does, just scream and die. */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
- } /* end if */
- else {
- /* Test to see if we are can flush the entry now.
- * If we can, go ahead and flush. Note that we
- * aren't trying to do a destroy here, so that
- * is not an issue.
- */
- if(entry_ptr->flush_dep_height == curr_flush_dep_height ) {
-#if H5C_DO_SANITY_CHECKS
- flushed_entries_count++;
- flushed_entries_size += (int64_t)entry_ptr->size;
- entry_size_change = 0;
-#endif /* H5C_DO_SANITY_CHECKS */
- if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flags, entry_size_change_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry.")
-
-#if H5C_DO_SANITY_CHECKS
- /* it is possible that the entry size changed
- * during flush -- update flushed_entries_size
- * to account for this.
- */
- flushed_entries_size += entry_size_change;
-#endif /* H5C_DO_SANITY_CHECKS */
-
- flushed_during_dep_loop = TRUE;
-
- if ((cache_ptr->slist_change_in_serialize) ||
- (cache_ptr->slist_change_in_pre_serialize))
- {
- /* The slist has been modified by something
- * other than the simple removal of the
- * of the flushed entry after the flush.
- *
- * This has the potential to corrupt the
- * scan through the slist, so restart it.
- */
- restart_slist_scan = TRUE;
- cache_ptr->slist_change_in_pre_serialize
- = FALSE;
- cache_ptr->slist_change_in_serialize
- = FALSE;
-
- H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
- }
- } /* end if */
- else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
- /* This shouldn't happen -- if it does, just scream and die. */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
- } /* end else */
- } /* end if */
- } /* while ( ( restart_slist_scan ) || ( node_ptr != NULL ) ) */
-
- /* Check for incrementing flush dependency height */
- if(flushed_during_dep_loop) {
- /* If we flushed an entry at this flush dependency height
- * start over at the bottom level of the flush dependencies
- */
- curr_flush_dep_height = 0;
-
- /* Make certain we don't get stuck in an infinite loop */
- flush_dep_passes++;
-
- /* Set flag for outer loop */
- flushed_entries_last_pass = TRUE;
- } /* end if */
- else
- curr_flush_dep_height++;
-
- } /* while ( curr_flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS) */
-
- passes++;
-
-#if H5C_DO_SANITY_CHECKS
- /* Verify that the slist size and length are as expected. */
-
- HDassert( (initial_slist_len + cache_ptr->slist_len_increase -
- flushed_entries_count) == cache_ptr->slist_len );
- HDassert( (size_t)((int64_t)initial_slist_size +
- cache_ptr->slist_size_increase -
- flushed_entries_size) == cache_ptr->slist_size );
-#endif /* H5C_DO_SANITY_CHECKS */
-
- } /* while */
-
- HDassert( protected_entries <= cache_ptr->pl_len );
-
- if ( ( ( cache_ptr->pl_len > 0 ) && ( !ignore_protected ) )
- ||
- ( tried_to_flush_protected_entry ) ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
- "cache has protected items")
- }
-
- if ( ( cache_ptr->slist_len != 0 ) &&
- ( passes >= H5C__MAX_PASSES_ON_FLUSH ) ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
- "flush pass limit exceeded.")
- }
-
-#if H5C_DO_SANITY_CHECKS
- if ( ! flush_marked_entries ) {
-
- HDassert( cache_ptr->slist_len == 0 );
- HDassert( cache_ptr->slist_size == 0 );
- }
-#endif /* H5C_DO_SANITY_CHECKS */
-
- }
+ ring = H5C_RING_USER;
+ while(ring < H5C_RING_NTYPES) {
+ if(H5C_flush_ring(f, dxpl_id, ring, flags) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush ring failed.")
+ ring++;
+ } /* end while */
+ } /* end else */
done:
cache_ptr->flush_in_progress = FALSE;
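The sanity-check block added at the top of H5C_flush_cache() above encodes the central bookkeeping invariant of this patch: each global cache statistic must equal the sum of its per-ring counterparts, and the undefined ring must stay empty. A standalone restatement with made-up numbers, assuming only that the per-ring arrays are indexed by ring value:

#include <assert.h>
#include <stddef.h>

#define NRINGS 5    /* stands in for H5C_RING_NTYPES */

int main(void)
{
    /* Per-ring index sizes; slot 0 is the undefined ring and must
     * stay empty once every entry has been assigned a real ring. */
    size_t index_ring_size[NRINGS] = {0, 4096, 512, 256, 96};
    size_t index_size = 4960;   /* global figure the cache maintains */
    size_t sum = 0;
    int i;

    assert(index_ring_size[0] == 0);    /* H5C_RING_UNDEFINED empty */
    for(i = 1; i < NRINGS; i++)         /* H5C_RING_USER .. innermost */
        sum += index_ring_size[i];
    assert(sum == index_size);          /* global == sum over rings */
    return 0;
}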
@@ -2031,7 +1749,8 @@ H5C_insert_entry(H5F_t * f,
unsigned int flags)
{
H5C_t * cache_ptr;
- herr_t result;
+ H5P_genplist_t *dxpl;
+ H5AC_ring_t ring = H5C_RING_UNDEFINED;
hbool_t insert_pinned;
hbool_t flush_last;
#ifdef H5_HAVE_PARALLEL
@@ -2078,6 +1797,14 @@ H5C_insert_entry(H5F_t * f,
flush_collectively = ( (flags & H5C__FLUSH_COLLECTIVELY_FLAG) != 0 );
#endif /* H5_HAVE_PARALLEL */
+ /* Get the dataset transfer property list */
+ if(NULL == (dxpl = (H5P_genplist_t *)H5I_object_verify(dxpl_id, H5I_GENPROP_LST)))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, FAIL, "not a property list")
+
+ /* Get the ring type from the DXPL */
+ if((H5P_get(dxpl, H5AC_RING_NAME, &ring)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "unable to query ring value")
+
entry_ptr = (H5C_cache_entry_t *)thing;
/* verify that the new entry isn't already in the hash table -- scream
@@ -2155,6 +1882,8 @@ H5C_insert_entry(H5F_t * f,
entry_ptr->flush_in_progress = FALSE;
entry_ptr->destroy_in_progress = FALSE;
+ entry_ptr->ring = ring;
+
/* Initialize flush dependency height fields */
entry_ptr->flush_dep_parent = NULL;
for(u = 0; u < H5C__NUM_FLUSH_DEP_HEIGHTS; u++)
@@ -2175,13 +1904,8 @@ H5C_insert_entry(H5F_t * f,
if ( ( cache_ptr->flash_size_increase_possible ) &&
( entry_ptr->size > cache_ptr->flash_size_increase_threshold ) ) {
- result = H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, \
- "H5C__flash_increase_cache_size failed.")
- }
+ if(H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C__flash_increase_cache_size failed.")
}
if(cache_ptr->index_size >= cache_ptr->max_cache_size)
@@ -2198,33 +1922,20 @@ H5C_insert_entry(H5F_t * f,
size_t space_needed;
- if ( empty_space <= entry_ptr->size ) {
-
+ if(empty_space <= entry_ptr->size)
cache_ptr->cache_full = TRUE;
- }
-
- if ( cache_ptr->check_write_permitted != NULL ) {
-
- result = (cache_ptr->check_write_permitted)(f, &write_permitted);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, \
- "Can't get write_permitted")
- }
- } else {
+ if(cache_ptr->check_write_permitted != NULL) {
+ if((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "Can't get write_permitted")
+ } /* end if */
+ else
write_permitted = cache_ptr->write_permitted;
- }
-
- HDassert( entry_ptr->size <= H5C_MAX_ENTRY_SIZE );
+ HDassert(entry_ptr->size <= H5C_MAX_ENTRY_SIZE);
space_needed = entry_ptr->size;
-
- if ( space_needed > cache_ptr->max_cache_size ) {
-
+ if(space_needed > cache_ptr->max_cache_size)
space_needed = cache_ptr->max_cache_size;
- }
/* Note that space_needed is just the amount of space that
* needed to insert the new entry without exceeding the cache
@@ -2251,16 +1962,8 @@ H5C_insert_entry(H5F_t * f,
* no point in worrying about the third.
*/
- result = H5C_make_space_in_cache(f,
- dxpl_id,
- space_needed,
- write_permitted);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, \
- "H5C_make_space_in_cache failed.")
- }
+ if(H5C_make_space_in_cache(f, dxpl_id, space_needed, write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C_make_space_in_cache failed.")
}
H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL)
@@ -2882,6 +2585,8 @@ H5C_protect(H5F_t * f,
unsigned flags)
{
H5C_t * cache_ptr;
+ H5P_genplist_t *dxpl;
+ H5AC_ring_t ring = H5C_RING_UNDEFINED;
hbool_t hit;
hbool_t have_write_permitted = FALSE;
hbool_t read_only = FALSE;
@@ -2890,7 +2595,6 @@ H5C_protect(H5F_t * f,
hbool_t flush_collectively;
#endif /* H5_HAVE_PARALLEL */
hbool_t write_permitted;
- herr_t result;
size_t empty_space;
void * thing;
H5C_cache_entry_t * entry_ptr;
@@ -2925,10 +2629,20 @@ H5C_protect(H5F_t * f,
flush_collectively = ( (flags & H5C__FLUSH_COLLECTIVELY_FLAG) != 0 );
#endif /* H5_HAVE_PARALLEL */
+ /* Get the dataset transfer property list */
+ if(NULL == (dxpl = (H5P_genplist_t *)H5I_object_verify(dxpl_id, H5I_GENPROP_LST)))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, NULL, "not a property list")
+
+ /* Get the ring type from the DXPL */
+ if((H5P_get(dxpl, H5AC_RING_NAME, &ring)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "unable to query ring value")
+
/* first check to see if the target is in cache */
H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, NULL)
if ( entry_ptr != NULL ) {
+ if(entry_ptr->ring != ring)
+            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "ring type mismatch occurred for cache entry\n");
/* Check for trying to load the wrong type of entry from an address */
if(entry_ptr->type != type)
@@ -2936,7 +2650,6 @@ H5C_protect(H5F_t * f,
#if H5C_DO_TAGGING_SANITY_CHECKS
{
- H5P_genplist_t *dxpl; /* dataset transfer property list */
haddr_t tag = HADDR_UNDEF;
/* The entry is already in the cache, but make sure that the tag value
@@ -2945,21 +2658,15 @@ H5C_protect(H5F_t * f,
and it would have received a legal tag value after getting loaded
from disk. */
- /* Get the dataset transfer property list */
- if(NULL == (dxpl = (H5P_genplist_t *)H5I_object_verify(dxpl_id, H5I_GENPROP_LST)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a property list");
-
/* Get the tag from the DXPL */
- if( (H5P_get(dxpl, "H5AC_metadata_tag", &tag)) < 0 )
+ if((H5P_get(dxpl, "H5AC_metadata_tag", &tag)) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "unable to query property value");
/* Verify tag value */
- if (cache_ptr->ignore_tags != TRUE) {
-
+ if(cache_ptr->ignore_tags != TRUE) {
/* Verify legal tag value */
- if ( (H5C_verify_tag(entry_ptr->type->id, tag)) < 0 )
+ if((H5C_verify_tag(entry_ptr->type->id, tag)) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "tag verification failed");
-
} /* end if */
}
#endif
@@ -2982,6 +2689,8 @@ H5C_protect(H5F_t * f,
entry_ptr = (H5C_cache_entry_t *)thing;
+ entry_ptr->ring = ring;
+
/* Apply tag to newly protected entry */
if(H5C_tag_entry(cache_ptr, entry_ptr, dxpl_id) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, NULL, "Cannot tag metadata entry")
@@ -2992,14 +2701,8 @@ H5C_protect(H5F_t * f,
if ( ( cache_ptr->flash_size_increase_possible ) &&
( entry_ptr->size > cache_ptr->flash_size_increase_threshold ) ) {
- result = H5C__flash_increase_cache_size(cache_ptr, 0,
- entry_ptr->size);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
- "H5C__flash_increase_cache_size failed.")
- }
+ if(H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__flash_increase_cache_size failed.")
}
if(cache_ptr->index_size >= cache_ptr->max_cache_size)
@@ -3025,20 +2728,11 @@ H5C_protect(H5F_t * f,
if(empty_space <= entry_ptr->size)
cache_ptr->cache_full = TRUE;
- if ( cache_ptr->check_write_permitted != NULL ) {
-
- result = (cache_ptr->check_write_permitted)(f, &write_permitted);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
- "Can't get write_permitted 1")
-
- } else {
-
+ if(cache_ptr->check_write_permitted != NULL) {
+ if((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Can't get write_permitted 1")
+ else
have_write_permitted = TRUE;
-
- }
} else {
write_permitted = cache_ptr->write_permitted;
@@ -3085,16 +2779,8 @@ H5C_protect(H5F_t * f,
* see no point in worrying about the fourth.
*/
- result = H5C_make_space_in_cache(f,
- dxpl_id,
- space_needed,
- write_permitted);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
- "H5C_make_space_in_cache failed 1.")
- }
+        if(H5C_make_space_in_cache(f, dxpl_id, space_needed, write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C_make_space_in_cache failed 1.")
}
/* Insert the entry in the hash table. It can't be dirty yet, so
@@ -3188,19 +2874,10 @@ H5C_protect(H5F_t * f,
if ( ! have_write_permitted ) {
if ( cache_ptr->check_write_permitted != NULL ) {
-
- result = (cache_ptr->check_write_permitted)(f, &write_permitted);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
- "Can't get write_permitted 2")
-
- } else {
-
+ if((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Can't get write_permitted 2")
+ else
have_write_permitted = TRUE;
-
- }
} else {
write_permitted = cache_ptr->write_permitted;
@@ -3214,14 +2891,8 @@ H5C_protect(H5F_t * f,
( cache_ptr->cache_accesses >=
(cache_ptr->resize_ctl).epoch_length ) ) {
- result = H5C__auto_adjust_cache_size(f,
- dxpl_id,
- write_permitted);
- if ( result != SUCCEED ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
- "Cache auto-resize failed.")
- }
+ if(H5C__auto_adjust_cache_size(f, dxpl_id, write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Cache auto-resize failed.")
}
if ( cache_ptr->size_decreased ) {
@@ -3250,16 +2921,8 @@ H5C_protect(H5F_t * f,
if(cache_ptr->index_size > cache_ptr->max_cache_size)
cache_ptr->cache_full = TRUE;
- result = H5C_make_space_in_cache(f,
- dxpl_id,
- (size_t)0,
- write_permitted);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
- "H5C_make_space_in_cache failed 2.")
- }
+            if(H5C_make_space_in_cache(f, dxpl_id, (size_t)0, write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C_make_space_in_cache failed 2.")
}
}
}
@@ -5380,6 +5043,7 @@ H5C_create_flush_dependency(void * parent_thing, void * child_thing)
HDassert(H5F_addr_defined(child_entry->addr));
HDassert(child_entry->flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS);
cache_ptr = parent_entry->cache_ptr;
+ HDassert(parent_entry->ring == child_entry->ring);
HDassert(cache_ptr);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
HDassert(cache_ptr == child_entry->cache_ptr);
@@ -6846,46 +6510,139 @@ done:
* Programmer: John Mainzer
 * 3/24/05
*
- * Changes: Modified function to test for slist chamges in
- * pre_serialize and serialize callbacks, and re-start
- * scans through the slist when such changes occur.
- *
- * This has been a potential problem for some time,
- * and there has been code in this function to deal
- * with elements of this issue. However the shift
- * to the V3 cache in combination with the activities
- * of some of the cache clients (in particular the
- * free space manager and the fractal heap) have
- * made this re-work necessary in H5C_flush_cache.
- *
- * At present, this issue doesn't seem to be causing problems
- * in H5C_flush_invalidate_cache(). However, it seems
- * prudent to port the H5C_flush_cache changes to this
- * function as well.
- *
- * JRM -- 12/14/14
- *
- * Added code to track entry size change during flush single
- * entry. This didn't used to be a problem, as the entry
- * was largely removed from the cache data structures before
- * the flush proper. However, re-entrant calls to the cache
- * in the parallel case required a re-factoring of the
- * H5C__flush_single_entry() function to keep entries fully
- * in the cache until after the pre-serialize and serialize
- * calls.
- * JRM -- 12/25/14
- *
*-------------------------------------------------------------------------
*/
static herr_t
H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
{
H5C_t * cache_ptr;
+ H5C_ring_t ring;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_ptr);
+
+#if H5C_DO_SANITY_CHECKS
+{
+ int32_t i;
+ int32_t index_len = 0;
+ int32_t slist_len = 0;
+ size_t index_size = (size_t)0;
+ size_t clean_index_size = (size_t)0;
+ size_t dirty_index_size = (size_t)0;
+ size_t slist_size = (size_t)0;
+
+ HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+
+ for(i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
+ index_len += cache_ptr->index_ring_len[i];
+ index_size += cache_ptr->index_ring_size[i];
+ clean_index_size += cache_ptr->clean_index_ring_size[i];
+ dirty_index_size += cache_ptr->dirty_index_ring_size[i];
+
+ slist_len += cache_ptr->slist_ring_len[i];
+ slist_size += cache_ptr->slist_ring_size[i];
+ } /* end for */
+
+ HDassert(cache_ptr->index_len == index_len);
+ HDassert(cache_ptr->index_size == index_size);
+ HDassert(cache_ptr->clean_index_size == clean_index_size);
+ HDassert(cache_ptr->dirty_index_size == dirty_index_size);
+ HDassert(cache_ptr->slist_len == slist_len);
+ HDassert(cache_ptr->slist_size == slist_size);
+}
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ /* remove ageout markers if present */
+ if(cache_ptr->epoch_markers_active > 0)
+ if(H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers.")
+
+ /* flush invalidate each ring, starting from the outermost ring and
+ * working inward.
+ */
+ ring = H5C_RING_USER;
+ while(ring < H5C_RING_NTYPES) {
+ if(H5C_flush_invalidate_ring(f, dxpl_id, ring, flags) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate ring failed.")
+ ring++;
+ } /* end while */
+
+ /* Invariants, after destroying all entries in the hash table */
+ HDassert(cache_ptr->index_size == 0);
+ HDassert(cache_ptr->clean_index_size == 0);
+ HDassert(cache_ptr->dirty_index_size == 0);
+ HDassert(cache_ptr->slist_len == 0);
+ HDassert(cache_ptr->slist_size == 0);
+ HDassert(cache_ptr->pel_len == 0);
+ HDassert(cache_ptr->pel_size == 0);
+ HDassert(cache_ptr->pl_len == 0);
+ HDassert(cache_ptr->pl_size == 0);
+ HDassert(cache_ptr->LRU_list_len == 0);
+ HDassert(cache_ptr->LRU_list_size == 0);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_flush_invalidate_cache() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_flush_invalidate_ring
+ *
+ * Purpose: Flush and destroy the entries contained in the target
+ * cache and ring.
+ *
+ * If the ring contains protected entries, the function will
+ * fail, as protected entries cannot be either flushed or
+ * destroyed. However, all unprotected entries should be
+ * flushed and destroyed before the function returns failure.
+ *
+ * While pinned entries can usually be flushed, they cannot
+ * be destroyed. However, they should be unpinned when all
+ * the entries that reference them have been destroyed (thus
+ * reducing the pinned entry's reference count to 0, allowing
+ * it to be unpinned).
+ *
+ * If pinned entries are present, the function makes repeated
+ * passes through the cache, flushing all dirty entries
+ * (including the pinned dirty entries where permitted) and
+ * destroying all unpinned entries. This process is repeated
+ * until either the cache is empty, or the number of pinned
+ * entries stops decreasing on each pass.
+ *
+ * If flush dependencies appear in the target ring, the
+ * function makes repeated passes through the cache flushing
+ * entries in flush dependency order.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 9/1/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
+ unsigned flags)
+{
+ H5C_t * cache_ptr;
hbool_t restart_slist_scan;
int32_t protected_entries = 0;
int32_t i;
- int32_t cur_pel_len;
- int32_t old_pel_len;
+ int32_t cur_ring_pel_len;
+ int32_t old_ring_pel_len;
int32_t passes = 0;
unsigned cooked_flags;
H5SL_node_t * node_ptr = NULL;
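The control flow that the H5C_flush_invalidate_ring header comment above describes reduces to the loop sketched below. This is a condensation, not the real function: the three helper names are hypothetical stand-ins for the slist scan, the hash-table scan, and the pinned-entry-list walk that the following hunks implement.

/* Condensed shape of H5C_flush_invalidate_ring(); helpers are
 * hypothetical stand-ins, declared here only so the sketch is
 * self-contained. */
static void    flush_dirty_ring_entries(H5C_t *cache_ptr, H5C_ring_t ring);
static void    destroy_clean_ring_entries(H5C_t *cache_ptr, H5C_ring_t ring);
static int32_t count_pinned_in_ring(H5C_t *cache_ptr, H5C_ring_t ring);

static herr_t
flush_invalidate_ring_sketch(H5C_t *cache_ptr, H5C_ring_t ring)
{
    int32_t cur_ring_pel_len = count_pinned_in_ring(cache_ptr, ring);
    int32_t old_ring_pel_len;

    while(cache_ptr->index_ring_len[ring] > 0) {
        /* Flush/destroy dirty entries via the skip list, honoring
         * flush_me_last and flush dependency heights. */
        flush_dirty_ring_entries(cache_ptr, ring);

        /* Destroy the remaining clean entries via the hash table
         * (clean entries do not appear in the slist). */
        destroy_clean_ring_entries(cache_ptr, ring);

        /* Pinned entries can be flushed but not destroyed; they must
         * unpin as the entries referencing them are destroyed.  If the
         * pinned count in this ring stops declining, scream and die
         * rather than loop forever. */
        old_ring_pel_len = cur_ring_pel_len;
        cur_ring_pel_len = count_pinned_in_ring(cache_ptr, ring);
        if(cur_ring_pel_len > 0 && cur_ring_pel_len >= old_ring_pel_len)
            return FAIL;
    } /* end while */

    return SUCCEED;
} /* flush_invalidate_ring_sketch() */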
@@ -6911,23 +6668,23 @@ H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
HDassert(cache_ptr);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
HDassert(cache_ptr->slist_ptr);
+ HDassert(ring > H5C_RING_UNDEFINED);
+ HDassert(ring < H5C_RING_NTYPES);
+
+ HDassert(cache_ptr->epoch_markers_active == 0);
/* Filter out the flags that are not relevant to the flush/invalidate.
* At present, only the H5C__FLUSH_CLEAR_ONLY_FLAG is kept.
*/
cooked_flags = flags & H5C__FLUSH_CLEAR_ONLY_FLAG;
- /* remove ageout markers if present */
- if(cache_ptr->epoch_markers_active > 0)
- if(H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers.")
    /* The flush procedure here is a bit strange.
*
* In the outer while loop we make at least one pass through the
- * cache, and then repeat until either all the pinned entries
- * unpin themselves, or until the number of pinned entries stops
- * declining. In this later case, we scream and die.
+ * cache, and then repeat until either all the pinned entries in
+ * the ring unpin themselves, or until the number of pinned entries
+     * in the ring stops declining. In this latter case, we scream and die.
*
* Since the fractal heap can dirty, resize, and/or move entries
     * in its flush callback, it is possible that the cache will still
@@ -6953,19 +6710,27 @@ H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
* to increasing address order, but there are no guarantees.
*/
- cur_pel_len = cache_ptr->pel_len;
- old_pel_len = cache_ptr->pel_len;
+ /* compute the number of pinned entries in this ring */
+ entry_ptr = cache_ptr->pel_head_ptr;
+ cur_ring_pel_len = 0;
+ while(entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring >= ring);
+ if(entry_ptr->ring == ring)
+ cur_ring_pel_len++;
- while ( cache_ptr->index_len > 0 )
- {
+ entry_ptr = entry_ptr->next;
+ } /* end while */
+
+ old_ring_pel_len = cur_ring_pel_len;
+ while(cache_ptr->index_ring_len[ring] > 0) {
unsigned curr_flush_dep_height = 0;
unsigned flush_dep_passes = 0;
/* Loop over all flush dependency heights of entries */
while((curr_flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS) &&
- (cache_ptr->index_len > 0 ) &&
- (flush_dep_passes < H5C__MAX_PASSES_ON_FLUSH) )
- {
+ (cache_ptr->index_ring_len[ring] > 0) &&
+ (flush_dep_passes < H5C__MAX_PASSES_ON_FLUSH)) {
hbool_t flushed_during_dep_loop = FALSE;
/* first, try to flush-destroy any dirty entries. Do this by
@@ -7039,6 +6804,7 @@ H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(next_entry_ptr->is_dirty);
HDassert(next_entry_ptr->in_slist);
+ HDassert(next_entry_ptr->ring >= ring);
} /* end if */
entry_ptr = next_entry_ptr;
@@ -7057,6 +6823,7 @@ H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(entry_ptr->in_slist);
HDassert(entry_ptr->is_dirty);
+ HDassert(entry_ptr->ring >= ring);
/* increment node pointer now, before we delete its target
* from the slist.
@@ -7069,6 +6836,7 @@ H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(next_entry_ptr->is_dirty);
HDassert(next_entry_ptr->in_slist);
+ HDassert(next_entry_ptr->ring >= ring);
HDassert(entry_ptr != next_entry_ptr);
} /* end if */
else
@@ -7087,10 +6855,10 @@ H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
HDassert(entry_ptr != NULL);
HDassert(entry_ptr->in_slist);
- if ( ( ! entry_ptr->flush_me_last ) ||
- ( ( entry_ptr->flush_me_last ) &&
- ( cache_ptr->num_last_entries >=
- cache_ptr->slist_len ) ) ) {
+ if(((!entry_ptr->flush_me_last) ||
+ ((entry_ptr->flush_me_last) &&
+ (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
+ (entry_ptr->ring == ring)) {
if(entry_ptr->is_protected) {
/* we have major problems -- but lets flush
* everything we can before we flag an error.
@@ -7233,15 +7001,16 @@ H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
while(next_entry_ptr != NULL) {
entry_ptr = next_entry_ptr;
HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring >= ring);
next_entry_ptr = entry_ptr->ht_next;
HDassert((next_entry_ptr == NULL) ||
(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC));
- if ( ( ! entry_ptr->flush_me_last ) ||
- ( ( entry_ptr->flush_me_last ) &&
- ( cache_ptr->num_last_entries >=
- cache_ptr->slist_len ) ) ) {
+ if(((!entry_ptr->flush_me_last) ||
+ ((entry_ptr->flush_me_last) &&
+ (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
+ (entry_ptr->ring == ring)) {
if(entry_ptr->is_protected) {
/* we have major problems -- but lets flush and
@@ -7284,7 +7053,9 @@ H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
entry_was_dirty = entry_ptr->is_dirty;
- if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, (cooked_flags | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG), NULL) < 0)
+ if(H5C__flush_single_entry(f, dxpl_id, entry_ptr,
+ (cooked_flags | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG),
+ NULL) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed.")
if(entry_was_dirty) {
@@ -7350,16 +7121,25 @@ H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
} /* end while loop over flush dependency heights */
- old_pel_len = cur_pel_len;
- cur_pel_len = cache_ptr->pel_len;
+ old_ring_pel_len = cur_ring_pel_len;
+ entry_ptr = cache_ptr->pel_head_ptr;
+ cur_ring_pel_len = 0;
+ while(entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring >= ring);
+
+ if(entry_ptr->ring == ring)
+ cur_ring_pel_len++;
- if ( ( cur_pel_len > 0 ) && ( cur_pel_len >= old_pel_len ) ) {
+ entry_ptr = entry_ptr->next;
+ } /* end while */
- /* The number of pinned entries is positive, and it is not
- * declining. Scream and die.
+ if((cur_ring_pel_len > 0) && (cur_ring_pel_len >= old_ring_pel_len)) {
+ /* The number of pinned entries in the ring is positive, and
+ * it is not declining. Scream and die.
*/
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Pinned entry count not decreasing, cur_pel_len = %d, old_pel_len = %d", (int)cur_pel_len, (int)old_pel_len)
- } else if ( ( cur_pel_len == 0 ) && ( old_pel_len == 0 ) ) {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Pinned entry count not decreasing, cur_ring_pel_len = %d, old_ring_pel_len = %d, ring = %d", (int)cur_ring_pel_len, (int)old_ring_pel_len, (int)ring)
+ } else if((cur_ring_pel_len == 0) && (old_ring_pel_len == 0)) {
/* increment the pass count */
passes++;
}
@@ -7371,36 +7151,409 @@ H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Maximum passes on flush exceeded.")
} /* main while loop */
- /* Invariants, after destroying all entries in the hash table */
- HDassert(cache_ptr->index_size == 0);
- HDassert(cache_ptr->clean_index_size == 0);
- HDassert(cache_ptr->dirty_index_size == 0);
- HDassert(cache_ptr->slist_len == 0);
- HDassert(cache_ptr->slist_size == 0);
- HDassert(cache_ptr->pel_len == 0);
- HDassert(cache_ptr->pel_size == 0);
- HDassert(cache_ptr->pl_len == 0);
- HDassert(cache_ptr->pl_size == 0);
- HDassert(cache_ptr->LRU_list_len == 0);
- HDassert(cache_ptr->LRU_list_size == 0);
+ /* Invariants, after destroying all entries in the ring */
+ for(i = (int)H5C_RING_UNDEFINED; i <= (int)ring; i++) {
+ HDassert(cache_ptr->index_ring_len[i] == 0);
+ HDassert(cache_ptr->index_ring_size[i] == (size_t)0);
+ HDassert(cache_ptr->clean_index_ring_size[i] == (size_t)0);
+ HDassert(cache_ptr->dirty_index_ring_size[i] == (size_t)0);
+ HDassert(cache_ptr->slist_ring_len[i] == 0);
+ HDassert(cache_ptr->slist_ring_size[i] == (size_t)0);
+ } /* end for */
HDassert(protected_entries <= cache_ptr->pl_len);
if(protected_entries > 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Cache has protected entries.")
+ else if(cur_ring_pel_len > 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't unpin all pinned entries in ring.")
- else if(cur_pel_len > 0) {
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_flush_invalidate_ring() */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't unpin all pinned entries.")
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_flush_ring
+ *
+ * Purpose: Flush the entries contained in the specified cache and
+ * ring. All entries in rings outside the specified ring
+ * must have been flushed on entry.
+ *
+ * If the cache contains protected entries in the specified
+ * ring, the function will fail, as protected entries cannot
+ * be flushed. However, all unprotected entries in the target
+ * ring should be flushed before the function returns failure.
+ *
+ * If flush dependencies appear in the target ring, the
+ * function makes repeated passes through the slist flushing
+ * entries in flush dependency order.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 9/1/15
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring, unsigned flags)
+{
+ H5C_t * cache_ptr = f->shared->cache;
+ hbool_t flushed_entries_last_pass;
+ hbool_t flush_marked_entries;
+ hbool_t ignore_protected;
+ hbool_t tried_to_flush_protected_entry = FALSE;
+ hbool_t restart_slist_scan;
+ int32_t passes = 0;
+ int32_t protected_entries = 0;
+ H5SL_node_t * node_ptr = NULL;
+ H5C_cache_entry_t * entry_ptr = NULL;
+ H5C_cache_entry_t * next_entry_ptr = NULL;
+#if H5C_DO_SANITY_CHECKS
+ int64_t flushed_entries_count = 0;
+ int64_t flushed_entries_size = 0;
+ int64_t initial_slist_len = 0;
+ size_t initial_slist_size = 0;
+ int64_t entry_size_change;
+ int64_t * entry_size_change_ptr = &entry_size_change;
+#else /* H5C_DO_SANITY_CHECKS */
+ int64_t * entry_size_change_ptr = NULL;
+#endif /* H5C_DO_SANITY_CHECKS */
+ int i;
+ herr_t ret_value = SUCCEED;
- }
+ FUNC_ENTER_NOAPI(FAIL)
-done:
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_ptr);
+ HDassert((flags & H5C__FLUSH_INVALIDATE_FLAG) == 0);
+ HDassert(ring > H5C_RING_UNDEFINED);
+ HDassert(ring < H5C_RING_NTYPES);
- FUNC_LEAVE_NOAPI(ret_value)
+#if H5C_DO_EXTREME_SANITY_CHECKS
+    if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+            (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
+            (H5C_validate_lru_list(cache_ptr) < 0))
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry.\n")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-} /* H5C_flush_invalidate_cache() */
+ ignore_protected = ( (flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0 );
+ flush_marked_entries = ( (flags & H5C__FLUSH_MARKED_ENTRIES_FLAG) != 0 );
+
+ if(!flush_marked_entries)
+ for(i = (int)H5C_RING_UNDEFINED; i < (int)ring; i++)
+ HDassert(cache_ptr->slist_ring_len[i] == 0);
+
+ HDassert(cache_ptr->flush_in_progress);
+
+ /* When we are only flushing marked entries, the slist will usually
+ * still contain entries when we have flushed everything we should.
+ * Thus we track whether we have flushed any entries in the last
+ * pass, and terminate if we haven't.
+ */
+ flushed_entries_last_pass = TRUE;
+
+ /* set the cache_ptr->slist_change_in_pre_serialize and
+ * cache_ptr->slist_change_in_serialize to false.
+ *
+ * These flags are set to TRUE by H5C__flush_single_entry if the
+ * slist is modified by a pre_serialize or serialize call respectively.
+ * H5C_flush_cache uses these flags to detect any modifications
+ * to the slist that might corrupt the scan of the slist -- and
+ * restart the scan in this event.
+ */
+ cache_ptr->slist_change_in_pre_serialize = FALSE;
+ cache_ptr->slist_change_in_serialize = FALSE;
+
+ while((passes < H5C__MAX_PASSES_ON_FLUSH) &&
+ (cache_ptr->slist_ring_len[ring] > 0) &&
+ (protected_entries == 0) &&
+ (flushed_entries_last_pass)) {
+ unsigned curr_flush_dep_height = 0;
+ unsigned flush_dep_passes = 0;
+
+ flushed_entries_last_pass = FALSE;
+
+ /* Loop over all flush dependency heights of entries */
+ while((curr_flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS) &&
+ (cache_ptr->slist_ring_len[ring] > 0) &&
+ (flush_dep_passes < H5C__MAX_PASSES_ON_FLUSH)) {
+ hbool_t flushed_during_dep_loop = FALSE;
+
+#if H5C_DO_SANITY_CHECKS
+ /* For sanity checking, try to verify that the skip list has
+ * the expected size and number of entries at the end of each
+ * internal while loop (see below).
+ *
+             * Doing this gets a bit tricky, as depending on flags, we may
+ * or may not flush all the entries in the slist.
+ *
+ * To make things more entertaining, with the advent of the
+ * fractal heap, the entry serialize callback can cause entries
+ * to be dirtied, resized, and/or moved. Also, the
+ * pre_serialize callback can result in an entry being
+ * removed from the cache via the take ownership flag.
+ *
+ * To deal with this, we first make note of the initial
+ * skip list length and size:
+ */
+ initial_slist_len = cache_ptr->slist_len;
+ initial_slist_size = cache_ptr->slist_size;
+
+ /* We then zero counters that we use to track the number
+ * and total size of entries flushed:
+ */
+ flushed_entries_count = 0;
+ flushed_entries_size = 0;
+
+ /* As mentioned above, there is the possibility that
+ * entries will be dirtied, resized, flushed, or removed
+ * from the cache via the take ownership flag during
+ * our pass through the skip list. To capture the number
+ * of entries added, and the skip list size delta,
+ * zero the slist_len_increase and slist_size_increase of
+ * the cache's instance of H5C_t. These fields will be
+ * updated elsewhere to account for slist insertions and/or
+ * dirty entry size changes.
+ */
+ cache_ptr->slist_len_increase = 0;
+ cache_ptr->slist_size_increase = 0;
+
+ /* at the end of the loop, use these values to compute the
+ * expected slist length and size and compare this with the
+ * value recorded in the cache's instance of H5C_t.
+ */
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ restart_slist_scan = TRUE;
+
+            while((restart_slist_scan) || (node_ptr != NULL)) {
+ if(restart_slist_scan) {
+ restart_slist_scan = FALSE;
+
+ /* Start at beginning of skip list */
+ node_ptr = H5SL_first(cache_ptr->slist_ptr);
+
+ if(node_ptr == NULL)
+ /* the slist is empty -- break out of inner loop */
+ break;
+
+ /* Get cache entry for this node */
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+
+ if(NULL == next_entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+
+ HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(next_entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->in_slist);
+ } /* end if */
+
+ entry_ptr = next_entry_ptr;
+
+ /* With the advent of the fractal heap, the free space
+ * manager, and the version 3 cache, it is possible
+ * that the pre-serialize or serialize callback will
+ * dirty, resize, or take ownership of other entries
+ * in the cache.
+ *
+ * To deal with this, I have inserted code to detect any
+ * change in the skip list not directly under the control
+ * of this function. If such modifications are detected,
+ * we must re-start the scan of the skip list to avoid
+ * the possibility that the target of the next_entry_ptr
+ * may have been flushed or deleted from the cache.
+ *
+ * To verify that all such possibilities have been dealt
+ * with, we do a bit of extra sanity checking on
+ * entry_ptr.
+ */
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->in_slist);
+ HDassert(entry_ptr->is_dirty);
+ HDassert(entry_ptr->ring >= ring);
+
+ /* increment node pointer now, before we delete its target
+ * from the slist.
+ */
+ node_ptr = H5SL_next(node_ptr);
+ if(node_ptr != NULL) {
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ if(NULL == next_entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+
+ HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(next_entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->in_slist);
+ HDassert(next_entry_ptr->ring >= ring);
+ HDassert(entry_ptr != next_entry_ptr);
+ } /* end if */
+ else
+ next_entry_ptr = NULL;
+
+ HDassert(entry_ptr != NULL);
+ HDassert(entry_ptr->in_slist);
+
+ if(((!flush_marked_entries) || (entry_ptr->flush_marker)) &&
+ ((!entry_ptr->flush_me_last) ||
+ ((entry_ptr->flush_me_last) &&
+ (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
+ (entry_ptr->ring == ring)) {
+ if(entry_ptr->is_protected) {
+                     * we probably have major problems -- but let's
+ * flush everything we can before we decide
+ * whether to flag an error.
+ */
+ tried_to_flush_protected_entry = TRUE;
+ protected_entries++;
+ } /* end if */
+ else if(entry_ptr->is_pinned) {
+
+                     * Test to see if we can flush the entry now.
+ * If we can, go ahead and flush. Note that we
+ * aren't trying to do a destroy here, so that
+ * is not an issue.
+ */
+ if(entry_ptr->flush_dep_height == curr_flush_dep_height) {
+#if H5C_DO_SANITY_CHECKS
+ flushed_entries_count++;
+ flushed_entries_size += (int64_t)entry_ptr->size;
+ entry_size_change = 0;
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flags, entry_size_change_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed.")
+
+#if H5C_DO_SANITY_CHECKS
+ /* it is possible that the entry size changed
+ * during flush -- update flushed_entries_size
+ * to account for this.
+ */
+ flushed_entries_size += entry_size_change;
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ flushed_during_dep_loop = TRUE;
+
+ if((cache_ptr->slist_change_in_serialize) ||
+ (cache_ptr->slist_change_in_pre_serialize)) {
+ /* The slist has been modified by something
+ * other than the simple removal of the
+ * flushed entry after the flush.
+ *
+ * This has the potential to corrupt the
+ * scan through the slist, so restart it.
+ */
+ restart_slist_scan = TRUE;
+ cache_ptr->slist_change_in_pre_serialize = FALSE;
+ cache_ptr->slist_change_in_serialize = FALSE;
+
+ H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
+ } /* end if */
+ } /* end if */
+ else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
+ /* This shouldn't happen -- if it does, just scream and die. */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
+ } /* end else-if */
+ else {
+ /* Test to see if we can flush the entry now.
+ * If we can, go ahead and flush. Note that we
+ * aren't trying to do a destroy here, so that
+ * is not an issue.
+ */
+ if(entry_ptr->flush_dep_height == curr_flush_dep_height) {
+#if H5C_DO_SANITY_CHECKS
+ flushed_entries_count++;
+ flushed_entries_size += (int64_t)entry_ptr->size;
+ entry_size_change = 0;
+#endif /* H5C_DO_SANITY_CHECKS */
+ if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flags, entry_size_change_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry.")
+
+#if H5C_DO_SANITY_CHECKS
+ /* it is possible that the entry size changed
+ * during flush -- update flushed_entries_size
+ * to account for this.
+ */
+ flushed_entries_size += entry_size_change;
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ flushed_during_dep_loop = TRUE;
+
+ if((cache_ptr->slist_change_in_serialize) ||
+ (cache_ptr->slist_change_in_pre_serialize)) {
+ /* The slist has been modified by something
+ * other than the simple removal of the
+ * flushed entry after the flush.
+ *
+ * This has the potential to corrupt the
+ * scan through the slist, so restart it.
+ */
+ restart_slist_scan = TRUE;
+ cache_ptr->slist_change_in_pre_serialize = FALSE;
+ cache_ptr->slist_change_in_serialize = FALSE;
+
+ H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
+ } /* end if */
+ } /* end if */
+ else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
+ /* This shouldn't happen -- if it does, just scream and die. */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
+ } /* end else */
+ } /* end if */
+ } /* while ( ( restart_slist_scan ) || ( node_ptr != NULL ) ) */
+
+ /* Check for incrementing flush dependency height */
+ if(flushed_during_dep_loop) {
+
+ /* If we flushed an entry at this flush dependency height
+ * start over at the bottom level of the flush dependencies
+ */
+ curr_flush_dep_height = 0;
+
+ /* Make certain we don't get stuck in an infinite loop */
+ flush_dep_passes++;
+
+ /* Set flag for outer loop */
+ flushed_entries_last_pass = TRUE;
+ } /* end if */
+ else
+ curr_flush_dep_height++;
+ } /* while ( curr_flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS) */
+
+ passes++;
+
+#if H5C_DO_SANITY_CHECKS
+ /* Verify that the slist size and length are as expected. */
+ HDassert((initial_slist_len + cache_ptr->slist_len_increase -
+ flushed_entries_count) == cache_ptr->slist_len);
+ HDassert((size_t)((int64_t)initial_slist_size +
+ cache_ptr->slist_size_increase -
+ flushed_entries_size) == cache_ptr->slist_size);
+#endif /* H5C_DO_SANITY_CHECKS */
+ } /* while */
+
+ HDassert(protected_entries <= cache_ptr->pl_len);
+
+ if(((cache_ptr->pl_len > 0) && (!ignore_protected)) || (tried_to_flush_protected_entry))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "cache has protected items")
+
+ if((cache_ptr->slist_len != 0) && (passes >= H5C__MAX_PASSES_ON_FLUSH))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush pass limit exceeded.")
+
+#if H5C_DO_SANITY_CHECKS
+ if(!flush_marked_entries) {
+ HDassert(cache_ptr->slist_ring_len[ring] == 0);
+ HDassert(cache_ptr->slist_ring_size[ring] == 0);
+ } /* end if */
+#endif /* H5C_DO_SANITY_CHECKS */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_flush_ring() */
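The inner loop above relies on a scan-restart protocol: next_entry_ptr is advanced before the flush, and if a pre-serialize or serialize callback mutates the skip list (signalled through the slist_change_in_* flags), the traversal is abandoned and begun again from the head. A minimal, self-contained sketch of the same pattern, using a plain singly linked list in place of H5SL_t -- every name here is illustrative, none is HDF5 API:

    #include <stdbool.h>
    #include <stddef.h>

    struct node { struct node *next; };
    struct list { struct node *head; bool changed; /* set by callbacks */ };

    extern int visit(struct list *lst, struct node *n);  /* may mutate lst */

    static int scan_with_restart(struct list *lst)
    {
        struct node *cur, *next = NULL;
        bool restart = true;

        while(restart || next != NULL) {
            if(restart) {
                restart = false;
                next = lst->head;       /* start at beginning of list */
                if(next == NULL)
                    break;
            }
            cur = next;
            next = cur->next;           /* advance before cur can vanish */
            lst->changed = false;
            if(visit(lst, cur) < 0)
                return -1;
            if(lst->changed)            /* list mutated under the scan:  */
                restart = true;         /* next may be stale -- restart  */
        }
        return 0;
    }
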
/*-------------------------------------------------------------------------
@@ -7490,6 +7643,7 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
HDassert(cache_ptr);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
HDassert(entry_ptr);
+ HDassert(entry_ptr->ring != H5C_RING_UNDEFINED);
/* If defined, initialize *entry_size_change_ptr to 0 */
if(entry_size_change_ptr != NULL)
@@ -8498,6 +8652,8 @@ H5C_load_entry(H5F_t * f,
entry->flush_in_progress = FALSE;
entry->destroy_in_progress = FALSE;
+ entry->ring = H5C_RING_UNDEFINED;
+
/* Initialize flush dependency height fields */
entry->flush_dep_parent = NULL;
for(u = 0; u < H5C__NUM_FLUSH_DEP_HEIGHTS; u++)
@@ -9865,3 +10021,47 @@ H5C_retag_copied_metadata(H5C_t * cache_ptr, haddr_t metadata_tag)
FUNC_LEAVE_NOAPI_VOID
} /* H5C_retag_copied_metadata */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_get_entry_ring
+ *
+ * Purpose: Given a file address, retrieve the ring for an entry at that
+ * address.
+ *
+ * On error, the value of *ring is not modified.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * 9/8/15
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_get_entry_ring(const H5F_t *f, haddr_t addr, H5C_ring_t *ring)
+{
+ H5C_t *cache_ptr; /* Pointer to cache */
+ H5C_cache_entry_t *entry_ptr; /* Pointer to cache entry at address */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(H5F_addr_defined(addr));
+
+ /* Locate the entry at the address */
+ H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
+ HDassert(entry_ptr);
+
+ /* Return the ring value */
+ *ring = entry_ptr->ring;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_get_entry_ring() */
+
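Since the lookup simply reads the ring field out of the index entry, a successful call always yields a defined ring; a brief illustrative use (error handling elided, and addr assumed to name an entry currently in the cache):

    H5C_ring_t ring = H5C_RING_UNDEFINED;

    if(H5C_get_entry_ring(f, addr, &ring) >= 0) {
        /* an entry present in the cache always carries a defined ring */
        HDassert(ring > H5C_RING_UNDEFINED);
        HDassert(ring < H5C_RING_NTYPES);
    } /* on failure, ring still holds H5C_RING_UNDEFINED, per the contract */
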
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index 7d9d0f9..ea965fd 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -824,6 +824,12 @@ if ( ( (entry_ptr) == NULL ) || \
*
* JRM -- 11/5/08
*
+ * - Updated existing index macros and sanity check macros to maintain
+ * the index_ring_len, index_ring_size, clean_index_ring_size, and
+ * dirty_index_ring_size fields of H5C_t.
+ *
+ * JRM -- 9/1/15
+ *
***********************************************************************/
/* H5C__HASH_TABLE_LEN is defined in H5Cpkg.h. It must be a power of two. */
@@ -848,7 +854,17 @@ if ( ( (cache_ptr) == NULL ) || \
((cache_ptr)->clean_index_size + \
(cache_ptr)->dirty_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
- ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ) { \
+ ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
+ ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) || \
+ ( (entry_ptr)->ring >= H5C_RING_NTYPES ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
+ (cache_ptr)->index_len ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
+ (cache_ptr)->index_size ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
+ HDassert(FALSE); \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \
"Pre HT insert SC failed") \
}
@@ -860,7 +876,16 @@ if ( ( (cache_ptr) == NULL ) || \
((cache_ptr)->clean_index_size + \
(cache_ptr)->dirty_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
- ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ) { \
+ ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] == 0 ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
+ (cache_ptr)->index_len ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
+ (cache_ptr)->index_size ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
+ HDassert(FALSE); \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \
"Post HT insert SC failed") \
}
@@ -887,7 +912,20 @@ if ( ( (cache_ptr) == NULL ) || \
((cache_ptr)->clean_index_size + \
(cache_ptr)->dirty_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
- ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ) { \
+ ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
+ ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) || \
+ ( (entry_ptr)->ring >= H5C_RING_NTYPES ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
+ (cache_ptr)->index_len ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] < \
+ (entry_ptr)->size ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
+ (cache_ptr)->index_size ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
+ HDassert(FALSE); \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Pre HT remove SC failed") \
}
@@ -903,7 +941,15 @@ if ( ( (cache_ptr) == NULL ) || \
((cache_ptr)->clean_index_size + \
(cache_ptr)->dirty_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
- ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ) { \
+ ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
+ (cache_ptr)->index_len ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
+ (cache_ptr)->index_size ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
+ HDassert(FALSE); \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Post HT remove SC failed") \
}
@@ -971,7 +1017,18 @@ if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->clean_index_size < (old_size) ) ) && \
( ( (was_clean) ) || \
( (cache_ptr)->dirty_index_size < (old_size) ) ) ) || \
- ( (entry_ptr) == NULL ) ) { \
+ ( (entry_ptr) == NULL ) || \
+ ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) || \
+ ( (entry_ptr)->ring >= H5C_RING_NTYPES ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
+ (cache_ptr)->index_len ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
+ (cache_ptr)->index_size ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
+ HDassert(FALSE); \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"Pre HT entry size change SC failed") \
}
@@ -992,7 +1049,15 @@ if ( ( (cache_ptr) == NULL ) || \
( ( ((entry_ptr)->is_dirty) ) || \
( (cache_ptr)->clean_index_size < (new_size) ) ) ) || \
( ( (cache_ptr)->index_len == 1 ) && \
- ( (cache_ptr)->index_size != (new_size) ) ) ) { \
+ ( (cache_ptr)->index_size != (new_size) ) ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
+ (cache_ptr)->index_len ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
+ (cache_ptr)->index_size ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
+ HDassert(FALSE); \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"Post HT entry size change SC failed") \
}
@@ -1009,7 +1074,18 @@ if ( \
( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
- ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ) { \
+ ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
+ ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) || \
+ ( (entry_ptr)->ring >= H5C_RING_NTYPES ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
+ (cache_ptr)->index_len ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
+ (cache_ptr)->index_size ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
+ HDassert(FALSE); \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"Pre HT update for entry clean SC failed") \
}
@@ -1026,7 +1102,18 @@ if ( \
( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
- ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ) { \
+ ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
+ ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) || \
+ ( (entry_ptr)->ring >= H5C_RING_NTYPES ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
+ (cache_ptr)->index_len ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
+ (cache_ptr)->index_size ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
+ HDassert(FALSE); \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"Pre HT update for entry dirty SC failed") \
}
@@ -1035,7 +1122,15 @@ if ( \
if ( ( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
- ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ) { \
+ ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
+ (cache_ptr)->index_len ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
+ (cache_ptr)->index_size ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
+ HDassert(FALSE); \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"Post HT update for entry clean SC failed") \
}
@@ -1044,7 +1139,15 @@ if ( ( (cache_ptr)->index_size != \
if ( ( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
- ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ) { \
+ ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
+ (cache_ptr)->index_len ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
+ (cache_ptr)->index_size ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
+ HDassert(FALSE); \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"Post HT update for entry dirty SC failed") \
}
@@ -1070,30 +1173,38 @@ if ( ( (cache_ptr)->index_size != \
#endif /* H5C_DO_SANITY_CHECKS */
-#define H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, fail_val) \
-{ \
- int k; \
- H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
- k = H5C__HASH_FCN((entry_ptr)->addr); \
- if ( ((cache_ptr)->index)[k] == NULL ) \
- ((cache_ptr)->index)[k] = (entry_ptr); \
- else { \
- (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \
- (entry_ptr)->ht_next->ht_prev = (entry_ptr); \
- ((cache_ptr)->index)[k] = (entry_ptr); \
- } \
- (cache_ptr)->index_len++; \
- (cache_ptr)->index_size += (entry_ptr)->size; \
- if ( (entry_ptr)->is_dirty ) \
- (cache_ptr)->dirty_index_size += (entry_ptr)->size; \
- else \
- (cache_ptr)->clean_index_size += (entry_ptr)->size; \
- if ((entry_ptr)->flush_me_last) { \
- (cache_ptr)->num_last_entries++; \
- HDassert((cache_ptr)->num_last_entries <= 2); \
- } \
- H5C__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr) \
- H5C__POST_HT_INSERT_SC(cache_ptr, fail_val) \
+#define H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, fail_val) \
+{ \
+ int k; \
+ H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
+ k = H5C__HASH_FCN((entry_ptr)->addr); \
+ if ( ((cache_ptr)->index)[k] == NULL ) \
+ ((cache_ptr)->index)[k] = (entry_ptr); \
+ else { \
+ (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \
+ (entry_ptr)->ht_next->ht_prev = (entry_ptr); \
+ ((cache_ptr)->index)[k] = (entry_ptr); \
+ } \
+ (cache_ptr)->index_len++; \
+ (cache_ptr)->index_size += (entry_ptr)->size; \
+ ((cache_ptr)->index_ring_len[entry_ptr->ring])++; \
+ ((cache_ptr)->index_ring_size[entry_ptr->ring]) \
+ += (entry_ptr)->size; \
+ if ( (entry_ptr)->is_dirty ) { \
+ (cache_ptr)->dirty_index_size += (entry_ptr)->size; \
+ ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring]) \
+ += (entry_ptr)->size; \
+ } else { \
+ (cache_ptr)->clean_index_size += (entry_ptr)->size; \
+ ((cache_ptr)->clean_index_ring_size[entry_ptr->ring]) \
+ += (entry_ptr)->size; \
+ } \
+ if ((entry_ptr)->flush_me_last) { \
+ (cache_ptr)->num_last_entries++; \
+ HDassert((cache_ptr)->num_last_entries <= 2); \
+ } \
+ H5C__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr) \
+ H5C__POST_HT_INSERT_SC(cache_ptr, fail_val) \
}
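The insert macro above now maintains four per-ring arrays in parallel with the cache-wide totals. The implied invariant -- each array sums to its corresponding total -- is what the expanded pre/post sanity-check macros probe piecewise; a hedged sketch of a whole-cache check, not part of this patch, assuming the usual HDF5 private headers are in scope:

    #ifndef NDEBUG
    static void
    check_ring_totals(const H5C_t *cache_ptr)
    {
        int32_t len = 0;
        size_t size = 0, clean = 0, dirty = 0;
        int i;

        for(i = 0; i < H5C_RING_NTYPES; i++) {
            len   += cache_ptr->index_ring_len[i];
            size  += cache_ptr->index_ring_size[i];
            clean += cache_ptr->clean_index_ring_size[i];
            dirty += cache_ptr->dirty_index_ring_size[i];
        }

        /* per-ring bookkeeping must agree with the cache-wide totals */
        HDassert(len   == cache_ptr->index_len);
        HDassert(size  == cache_ptr->index_size);
        HDassert(clean == cache_ptr->clean_index_size);
        HDassert(dirty == cache_ptr->dirty_index_size);
    }
    #endif /* NDEBUG */
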
#define H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr) \
@@ -1111,10 +1222,18 @@ if ( ( (cache_ptr)->index_size != \
(entry_ptr)->ht_prev = NULL; \
(cache_ptr)->index_len--; \
(cache_ptr)->index_size -= (entry_ptr)->size; \
- if ( (entry_ptr)->is_dirty ) \
+ ((cache_ptr)->index_ring_len[entry_ptr->ring])--; \
+ ((cache_ptr)->index_ring_size[entry_ptr->ring]) \
+ -= (entry_ptr)->size; \
+ if ( (entry_ptr)->is_dirty ) { \
(cache_ptr)->dirty_index_size -= (entry_ptr)->size; \
- else \
+ ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring]) \
+ -= (entry_ptr)->size; \
+ } else { \
(cache_ptr)->clean_index_size -= (entry_ptr)->size; \
+ ((cache_ptr)->clean_index_ring_size[entry_ptr->ring]) \
+ -= (entry_ptr)->size; \
+ } \
if ((entry_ptr)->flush_me_last) { \
(cache_ptr)->num_last_entries--; \
HDassert((cache_ptr)->num_last_entries <= 1); \
@@ -1182,7 +1301,11 @@ if ( ( (cache_ptr)->index_size != \
{ \
H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr); \
(cache_ptr)->dirty_index_size -= (entry_ptr)->size; \
+ ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring]) \
+ -= (entry_ptr)->size; \
(cache_ptr)->clean_index_size += (entry_ptr)->size; \
+ ((cache_ptr)->clean_index_ring_size[entry_ptr->ring]) \
+ += (entry_ptr)->size; \
H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr); \
}
@@ -1190,27 +1313,39 @@ if ( ( (cache_ptr)->index_size != \
{ \
H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr); \
(cache_ptr)->clean_index_size -= (entry_ptr)->size; \
+ ((cache_ptr)->clean_index_ring_size[entry_ptr->ring]) \
+ -= (entry_ptr)->size; \
(cache_ptr)->dirty_index_size += (entry_ptr)->size; \
+ ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring]) \
+ += (entry_ptr)->size; \
H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr); \
}
-#define H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size, \
- entry_ptr, was_clean) \
-{ \
- H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
- entry_ptr, was_clean) \
- (cache_ptr)->index_size -= (old_size); \
- (cache_ptr)->index_size += (new_size); \
- if ( was_clean ) \
- (cache_ptr)->clean_index_size -= (old_size); \
- else \
- (cache_ptr)->dirty_index_size -= (old_size); \
- if ( (entry_ptr)->is_dirty ) \
- (cache_ptr)->dirty_index_size += (new_size); \
- else \
- (cache_ptr)->clean_index_size += (new_size); \
- H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
- entry_ptr) \
+#define H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size, \
+ entry_ptr, was_clean) \
+{ \
+ H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
+ entry_ptr, was_clean) \
+ (cache_ptr)->index_size -= (old_size); \
+ (cache_ptr)->index_size += (new_size); \
+ ((cache_ptr)->index_ring_size[entry_ptr->ring]) -= (old_size); \
+ ((cache_ptr)->index_ring_size[entry_ptr->ring]) += (new_size); \
+ if ( was_clean ) { \
+ (cache_ptr)->clean_index_size -= (old_size); \
+ ((cache_ptr)->clean_index_ring_size[entry_ptr->ring])-= (old_size); \
+ } else { \
+ (cache_ptr)->dirty_index_size -= (old_size); \
+ ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring])-= (old_size); \
+ } \
+ if ( (entry_ptr)->is_dirty ) { \
+ (cache_ptr)->dirty_index_size += (new_size); \
+ ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring])+= (new_size); \
+ } else { \
+ (cache_ptr)->clean_index_size += (new_size); \
+ ((cache_ptr)->clean_index_ring_size[entry_ptr->ring])+= (new_size); \
+ } \
+ H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
+ entry_ptr) \
}
@@ -1275,6 +1410,10 @@ if ( ( (cache_ptr)->index_size != \
* Added code to set cache_ptr->slist_changed to TRUE
* when an entry is inserted in the slist.
*
+ * JRM -- 9/1/15
+ * Added code to maintain the cache_ptr->slist_ring_len
+ * and cache_ptr->slist_ring_size arrays.
+ *
*-------------------------------------------------------------------------
*/
@@ -1296,6 +1435,12 @@ if ( ( (cache_ptr)->index_size != \
HDassert( H5F_addr_defined((entry_ptr)->addr) ); \
HDassert( !((entry_ptr)->in_slist) ); \
HDassert( !ENTRY_IN_SLIST((cache_ptr), (entry_ptr)) ); \
+ HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
+ HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
+ HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_len ); \
+ HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_size ); \
\
if(H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) < 0) \
HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), \
@@ -1305,6 +1450,8 @@ if ( ( (cache_ptr)->index_size != \
(cache_ptr)->slist_changed = TRUE; \
(cache_ptr)->slist_len++; \
(cache_ptr)->slist_size += (entry_ptr)->size; \
+ ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])++; \
+ ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (entry_ptr)->size; \
(cache_ptr)->slist_len_increase++; \
(cache_ptr)->slist_size_increase += (int64_t)((entry_ptr)->size); \
\
@@ -1324,6 +1471,12 @@ if ( ( (cache_ptr)->index_size != \
HDassert( H5F_addr_defined((entry_ptr)->addr) ); \
HDassert( !((entry_ptr)->in_slist) ); \
HDassert( !ENTRY_IN_SLIST((cache_ptr), (entry_ptr)) ); \
+ HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
+ HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
+ HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_len ); \
+ HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_size ); \
\
if(H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) < 0) \
HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), \
@@ -1333,6 +1486,8 @@ if ( ( (cache_ptr)->index_size != \
(cache_ptr)->slist_changed = TRUE; \
(cache_ptr)->slist_len++; \
(cache_ptr)->slist_size += (entry_ptr)->size; \
+ ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])++; \
+ ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (entry_ptr)->size; \
\
HDassert( (cache_ptr)->slist_len > 0 ); \
HDassert( (cache_ptr)->slist_size > 0 ); \
@@ -1375,32 +1530,46 @@ if ( ( (cache_ptr)->index_size != \
* Added code to set cache_ptr->slist_changed to TRUE
* when an entry is removed from the slist.
*
+ * JRM -- 9/1/15
+ * Added code to maintain the cache_ptr->slist_ring_len
+ * and cache_ptr->slist_ring_size arrays.
+ *
*-------------------------------------------------------------------------
*/
-#define H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr) \
-{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (entry_ptr) ); \
- HDassert( !((entry_ptr)->is_protected) ); \
- HDassert( !((entry_ptr)->is_read_only) ); \
- HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
- HDassert( (entry_ptr)->size > 0 ); \
- HDassert( (entry_ptr)->in_slist ); \
- HDassert( (cache_ptr)->slist_ptr ); \
- \
- if ( H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr) \
- != (entry_ptr) ) \
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, \
- "Can't delete entry from skip list.") \
- \
- HDassert( (cache_ptr)->slist_len > 0 ); \
- (cache_ptr)->slist_changed = TRUE; \
- (cache_ptr)->slist_len--; \
- HDassert( (cache_ptr)->slist_size >= (entry_ptr)->size ); \
- (cache_ptr)->slist_size -= (entry_ptr)->size; \
- (entry_ptr)->in_slist = FALSE; \
+#define H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
+ HDassert( (entry_ptr) ); \
+ HDassert( !((entry_ptr)->is_protected) ); \
+ HDassert( !((entry_ptr)->is_read_only) ); \
+ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
+ HDassert( (entry_ptr)->size > 0 ); \
+ HDassert( (entry_ptr)->in_slist ); \
+ HDassert( (cache_ptr)->slist_ptr ); \
+ HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
+ HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
+ HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_len ); \
+ HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_size ); \
+ \
+ if ( H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr) \
+ != (entry_ptr) ) \
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, \
+ "Can't delete entry from skip list.") \
+ \
+ HDassert( (cache_ptr)->slist_len > 0 ); \
+ (cache_ptr)->slist_changed = TRUE; \
+ (cache_ptr)->slist_len--; \
+ HDassert( (cache_ptr)->slist_size >= (entry_ptr)->size ); \
+ (cache_ptr)->slist_size -= (entry_ptr)->size; \
+ ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])--; \
+ HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] >= \
+ (entry_ptr)->size ); \
+ ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (entry_ptr)->size; \
+ (entry_ptr)->in_slist = FALSE; \
} /* H5C__REMOVE_ENTRY_FROM_SLIST */
@@ -1432,52 +1601,76 @@ if ( ( (cache_ptr)->index_size != \
* in this case, as the structure of the slist is not
* modified.
*
+ * JRM -- 9/1/15
+ * Added code to maintain the cache_ptr->slist_ring_len
+ * and cache_ptr->slist_ring_size arrays.
+ *
*-------------------------------------------------------------------------
*/
#if H5C_DO_SANITY_CHECKS
-#define H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size) \
-{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (old_size) > 0 ); \
- HDassert( (new_size) > 0 ); \
- HDassert( (old_size) <= (cache_ptr)->slist_size ); \
- HDassert( (cache_ptr)->slist_len > 0 ); \
- HDassert( ((cache_ptr)->slist_len > 1) || \
- ( (cache_ptr)->slist_size == (old_size) ) ); \
- \
- (cache_ptr)->slist_size -= (old_size); \
- (cache_ptr)->slist_size += (new_size); \
- \
- (cache_ptr)->slist_size_increase -= (int64_t)(old_size); \
- (cache_ptr)->slist_size_increase += (int64_t)(new_size); \
- \
- HDassert( (new_size) <= (cache_ptr)->slist_size ); \
- HDassert( ( (cache_ptr)->slist_len > 1 ) || \
- ( (cache_ptr)->slist_size == (new_size) ) ); \
+#define H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
+ HDassert( (old_size) > 0 ); \
+ HDassert( (new_size) > 0 ); \
+ HDassert( (old_size) <= (cache_ptr)->slist_size ); \
+ HDassert( (cache_ptr)->slist_len > 0 ); \
+ HDassert( ((cache_ptr)->slist_len > 1) || \
+ ( (cache_ptr)->slist_size == (old_size) ) ); \
+ HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
+ HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
+ HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_len ); \
+ HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_size ); \
+ \
+ (cache_ptr)->slist_size -= (old_size); \
+ (cache_ptr)->slist_size += (new_size); \
+ \
+ HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] >= (old_size) ); \
+ ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (old_size); \
+ ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (new_size); \
+ \
+ (cache_ptr)->slist_size_increase -= (int64_t)(old_size); \
+ (cache_ptr)->slist_size_increase += (int64_t)(new_size); \
+ \
+ HDassert( (new_size) <= (cache_ptr)->slist_size ); \
+ HDassert( ( (cache_ptr)->slist_len > 1 ) || \
+ ( (cache_ptr)->slist_size == (new_size) ) ); \
} /* H5C__UPDATE_SLIST_FOR_SIZE_CHANGE */
#else /* H5C_DO_SANITY_CHECKS */
-#define H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size) \
-{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (old_size) > 0 ); \
- HDassert( (new_size) > 0 ); \
- HDassert( (old_size) <= (cache_ptr)->slist_size ); \
- HDassert( (cache_ptr)->slist_len > 0 ); \
- HDassert( ((cache_ptr)->slist_len > 1) || \
- ( (cache_ptr)->slist_size == (old_size) ) ); \
- \
- (cache_ptr)->slist_size -= (old_size); \
- (cache_ptr)->slist_size += (new_size); \
- \
- HDassert( (new_size) <= (cache_ptr)->slist_size ); \
- HDassert( ( (cache_ptr)->slist_len > 1 ) || \
- ( (cache_ptr)->slist_size == (new_size) ) ); \
+#define H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
+ HDassert( (old_size) > 0 ); \
+ HDassert( (new_size) > 0 ); \
+ HDassert( (old_size) <= (cache_ptr)->slist_size ); \
+ HDassert( (cache_ptr)->slist_len > 0 ); \
+ HDassert( ((cache_ptr)->slist_len > 1) || \
+ ( (cache_ptr)->slist_size == (old_size) ) ); \
+ HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
+ HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
+ HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_len ); \
+ HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_size ); \
+ \
+ (cache_ptr)->slist_size -= (old_size); \
+ (cache_ptr)->slist_size += (new_size); \
+ \
+ HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] >= (old_size) ); \
+ ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (old_size); \
+ ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (new_size); \
+ \
+ HDassert( (new_size) <= (cache_ptr)->slist_size ); \
+ HDassert( ( (cache_ptr)->slist_len > 1 ) || \
+ ( (cache_ptr)->slist_size == (new_size) ) ); \
} /* H5C__UPDATE_SLIST_FOR_SIZE_CHANGE */
#endif /* H5C_DO_SANITY_CHECKS */
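Note that both variants of H5C__UPDATE_SLIST_FOR_SIZE_CHANGE now read (entry_ptr)->ring even though entry_ptr is not among the macro's parameters, so the macro only expands correctly where a variable named entry_ptr is in scope. A sketch of such an expansion site (illustrative, not taken from this patch):

    static void
    resize_slist_entry(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr,
        size_t new_size)
    {
        /* entry_ptr must be visible under exactly this name: the macro
         * body names it directly rather than receiving it as an argument */
        if(entry_ptr->in_slist)
            H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_size);

        entry_ptr->size = new_size;
    }
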
@@ -2854,6 +3047,16 @@ if ( ( (cache_ptr)->index_size != \
* index_size by three should yield a conservative estimate
* of the cache's memory footprint.
*
+ * index_ring_len: Array of integers of length H5C_RING_NTYPES used to
+ * maintain a count of entries in the index by ring. Note
+ * that the sum of all the cells in this array must equal
+ * the value stored in index_len above.
+ *
+ * index_ring_size: Array of size_t of length H5C_RING_NTYPES used to
+ * maintain the sum of the sizes of all entries in the index
+ * by ring. Note that the sum of all cells in this array must
+ * equal the value stored in index_size above.
+ *
* clean_index_size: Number of bytes of clean entries currently stored in
* the hash table. Note that the index_size field (above)
* is also the sum of the sizes of all entries in the cache.
@@ -2868,12 +3071,22 @@ if ( ( (cache_ptr)->index_size != \
* clean_index_size plus the amount of empty space (if any)
* in the cache.
*
+ * clean_index_ring_size: Array of size_t of length H5C_RING_NTYPES used to
+ * maintain the sum of the sizes of all clean entries in the
+ * index by ring. Note that the sum of all cells in this array
+ * must equal the value stored in clean_index_size above.
+ *
* dirty_index_size: Number of bytes of dirty entries currently stored in
* the hash table. Note that the index_size field (above)
* is also the sum of the sizes of all entries in the cache.
* Thus we should have the invariant that clean_index_size +
* dirty_index_size == index_size.
*
+ * dirty_index_ring_size: Array of size_t of length H5C_RING_NTYPES used to
+ * maintain the sum of the sizes of all dirty entries in the
+ * index by ring. Note that the sum of all cells in this array
+ * must equal the value stored in dirty_index_size above.
+ *
* index: Array of pointer to H5C_cache_entry_t of size
* H5C__HASH_TABLE_LEN. At present, this value is a power
* of two, not the usual prime number.
@@ -2962,6 +3175,16 @@ if ( ( (cache_ptr)->index_size != \
* skip list used to maintain a sorted list of
* dirty entries in the cache.
*
+ * slist_ring_len: Array of integers of length H5C_RING_NTYPES used to
+ * maintain a count of entries in the slist by ring. Note
+ * that the sum of all the cells in this array must equal
+ * the value stored in slist_len above.
+ *
+ * slist_ring_size: Array of size_t of length H5C_RING_NTYPES used to
+ * maintain the sum of the sizes of all entries in the
+ * slist by ring. Note that the sum of all cells in this
+ * array must equal the value stored in slist_size above.
+ *
* slist_ptr: pointer to the instance of H5SL_t used to maintain a sorted
* list of dirty entries in the cache. This sorted list has
* two uses:
@@ -3602,8 +3825,12 @@ struct H5C_t {
/* Fields for maintaining [hash table] index of entries */
int32_t index_len;
size_t index_size;
+ int32_t index_ring_len[H5C_RING_NTYPES];
+ size_t index_ring_size[H5C_RING_NTYPES];
size_t clean_index_size;
+ size_t clean_index_ring_size[H5C_RING_NTYPES];
size_t dirty_index_size;
+ size_t dirty_index_ring_size[H5C_RING_NTYPES];
H5C_cache_entry_t * (index[H5C__HASH_TABLE_LEN]);
/* Fields to detect entries removed during scans */
@@ -3619,6 +3846,8 @@ struct H5C_t {
hbool_t slist_change_in_serialize;
int32_t slist_len;
size_t slist_size;
+ int32_t slist_ring_len[H5C_RING_NTYPES];
+ size_t slist_ring_size[H5C_RING_NTYPES];
H5SL_t * slist_ptr;
int32_t num_last_entries;
#if H5C_DO_SANITY_CHECKS
diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h
index 3cd74c1..2a3efc9 100644
--- a/src/H5Cprivate.h
+++ b/src/H5Cprivate.h
@@ -1115,6 +1115,60 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t *cache_ptr, haddr_t addr,
/****************************************************************************
*
+ * H5C_ring_t & associated #defines
+ *
+ * The metadata cache uses the concept of rings to order the flushes of
+ * classes of entries. In this arrangement, each entry in the cache is
+ * assigned to a ring, and on flush, the members of the outermost ring
+ * are flushed first, followed by the next outermost, and so on with the
+ * members of the innermost ring being flushed last.
+ *
+ * Note that flush dependencies are used to order flushes within rings.
+ *
+ * Note also that at the conceptual level, rings are arguably superfluous,
+ * as a similar effect could be obtained via the flush dependency mechanism.
+ * However, this would require all entries in the cache to participate in a
+ * flush dependency -- with the implied setup and takedown overhead and
+ * added complexity. Further, the flush ordering between rings need only
+ * be enforced on flush operations, and thus the use of flush dependencies
+ * instead would apply unnecessary constraints on flushes under normal
+ * operating circumstances.
+ *
+ * As of this writing, all metadata entries pertaining to data sets and
+ * groups must be flushed first, and are thus assigned to the outermost
+ * ring.
+ *
+ * Free space managers managing file space must be flushed next,
+ * and are assigned to the second outermost ring.
+ *
+ * The object header and associated chunks used to implement superblock
+ * extension messages must be flushed next, and are thus assigned to
+ * the third outermost ring.
+ *
+ * The superblock proper must be flushed last, and is thus assigned to
+ * the innermost ring.
+ *
+ * The H5C_ring_t and the associated #defines below are used to define
+ * the rings. Each entry must be assigned to the appropriate ring on
+ * insertion or protect.
+ *
+ * Note that H5C_ring_t was originally an enumerated type. It was
+ * converted to an integer and a set of #defines for convenience in
+ * debugging.
+ */
+
+#define H5C_RING_UNDEFINED 0 /* shouldn't appear in the cache */
+#define H5C_RING_USER 1 /* outermost ring */
+#define H5C_RING_FSM 2
+#define H5C_RING_SBE 4 /* temporarily merged with H5C_RING_SB */
+#define H5C_RING_SB 4 /* innermost ring */
+#define H5C_RING_NTYPES 5
+
+typedef int H5C_ring_t;
+
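The ordering described above reduces to a simple outer loop, flushing the outermost ring (smallest defined value) first and working inward; a hedged sketch of what a whole-cache flush looks like under this scheme -- the exact parameter list of H5C_flush_ring is not reproduced in this excerpt, so treat the call as illustrative:

    H5C_ring_t ring = H5C_RING_USER;    /* outermost ring first */

    while(ring < H5C_RING_NTYPES) {
        if(H5C_flush_ring(f, dxpl_id, ring, flags) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush ring failed")

        ring++;                         /* then work inward */
    }
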
+
+/****************************************************************************
+ *
* structure H5C_cache_entry_t
*
* Instances of the H5C_cache_entry_t structure are used to store cache
@@ -1376,6 +1430,25 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t *cache_ptr, haddr_t addr,
* is in the process of being flushed and destroyed.
*
*
+ * Fields supporting rings for flush ordering:
+ *
+ * All entries in the metadata cache are assigned to a ring. On cache
+ * flush, all entries in the outermost ring are flushed first, followed
+ * by all members of the next outermost ring, and so on until the
+ * innermost ring is flushed. Note that this ordering is ONLY applied
+ * in flush and serialize calls. Rings are ignored during normal operations
+ * in which entries are flushed as directed by the replacement policy.
+ *
+ * See the header comment on H5C_ring_t above for further details.
+ *
+ * Note that flush dependencies (see below) are used to order flushes
+ * within rings. Unlike rings, flush dependencies are applied to ALL
+ * writes, not just those triggered by flush or serialize calls.
+ *
+ * ring: Instance of H5C_ring_t indicating the ring to which this
+ * entry is assigned.
+ *
+ *
* Fields supporting the 'flush dependency' feature:
*
* Entries in the cache may have a 'flush dependency' on another entry in the
@@ -1543,6 +1616,9 @@ typedef struct H5C_cache_entry_t {
hbool_t flush_in_progress;
hbool_t destroy_in_progress;
+ /* fields supporting rings for purposes of flush ordering */
+ H5C_ring_t ring;
+
/* fields supporting the 'flush dependency' feature: */
struct H5C_cache_entry_t * flush_dep_parent;
uint64_t child_flush_dep_height_rc[H5C__NUM_FLUSH_DEP_HEIGHTS];
@@ -1917,6 +1993,7 @@ H5_DLL herr_t H5C_validate_resize_config(H5C_auto_size_ctl_t *config_ptr,
unsigned int tests);
H5_DLL herr_t H5C_ignore_tags(H5C_t *cache_ptr);
H5_DLL void H5C_retag_copied_metadata(H5C_t *cache_ptr, haddr_t metadata_tag);
+H5_DLL herr_t H5C_get_entry_ring(const H5F_t *f, haddr_t addr, H5C_ring_t *ring);
#ifdef H5_HAVE_PARALLEL
H5_DLL herr_t H5C_apply_candidate_list(H5F_t *f, hid_t dxpl_id,
diff --git a/src/H5FScache.c b/src/H5FScache.c
index 9accfc4..455a96e 100644
--- a/src/H5FScache.c
+++ b/src/H5FScache.c
@@ -376,6 +376,8 @@ H5FS__cache_hdr_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *_thing,
unsigned *flags)
{
H5FS_t *fspace = (H5FS_t *)_thing; /* Pointer to the object */
+ H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC_TAG(dxpl_id, H5AC__FREESPACE_TAG, FAIL)
@@ -391,6 +393,16 @@ H5FS__cache_hdr_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *_thing,
HDassert(flags);
if(fspace->sinfo) {
+ H5AC_ring_t ring;
+
+ /* Retrieve the ring type for the header */
+ if(H5AC_get_entry_ring(f, addr, &ring) < 0)
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTGET, FAIL, "unable to get property value");
+
+ /* Set the ring type for the section info in the DXPL */
+ if(H5AC_set_ring(dxpl_id, ring, &dxpl, &orig_ring) < 0)
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTSET, FAIL, "unable to set ring value")
+
/* This implies that the header "owns" the section info.
*
* Unfortunately, the comments in the code are not clear as to
@@ -596,6 +608,10 @@ H5FS__cache_hdr_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *_thing,
*flags = 0;
done:
+ /* Reset the ring in the DXPL */
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_FSPACE, H5E_CANTSET, FAIL, "unable to set property value")
+
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
} /* end H5FS__cache_hdr_pre_serialize() */
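The bracket shown here recurs throughout the remaining files in this patch: set the ring on entry, restore the caller's value in the done: block so the DXPL is left untouched even on the error path. Condensed to its skeleton (H5AC_reset_ring is called but not defined within this diff, so its pairing with H5AC_set_ring is assumed from the call sites):

    H5P_genplist_t *dxpl = NULL;
    H5AC_ring_t orig_ring = H5AC_RING_INV;
    herr_t ret_value = SUCCEED;

    /* on entry: route subsequent cache accesses to the desired ring */
    if(H5AC_set_ring(dxpl_id, H5AC_RING_FSM, &dxpl, &orig_ring) < 0)
        HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")

    /* ... work against the metadata cache goes here ... */

    done:
        /* on exit: always restore the caller's ring, success or failure */
        if(H5AC_reset_ring(dxpl, orig_ring) < 0)
            HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
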
diff --git a/src/H5Fsuper.c b/src/H5Fsuper.c
index 2262ccd..d353d4f 100644
--- a/src/H5Fsuper.c
+++ b/src/H5Fsuper.c
@@ -206,6 +206,8 @@ herr_t
H5F_super_ext_close(H5F_t *f, H5O_loc_t *ext_ptr, hid_t dxpl_id,
hbool_t was_created)
{
+ H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
@@ -216,6 +218,10 @@ H5F_super_ext_close(H5F_t *f, H5O_loc_t *ext_ptr, hid_t dxpl_id,
/* Check if extension was created */
if(was_created) {
+ /* Set the ring type in the DXPL */
+ if(H5AC_set_ring(dxpl_id, H5AC_RING_SBE, &dxpl, &orig_ring) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set ring value")
+
/* Increment link count on superblock extension's object header */
if(H5O_link(ext_ptr, 1, dxpl_id) < 0)
HGOTO_ERROR(H5E_FILE, H5E_LINKCOUNT, FAIL, "unable to increment hard link count")
@@ -232,6 +238,10 @@ H5F_super_ext_close(H5F_t *f, H5O_loc_t *ext_ptr, hid_t dxpl_id,
f->nopen_objs--;
done:
+ /* Reset the ring in the DXPL */
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set property value")
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5F_super_ext_close() */
@@ -256,7 +266,8 @@ done:
herr_t
H5F__super_read(H5F_t *f, hid_t dxpl_id)
{
- H5P_genplist_t *dxpl; /* DXPL object */
+ H5P_genplist_t *dxpl = NULL; /* DXPL object */
+ H5AC_ring_t ring, orig_ring = H5AC_RING_INV;
H5F_super_t * sblock = NULL; /* Superblock structure */
H5F_superblock_cache_ud_t udata; /* User data for cache callbacks */
H5P_genplist_t *c_plist; /* File creation property list */
@@ -276,6 +287,8 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id)
/* Get the DXPL plist object for DXPL ID */
if(NULL == (dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if((H5P_get(dxpl, H5AC_RING_NAME, &orig_ring)) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to get property value");
/* Find the superblock */
if(H5FD_locate_signature(f->shared->lf, dxpl, &super_addr) < 0)
@@ -320,6 +333,11 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id)
udata.stored_eof = HADDR_UNDEF;
udata.drvrinfo_removed = FALSE;
+ /* Set the ring type in the DXPL */
+ ring = H5AC_RING_SB;
+ if((H5P_set(dxpl, H5AC_RING_NAME, &ring)) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "unable to set property value");
+
/* Look up the superblock */
if(NULL == (sblock = (H5F_super_t *)H5AC_protect(f, dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, &udata, rw_flags)))
HGOTO_ERROR(H5E_FILE, H5E_CANTPROTECT, FAIL, "unable to load superblock")
@@ -419,6 +437,11 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id)
* allocated so that it knows how to allocate additional memory.
*/
+ /* Set the ring type in the DXPL */
+ ring = H5AC_RING_SBE;
+ if((H5P_set(dxpl, H5AC_RING_NAME, &ring)) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "unable to set property value");
+
/* Decode the optional driver information block */
if(H5F_addr_defined(sblock->driver_addr)) {
H5O_drvinfo_t *drvinfo; /* Driver info */
@@ -647,6 +670,10 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id)
f->shared->sblock = sblock;
done:
+ /* Reset the ring in the DXPL */
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set property value")
+
/* Release the superblock */
if(sblock && H5AC_unprotect(f, dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, sblock, sblock_flags) < 0)
HDONE_ERROR(H5E_FILE, H5E_CANTUNPROTECT, FAIL, "unable to close superblock")
@@ -705,6 +732,8 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
H5O_drvinfo_t *drvinfo = NULL; /* Driver info */
hbool_t drvinfo_in_cache = FALSE; /* Whether the driver info block has been inserted into the metadata cache */
H5P_genplist_t *plist; /* File creation property list */
+ H5P_genplist_t *dxpl = NULL;
+ H5AC_ring_t ring, orig_ring = H5AC_RING_INV;
hsize_t userblock_size; /* Size of userblock, in bytes */
hsize_t superblock_size; /* Size of superblock, in bytes */
size_t driver_size; /* Size of driver info block (bytes) */
@@ -830,6 +859,10 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
if(H5F__set_eoa(f, H5FD_MEM_SUPER, superblock_size) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to set EOA value for superblock")
+ /* Set the ring type in the DXPL */
+ if(H5AC_set_ring(dxpl_id, H5AC_RING_SB, &dxpl, &orig_ring) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set ring value")
+
/* Insert superblock into cache, pinned */
if(H5AC_insert_entry(f, dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, sblock, H5AC__PIN_ENTRY_FLAG | H5AC__FLUSH_LAST_FLAG | H5AC__FLUSH_COLLECTIVELY_FLAG) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "can't add superblock to cache")
@@ -875,6 +908,11 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
else
need_ext = FALSE;
+ /* Set the ring type in the DXPL */
+ ring = H5AC_RING_SBE;
+ if((H5P_set(dxpl, H5AC_RING_NAME, &ring)) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "unable to set property value")
+
/* Create the superblock extension for "extra" superblock data, if necessary. */
if(need_ext) {
/* The superblock extension isn't actually a group, but the
@@ -976,6 +1014,10 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
} /* end if */
done:
+ /* Reset the ring in the DXPL */
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set property value")
+
/* Close superblock extension, if it was created */
if(ext_created && H5F_super_ext_close(f, &ext_loc, dxpl_id, ext_created) < 0)
HDONE_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "unable to close file's superblock extension")
@@ -1114,6 +1156,8 @@ H5F__super_free(H5F_super_t *sblock)
herr_t
H5F__super_size(H5F_t *f, hid_t dxpl_id, hsize_t *super_size, hsize_t *super_ext_size)
{
+ H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -1138,6 +1182,10 @@ H5F__super_size(H5F_t *f, hid_t dxpl_id, hsize_t *super_size, hsize_t *super_ext
ext_loc.file = f;
ext_loc.addr = f->shared->sblock->ext_addr;
+ /* Set the ring type in the DXPL */
+ if(H5AC_set_ring(dxpl_id, H5AC_RING_SBE, &dxpl, &orig_ring) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set ring value")
+
/* Get object header info for superblock extension */
if(H5O_get_hdr_info(&ext_loc, dxpl_id, &hdr_info) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to retrieve superblock extension info")
@@ -1151,6 +1199,10 @@ H5F__super_size(H5F_t *f, hid_t dxpl_id, hsize_t *super_size, hsize_t *super_ext
} /* end if */
done:
+ /* Reset the ring in the DXPL */
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set property value")
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5F__super_size() */
@@ -1169,6 +1221,8 @@ done:
herr_t
H5F_super_ext_write_msg(H5F_t *f, hid_t dxpl_id, void *mesg, unsigned id, hbool_t may_create)
{
+ H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
hbool_t ext_created = FALSE; /* Whether superblock extension was created */
hbool_t ext_opened = FALSE; /* Whether superblock extension was opened */
H5O_loc_t ext_loc; /* "Object location" for superblock extension */
@@ -1182,6 +1236,10 @@ H5F_super_ext_write_msg(H5F_t *f, hid_t dxpl_id, void *mesg, unsigned id, hbool_
HDassert(f->shared);
HDassert(f->shared->sblock);
+ /* Set the ring type in the DXPL */
+ if(H5AC_set_ring(dxpl_id, H5AC_RING_SBE, &dxpl, &orig_ring) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set ring value")
+
/* Open/create the superblock extension object header */
if(H5F_addr_defined(f->shared->sblock->ext_addr)) {
if(H5F_super_ext_open(f, f->shared->sblock->ext_addr, &ext_loc) < 0)
@@ -1219,6 +1277,10 @@ H5F_super_ext_write_msg(H5F_t *f, hid_t dxpl_id, void *mesg, unsigned id, hbool_
} /* end else */
done:
+ /* Reset the ring in the DXPL */
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set property value")
+
/* Close the superblock extension, if it was opened */
if(ext_opened && H5F_super_ext_close(f, &ext_loc, dxpl_id, ext_created) < 0)
HDONE_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "unable to close file's superblock extension")
@@ -1245,6 +1307,8 @@ done:
herr_t
H5F_super_ext_remove_msg(H5F_t *f, hid_t dxpl_id, unsigned id)
{
+ H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
H5O_loc_t ext_loc; /* "Object location" for superblock extension */
hbool_t ext_opened = FALSE; /* Whether the superblock extension was opened */
int null_count = 0; /* # of null messages */
@@ -1256,6 +1320,10 @@ H5F_super_ext_remove_msg(H5F_t *f, hid_t dxpl_id, unsigned id)
/* Make sure that the superblock extension object header exists */
HDassert(H5F_addr_defined(f->shared->sblock->ext_addr));
+ /* Set the ring type in the DXPL */
+ if(H5AC_set_ring(dxpl_id, H5AC_RING_SBE, &dxpl, &orig_ring) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set ring value")
+
/* Open superblock extension object header */
if(H5F_super_ext_open(f, f->shared->sblock->ext_addr, &ext_loc) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "error in starting file's superblock extension")
@@ -1289,6 +1357,10 @@ H5F_super_ext_remove_msg(H5F_t *f, hid_t dxpl_id, unsigned id)
} /* end if */
done:
+ /* Reset the ring in the DXPL */
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set property value")
+
/* Close superblock extension object header, if opened */
if(ext_opened && H5F_super_ext_close(f, &ext_loc, dxpl_id, FALSE) < 0)
HDONE_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "unable to close file's superblock extension")
diff --git a/src/H5Fsuper_cache.c b/src/H5Fsuper_cache.c
index ac1f840..477f58f 100644
--- a/src/H5Fsuper_cache.c
+++ b/src/H5Fsuper_cache.c
@@ -515,6 +515,8 @@ H5F__cache_superblock_pre_serialize(const H5F_t *f, hid_t dxpl_id,
size_t H5_ATTR_UNUSED *new_len, size_t H5_ATTR_UNUSED *new_compressed_len,
unsigned H5_ATTR_UNUSED *flags)
{
+ H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
H5F_super_t *sblock = (H5F_super_t *)_thing; /* Pointer to the super block */
herr_t ret_value = SUCCEED; /* Return value */
@@ -562,6 +564,10 @@ H5F__cache_superblock_pre_serialize(const H5F_t *f, hid_t dxpl_id,
if(H5FD_sb_encode(f->shared->lf, drvinfo.name, dbuf) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to encode driver information")
+ /* Set the ring type in the DXPL */
+ if(H5AC_set_ring(dxpl_id, H5AC_RING_SBE, &dxpl, &orig_ring) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set ring value")
+
/* Write driver info information to the superblock extension */
drvinfo.len = driver_size;
drvinfo.buf = dbuf;
@@ -577,6 +583,10 @@ H5F__cache_superblock_pre_serialize(const H5F_t *f, hid_t dxpl_id,
} /* end if */
done:
+ /* Reset the ring in the DXPL */
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set property value")
+
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5F__cache_superblock_pre_serialize() */
diff --git a/src/H5MF.c b/src/H5MF.c
index 0c98654..8a227d7 100644
--- a/src/H5MF.c
+++ b/src/H5MF.c
@@ -237,6 +237,8 @@ H5MF_alloc_open(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type)
{
const H5FS_section_class_t *classes[] = { /* Free space section classes implemented for file */
H5MF_FSPACE_SECT_CLS_SIMPLE};
+ H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
@@ -250,6 +252,10 @@ H5MF_alloc_open(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type)
HDassert(H5F_addr_defined(f->shared->fs_addr[type]));
HDassert(f->shared->fs_state[type] == H5F_FS_STATE_CLOSED);
+ /* Set the ring type in the DXPL */
+ if(H5AC_set_ring(dxpl_id, H5AC_RING_FSM, &dxpl, &orig_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+
/* Open an existing free space structure for the file */
if(NULL == (f->shared->fs_man[type] = H5FS_open(f, dxpl_id, f->shared->fs_addr[type],
NELMTS(classes), classes, f, f->shared->alignment, f->shared->threshold)))
@@ -260,6 +266,10 @@ H5MF_alloc_open(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type)
f->shared->fs_state[type] = H5F_FS_STATE_OPEN;
done:
+ /* Reset the ring in the DXPL */
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
+
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5MF_alloc_open() */
@@ -423,6 +433,8 @@ done:
haddr_t
H5MF_alloc(H5F_t *f, H5FD_mem_t alloc_type, hid_t dxpl_id, hsize_t size)
{
+ H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
H5FD_mem_t fs_type; /* Free space type (mapped from allocation type) */
haddr_t ret_value; /* Return value */
@@ -440,6 +452,10 @@ HDfprintf(stderr, "%s: alloc_type = %u, size = %Hu\n", FUNC, (unsigned)alloc_typ
/* Get free space type from allocation type */
fs_type = H5MF_ALLOC_TO_FS_TYPE(f, alloc_type);
+ /* Set the ring type in the DXPL */
+ if(H5AC_set_ring(dxpl_id, H5AC_RING_FSM, &dxpl, &orig_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, HADDR_UNDEF, "unable to set ring value")
+
/* Check if we are using the free space manager for this file */
if(H5F_HAVE_FREE_SPACE_MANAGER(f)) {
/* Check if the free space manager for the file has been initialized */
@@ -512,6 +528,10 @@ HDfprintf(stderr, "%s: Check 2.0\n", FUNC);
HGOTO_ERROR(H5E_VFL, H5E_CANTALLOC, HADDR_UNDEF, "allocation failed from aggr/vfd")
done:
+ /* Reset the ring in the DXPL */
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, HADDR_UNDEF, "unable to set property value")
+
#ifdef H5MF_ALLOC_DEBUG
HDfprintf(stderr, "%s: Leaving: ret_value = %a, size = %Hu\n", FUNC, ret_value, size);
#endif /* H5MF_ALLOC_DEBUG */
@@ -604,6 +624,8 @@ H5MF_xfree(const H5F_t *f, H5FD_mem_t alloc_type, hid_t dxpl_id, haddr_t addr,
H5F_io_info_t fio_info; /* I/O info for operation */
H5MF_free_section_t *node = NULL; /* Free space section pointer */
H5MF_sect_ud_t udata; /* User data for callback */
+ H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
H5FD_mem_t fs_type; /* Free space type (mapped from allocation type) */
herr_t ret_value = SUCCEED; /* Return value */
@@ -622,6 +644,10 @@ HDfprintf(stderr, "%s: Entering - alloc_type = %u, addr = %a, size = %Hu\n", FUN
if(H5F_addr_le(f->shared->tmp_addr, addr))
HGOTO_ERROR(H5E_RESOURCE, H5E_BADRANGE, FAIL, "attempting to free temporary file space")
+ /* Set the ring type in the DXPL */
+ if(H5AC_set_ring(dxpl_id, H5AC_RING_FSM, &dxpl, &orig_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+
/* Set up I/O info for operation */
fio_info.f = f;
if(NULL == (fio_info.dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
@@ -729,6 +755,10 @@ HDfprintf(stderr, "%s: After H5FS_sect_add()\n", FUNC);
} /* end else */
done:
+ /* Reset the ring in the DXPL */
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
+
/* Release section node, if allocated and not added to section list or merged */
if(node)
if(H5MF_sect_simple_free((H5FS_section_info_t *)node) < 0)
@@ -762,6 +792,8 @@ htri_t
H5MF_try_extend(H5F_t *f, hid_t dxpl_id, H5FD_mem_t alloc_type, haddr_t addr,
hsize_t size, hsize_t extra_requested)
{
+ H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
haddr_t end; /* End of block to extend */
H5FD_mem_t map_type; /* Mapped type */
htri_t ret_value; /* Return value */
@@ -781,6 +813,10 @@ HDfprintf(stderr, "%s: Entering: alloc_type = %u, addr = %a, size = %Hu, extra_r
/* Compute end of block to extend */
end = addr + size;
+ /* Set the ring type in the DXPL */
+ if(H5AC_set_ring(dxpl_id, H5AC_RING_FSM, &dxpl, &orig_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+
/* Check if the block is exactly at the end of the file */
if((ret_value = H5FD_try_extend(f->shared->lf, map_type, f, end, extra_requested)) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTEXTEND, FAIL, "error extending file")
@@ -810,6 +846,10 @@ HDfprintf(stderr, "%s: Entering: alloc_type = %u, addr = %a, size = %Hu, extra_r
} /* end if */
done:
+ /* Reset the ring in the DXPL */
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
+
#ifdef H5MF_ALLOC_DEBUG
HDfprintf(stderr, "%s: Leaving: ret_value = %t\n", FUNC, ret_value);
#endif /* H5MF_ALLOC_DEBUG */
@@ -842,6 +882,8 @@ H5MF_sects_dump(f, dxpl_id, stderr);
herr_t
H5MF_get_freespace(H5F_t *f, hid_t dxpl_id, hsize_t *tot_space, hsize_t *meta_size)
{
+ H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
haddr_t eoa; /* End of allocated space in the file */
haddr_t ma_addr = HADDR_UNDEF; /* Base "metadata aggregator" address */
hsize_t ma_size = 0; /* Size of "metadata aggregator" */
@@ -865,6 +907,10 @@ H5MF_get_freespace(H5F_t *f, hid_t dxpl_id, hsize_t *tot_space, hsize_t *meta_si
if(HADDR_UNDEF == (eoa = H5F_get_eoa(f, H5FD_MEM_DEFAULT)))
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "driver get_eoa request failed")
+ /* Set the ring type in the DXPL */
+ if(H5AC_set_ring(dxpl_id, H5AC_RING_FSM, &dxpl, &orig_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+
/* Retrieve metadata aggregator info, if available */
if(H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "can't query metadata aggregator stats")
@@ -953,6 +999,10 @@ H5MF_get_freespace(H5F_t *f, hid_t dxpl_id, hsize_t *tot_space, hsize_t *meta_si
*meta_size = tot_meta_size;
done:
+ /* Reset the ring in the DXPL */
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
+
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5MF_get_freespace() */
@@ -977,6 +1027,8 @@ H5MF_try_shrink(H5F_t *f, H5FD_mem_t alloc_type, hid_t dxpl_id, haddr_t addr,
{
H5MF_free_section_t *node = NULL; /* Free space section pointer */
H5MF_sect_ud_t udata; /* User data for callback */
+ H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
htri_t ret_value; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -991,6 +1043,10 @@ HDfprintf(stderr, "%s: Entering - alloc_type = %u, addr = %a, size = %Hu\n", FUN
HDassert(H5F_addr_defined(addr));
HDassert(size > 0);
+ /* Set the ring type in the DXPL */
+ if(H5AC_set_ring(dxpl_id, H5AC_RING_FSM, &dxpl, &orig_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+
/* Create free space section for block */
if(NULL == (node = H5MF_sect_simple_new(addr, size)))
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize free space section")
@@ -1012,6 +1068,10 @@ HDfprintf(stderr, "%s: Entering - alloc_type = %u, addr = %a, size = %Hu\n", FUN
} /* end if */
done:
+ /* Reset the ring in the DXPL */
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
+
/* Free section node allocated */
if(node && H5MF_sect_simple_free((H5FS_section_info_t *)node) < 0)
HDONE_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't free simple section node")
@@ -1104,6 +1164,8 @@ done:
herr_t
H5MF_close(H5F_t *f, hid_t dxpl_id)
{
+ H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
H5FD_mem_t type; /* Memory type for iteration */
herr_t ret_value = SUCCEED; /* Return value */
@@ -1118,6 +1180,10 @@ HDfprintf(stderr, "%s: Entering\n", FUNC);
HDassert(f->shared->lf);
HDassert(f->shared->sblock);
+ /* Set the ring type in the DXPL */
+ if(H5AC_set_ring(dxpl_id, H5AC_RING_FSM, &dxpl, &orig_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+
/* Free the space in aggregators */
/* (for space not at EOF, it may be put into free space managers) */
if(H5MF_free_aggrs(f, dxpl_id) < 0)
@@ -1280,6 +1346,10 @@ HDfprintf(stderr, "%s: Before deleting free space manager\n", FUNC);
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSHRINK, FAIL, "can't shrink eoa")
done:
+ /* Reset the ring in the DXPL */
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
+
#ifdef H5MF_ALLOC_DEBUG
HDfprintf(stderr, "%s: Leaving\n", FUNC);
#endif /* H5MF_ALLOC_DEBUG */
@@ -1337,6 +1407,8 @@ H5MF_get_free_sections(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type, size_t nsects,
{
size_t total_sects = 0; /* total number of sections */
H5MF_sect_iter_ud_t sect_udata; /* User data for callback */
+ H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
H5FD_mem_t start_type, end_type; /* Memory types to iterate over */
H5FD_mem_t ty; /* Memory type for iteration */
ssize_t ret_value; /* Return value */
@@ -1363,6 +1435,10 @@ H5MF_get_free_sections(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type, size_t nsects,
sect_udata.sect_count = nsects;
sect_udata.sect_idx = 0;
+ /* Set the ring type in the DXPL */
+ if(H5AC_set_ring(dxpl_id, H5AC_RING_FSM, &dxpl, &orig_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+
/* Iterate over memory types, retrieving the number of sections of each type */
for(ty = start_type; ty < end_type; H5_INC_ENUM(H5FD_mem_t, ty)) {
hbool_t fs_started = FALSE;
@@ -1406,6 +1482,10 @@ H5MF_get_free_sections(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type, size_t nsects,
ret_value = (ssize_t)total_sects;
done:
+ /* Reset the ring in the DXPL */
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5MF_get_free_sections() */
diff --git a/src/H5Pdxpl.c b/src/H5Pdxpl.c
index cea94e3..980b001 100644
--- a/src/H5Pdxpl.c
+++ b/src/H5Pdxpl.c
@@ -167,6 +167,11 @@
#define H5D_XFER_DIRECT_CHUNK_WRITE_OFFSET_DEF NULL
#define H5D_XFER_DIRECT_CHUNK_WRITE_DATASIZE_SIZE sizeof(uint32_t)
#define H5D_XFER_DIRECT_CHUNK_WRITE_DATASIZE_DEF 0
+/* Ring type - private property */
+#define H5AC_XFER_RING_SIZE sizeof(unsigned)
+#define H5AC_XFER_RING_DEF H5AC_RING_US
+#define H5AC_XFER_RING_ENC H5P__encode_unsigned
+#define H5AC_XFER_RING_DEC H5P__decode_unsigned
/******************/
/* Local Typedefs */
@@ -271,6 +276,7 @@ static const hbool_t H5D_def_direct_chunk_flag_g = H5D_XFER_DIRECT_CHUNK_WRITE_F
static const uint32_t H5D_def_direct_chunk_filters_g = H5D_XFER_DIRECT_CHUNK_WRITE_FILTERS_DEF; /* Default value for the filters of direct chunk write */
static const hsize_t *H5D_def_direct_chunk_offset_g = H5D_XFER_DIRECT_CHUNK_WRITE_OFFSET_DEF; /* Default value for the offset of direct chunk write */
static const uint32_t H5D_def_direct_chunk_datasize_g = H5D_XFER_DIRECT_CHUNK_WRITE_DATASIZE_DEF; /* Default value for the datasize of direct chunk write */
+static const H5AC_ring_t H5D_ring_g = H5AC_XFER_RING_DEF; /* Default value for the cache entry ring type */
/*-------------------------------------------------------------------------
@@ -465,6 +471,12 @@ H5P__dxfr_reg_prop(H5P_genclass_t *pclass)
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
+ /* Register the ring type property (private) */
+ if(H5P_register_real(pclass, H5AC_RING_NAME, H5AC_XFER_RING_SIZE, &H5D_ring_g,
+ NULL, NULL, NULL, H5AC_XFER_RING_ENC, H5AC_XFER_RING_DEC,
+ NULL, NULL, NULL, NULL) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
+
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5P__dxfr_reg_prop() */
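
With this registration the ring rides along on every data transfer property list as a private property: an H5AC_ring_t stored in sizeof(unsigned) bytes, defaulting to H5AC_RING_US, and encoded/decoded as an unsigned. A sketch of how a consumer might read it back, assuming the usual H5P_genplist_t access pattern for private DXPL properties:

    H5AC_ring_t ring = H5AC_RING_INV;   /* Ring for the current operation */
    H5P_genplist_t *dxpl;               /* Property list pointer */

    /* Resolve the DXPL id and read the private ring property;
     * an unmodified DXPL yields the default, H5AC_RING_US */
    if(NULL == (dxpl = (H5P_genplist_t *)H5I_object_verify(dxpl_id, H5I_GENPROP_LST)))
        HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a property list")
    if(H5P_get(dxpl, H5AC_RING_NAME, &ring) < 0)
        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to get ring value")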
diff --git a/src/H5SM.c b/src/H5SM.c
index 1fc3b85..0ed4fbf 100644
--- a/src/H5SM.c
+++ b/src/H5SM.c
@@ -122,6 +122,8 @@ H5SM_init(H5F_t *f, H5P_genplist_t * fc_plist, const H5O_loc_t *ext_loc, hid_t d
{
H5O_shmesg_table_t sohm_table; /* SOHM message for superblock extension */
H5SM_master_table_t *table = NULL; /* SOHM master table for file */
+ H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t ring, orig_ring = H5AC_RING_INV; /* Ring value & original ring value */
haddr_t table_addr = HADDR_UNDEF; /* Address of SOHM master table in file */
unsigned list_max, btree_min; /* Phase change limits for SOHM indices */
unsigned index_type_flags[H5O_SHMESG_MAX_NINDEXES]; /* Messages types stored in each index */
@@ -136,6 +138,10 @@ H5SM_init(H5F_t *f, H5P_genplist_t * fc_plist, const H5O_loc_t *ext_loc, hid_t d
/* File should not already have a SOHM table */
HDassert(!H5F_addr_defined(H5F_SOHM_ADDR(f)));
+ /* Set the ring type in the DXPL */
+ if(H5AC_set_ring(dxpl_id, H5AC_RING_US, &dxpl, &orig_ring) < 0)
+ HGOTO_ERROR(H5E_SOHM, H5E_CANTSET, FAIL, "unable to set ring value")
+
/* Initialize master table */
if(NULL == (table = H5FL_MALLOC(H5SM_master_table_t)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTALLOC, FAIL, "memory allocation failed for SOHM table")
@@ -220,6 +226,11 @@ H5SM_init(H5F_t *f, H5P_genplist_t * fc_plist, const H5O_loc_t *ext_loc, hid_t d
if(type_flags_used & H5O_SHMESG_ATTR_FLAG)
H5F_SET_STORE_MSG_CRT_IDX(f, TRUE);
+ /* Switch the ring type to the superblock extension ring */
+ ring = H5AC_RING_SBE;
+ if((H5P_set(dxpl, H5AC_RING_NAME, &ring)) < 0)
+ HGOTO_ERROR(H5E_SOHM, H5E_CANTSET, FAIL, "unable to set property value")
+
/* Write shared message information to the superblock extension */
sohm_table.addr = H5F_SOHM_ADDR(f);
sohm_table.version = H5F_SOHM_VERS(f);
@@ -228,6 +239,10 @@ H5SM_init(H5F_t *f, H5P_genplist_t * fc_plist, const H5O_loc_t *ext_loc, hid_t d
HGOTO_ERROR(H5E_SOHM, H5E_CANTINIT, FAIL, "unable to update SOHM header message")
done:
+ /* Reset the ring in the DXPL */
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_SOHM, H5E_CANTSET, FAIL, "unable to set property value")
+
if(ret_value < 0) {
if(table_addr != HADDR_UNDEF)
H5MF_xfree(f, H5FD_MEM_SOHM_TABLE, dxpl_id, table_addr, (hsize_t)table->table_size);
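
H5SM_init() is the one caller in this patch that changes rings mid-function: it enters the user-space ring for the SOHM master table, then moves the same DXPL to the superblock extension ring before updating the extension message. A condensed sketch of that flow (error cleanup and the table setup elided):

    /* Enter the user-space ring for the SOHM master table */
    if(H5AC_set_ring(dxpl_id, H5AC_RING_US, &dxpl, &orig_ring) < 0)
        HGOTO_ERROR(H5E_SOHM, H5E_CANTSET, FAIL, "unable to set ring value")

    /* ... allocate and cache the SOHM master table ... */

    /* Move the same DXPL to the superblock extension ring before
     * writing the shared message info into the superblock extension */
    ring = H5AC_RING_SBE;
    if(H5P_set(dxpl, H5AC_RING_NAME, &ring) < 0)
        HGOTO_ERROR(H5E_SOHM, H5E_CANTSET, FAIL, "unable to set property value")

    /* ... update the SOHM header message in the extension ... */

done:
    /* A single reset restores the caller's ring, no matter which
     * ring was active when the function exited */
    if(H5AC_reset_ring(dxpl, orig_ring) < 0)
        HDONE_ERROR(H5E_SOHM, H5E_CANTSET, FAIL, "unable to set property value")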
@@ -1929,6 +1944,8 @@ H5SM_get_info(const H5O_loc_t *ext_loc, H5P_genplist_t *fc_plist, hid_t dxpl_id)
H5F_t *f = ext_loc->file; /* File pointer (convenience variable) */
H5O_shmesg_table_t sohm_table; /* SOHM message from superblock extension */
H5SM_master_table_t *table = NULL; /* SOHM master table */
+ H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
unsigned tmp_sohm_nindexes; /* Number of shared messages indexes in the table */
htri_t status; /* Status for message existing */
herr_t ret_value = SUCCEED; /* Return value */
@@ -1969,6 +1986,10 @@ H5SM_get_info(const H5O_loc_t *ext_loc, H5P_genplist_t *fc_plist, hid_t dxpl_id)
/* Set up user data for callback */
cache_udata.f = f;
+ /* Set the ring type in the DXPL */
+ if(H5AC_set_ring(dxpl_id, H5AC_RING_US, &dxpl, &orig_ring) < 0)
+ HGOTO_ERROR(H5E_SOHM, H5E_CANTSET, FAIL, "unable to set ring value")
+
/* Read the rest of the SOHM table information from the cache */
if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
@@ -2020,6 +2041,10 @@ H5SM_get_info(const H5O_loc_t *ext_loc, H5P_genplist_t *fc_plist, hid_t dxpl_id)
} /* end else */
done:
+ /* Reset the ring in the DXPL */
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_SOHM, H5E_CANTSET, FAIL, "unable to set property value")
+
/* Release the master SOHM table if we took it out of the cache */
if(table && H5AC_unprotect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), table, H5AC__NO_FLAGS_SET) < 0)
HDONE_ERROR(H5E_SOHM, H5E_CANTUNPROTECT, FAIL, "unable to close SOHM master table")
diff --git a/test/cache_common.c b/test/cache_common.c
index f3da908..a3287cf 100644
--- a/test/cache_common.c
+++ b/test/cache_common.c
@@ -3061,7 +3061,7 @@ setup_cache(size_t max_cache_size,
if ( pass ) { /* allocate space for test entries */
- actual_base_addr = H5MF_alloc(file_ptr, H5FD_MEM_DEFAULT, H5P_DEFAULT,
+ actual_base_addr = H5MF_alloc(file_ptr, H5FD_MEM_DEFAULT, H5P_DATASET_XFER_DEFAULT,
(hsize_t)(ADDR_SPACE_SIZE + BASE_ADDR));
if ( actual_base_addr == HADDR_UNDEF ) {
@@ -3171,7 +3171,7 @@ takedown_cache(H5F_t * file_ptr,
HDassert ( file_ptr );
}
- H5MF_xfree(file_ptr, H5FD_MEM_DEFAULT, H5P_DEFAULT, saved_actual_base_addr,
+ H5MF_xfree(file_ptr, H5FD_MEM_DEFAULT, H5P_DATASET_XFER_DEFAULT, saved_actual_base_addr,
(hsize_t)(ADDR_SPACE_SIZE + BASE_ADDR));
saved_actual_base_addr = HADDR_UNDEF;
}
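
The switch from H5P_DEFAULT to H5P_DATASET_XFER_DEFAULT in the tests follows from the hunks above: H5MF_alloc() and H5MF_xfree() now pass their dxpl_id to H5AC_set_ring(), which must resolve it to a genuine property list, and H5P_DEFAULT is only a placeholder id. (This reading is an inference from the code in this commit, not stated in the log message.) Sketch of the resulting call, with addr and size as hypothetical locals:

    /* Before: placeholder id, carries no ring property */
    /* addr = H5MF_alloc(file_ptr, H5FD_MEM_DEFAULT, H5P_DEFAULT, size); */

    /* After: a genuine transfer plist that carries the private ring
     * property, so H5MF_alloc() can save/override/restore the ring */
    addr = H5MF_alloc(file_ptr, H5FD_MEM_DEFAULT,
                      H5P_DATASET_XFER_DEFAULT, size);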
diff --git a/testpar/t_cache.c b/testpar/t_cache.c
index bb1ccc6..42788f4 100644
--- a/testpar/t_cache.c
+++ b/testpar/t_cache.c
@@ -4202,7 +4202,7 @@ setup_cache_for_test(hid_t * fid_ptr,
*/
if ( success ) { /* allocate space for test entries */
- actual_base_addr = H5MF_alloc(file_ptr, H5FD_MEM_DEFAULT, H5P_DEFAULT,
+ actual_base_addr = H5MF_alloc(file_ptr, H5FD_MEM_DEFAULT, H5P_DATASET_XFER_DEFAULT,
(hsize_t)(max_addr + BASE_ADDR));
if ( actual_base_addr == HADDR_UNDEF ) {