summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMike McGreevy <mamcgree@hdfgroup.org>2009-01-09 19:53:30 (GMT)
committerMike McGreevy <mamcgree@hdfgroup.org>2009-01-09 19:53:30 (GMT)
commit041e7dbfed0e6787d09cd1ad9f4dfa09b936ec31 (patch)
treeebc775e7844b267f6217410046a90dd0556162f8
parent307f67d52582c2ca9d59acd5b0f53511f0493ad2 (diff)
downloadhdf5-041e7dbfed0e6787d09cd1ad9f4dfa09b936ec31.zip
hdf5-041e7dbfed0e6787d09cd1ad9f4dfa09b936ec31.tar.gz
hdf5-041e7dbfed0e6787d09cd1ad9f4dfa09b936ec31.tar.bz2
[svn-r16290] Purpose:
Adding code to maintain a min_clean_fraction in the cache in serial mode. Description: The metadata cache now has the ability to maintain a min_clean_fraction when in serial mode. The default initial cache size has been changed from 1MB to 2MB, and the default min_clean_fraction has been set at 30%. This check-in includes modifications to H5C.c to support maintaining a min_clean_size, including the addition of clean_index_size and dirty_index_size trackers, modifications to the H5C_make_space_in_cache algorithm, as well as associated test code and additional statistics tracking variables. Maintaining the min_clean_fraction addresses the possibility of experiencing a "metadata blizzard" when the cache gets completely full with dirty entries. Upon having to make space, the cache would previously need to flush every single entry in the cache before coming across a clean entry which could be evicted. This resulted in unnecessary flushing of oftentimes hot entries in the cache. Maintaining the min_clean_fraction ensures that, when space is needed, clean entries are more readily available to evict. Tested: jam, smirom, linew (h5committest)
-rw-r--r--release_docs/RELEASE.txt2
-rw-r--r--src/H5ACprivate.h4
-rw-r--r--src/H5C.c819
-rw-r--r--src/H5Cpkg.h66
-rw-r--r--test/cache.c1609
-rw-r--r--test/cache_common.c63
6 files changed, 2347 insertions, 216 deletions
diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt
index 75433fe..51320d0 100644
--- a/release_docs/RELEASE.txt
+++ b/release_docs/RELEASE.txt
@@ -202,6 +202,8 @@ Bug Fixes since HDF5-1.8.0 release
ID correctly. (QAK - 2008/03/11)
- H5Dset_extent: when shrinking dimensions, some chunks were not deleted.
(PVN - 2009/01/8)
+ - Added code to maintain a min_clean_fraction in the metadata cache when
+ in serial mode. (MAM - 2009/01/9)
diff --git a/src/H5ACprivate.h b/src/H5ACprivate.h
index 4a6f9f3..66bdf2d 100644
--- a/src/H5ACprivate.h
+++ b/src/H5ACprivate.h
@@ -208,8 +208,8 @@ extern hid_t H5AC_ind_dxpl_id;
/* char trace_file_name[] = */ "", \
/* hbool_t evictions_enabled = */ TRUE, \
/* hbool_t set_initial_size = */ TRUE, \
- /* size_t initial_size = */ ( 1 * 1024 * 1024), \
- /* double min_clean_fraction = */ 0.5, \
+ /* size_t initial_size = */ ( 2 * 1024 * 1024), \
+ /* double min_clean_fraction = */ 0.3, \
/* size_t max_size = */ (16 * 1024 * 1024), \
/* size_t min_size = */ ( 1 * 1024 * 1024), \
/* long int epoch_length = */ 50000, \
diff --git a/src/H5C.c b/src/H5C.c
index a6a6240..57674f2 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -584,6 +584,14 @@ if ( ( (entry_ptr) == NULL ) || \
((cache_ptr)->size_increases[(entry_ptr)->type->id])++; \
if ( (cache_ptr)->index_size > (cache_ptr)->max_index_size ) \
(cache_ptr)->max_index_size = (cache_ptr)->index_size; \
+ if ( (cache_ptr)->clean_index_size > \
+ (cache_ptr)->max_clean_index_size ) \
+ (cache_ptr)->max_clean_index_size = \
+ (cache_ptr)->clean_index_size; \
+ if ( (cache_ptr)->dirty_index_size > \
+ (cache_ptr)->max_dirty_index_size ) \
+ (cache_ptr)->max_dirty_index_size = \
+ (cache_ptr)->dirty_index_size; \
if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \
(cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \
if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size ) \
@@ -680,6 +688,14 @@ if ( ( (entry_ptr) == NULL ) || \
(cache_ptr)->max_index_len = (cache_ptr)->index_len; \
if ( (cache_ptr)->index_size > (cache_ptr)->max_index_size ) \
(cache_ptr)->max_index_size = (cache_ptr)->index_size; \
+ if ( (cache_ptr)->clean_index_size > \
+ (cache_ptr)->max_clean_index_size ) \
+ (cache_ptr)->max_clean_index_size = \
+ (cache_ptr)->clean_index_size; \
+ if ( (cache_ptr)->dirty_index_size > \
+ (cache_ptr)->max_dirty_index_size ) \
+ (cache_ptr)->max_dirty_index_size = \
+ (cache_ptr)->dirty_index_size; \
if ( (cache_ptr)->slist_len > (cache_ptr)->max_slist_len ) \
(cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \
if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \
@@ -709,6 +725,14 @@ if ( ( (entry_ptr) == NULL ) || \
(cache_ptr)->max_index_len = (cache_ptr)->index_len; \
if ( (cache_ptr)->index_size > (cache_ptr)->max_index_size ) \
(cache_ptr)->max_index_size = (cache_ptr)->index_size; \
+ if ( (cache_ptr)->clean_index_size > \
+ (cache_ptr)->max_clean_index_size ) \
+ (cache_ptr)->max_clean_index_size = \
+ (cache_ptr)->clean_index_size; \
+ if ( (cache_ptr)->dirty_index_size > \
+ (cache_ptr)->max_dirty_index_size ) \
+ (cache_ptr)->max_dirty_index_size = \
+ (cache_ptr)->dirty_index_size; \
if ( (cache_ptr)->pl_len > (cache_ptr)->max_pl_len ) \
(cache_ptr)->max_pl_len = (cache_ptr)->pl_len; \
if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size ) \
@@ -761,6 +785,14 @@ if ( ( (entry_ptr) == NULL ) || \
(cache_ptr)->max_index_len = (cache_ptr)->index_len; \
if ( (cache_ptr)->index_size > (cache_ptr)->max_index_size ) \
(cache_ptr)->max_index_size = (cache_ptr)->index_size; \
+ if ( (cache_ptr)->clean_index_size > \
+ (cache_ptr)->max_clean_index_size ) \
+ (cache_ptr)->max_clean_index_size = \
+ (cache_ptr)->clean_index_size; \
+ if ( (cache_ptr)->dirty_index_size > \
+ (cache_ptr)->max_dirty_index_size ) \
+ (cache_ptr)->max_dirty_index_size = \
+ (cache_ptr)->dirty_index_size; \
if ( (cache_ptr)->slist_len > (cache_ptr)->max_slist_len ) \
(cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \
if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \
@@ -784,6 +816,14 @@ if ( ( (entry_ptr) == NULL ) || \
if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \
(cache_ptr)->max_index_len = (cache_ptr)->index_len; \
if ( (cache_ptr)->index_size > (cache_ptr)->max_index_size ) \
(cache_ptr)->max_index_size = (cache_ptr)->index_size; \
+ if ( (cache_ptr)->clean_index_size > \
+ (cache_ptr)->max_clean_index_size ) \
+ (cache_ptr)->max_clean_index_size = \
+ (cache_ptr)->clean_index_size; \
+ if ( (cache_ptr)->dirty_index_size > \
+ (cache_ptr)->max_dirty_index_size ) \
+ (cache_ptr)->max_dirty_index_size = \
+ (cache_ptr)->dirty_index_size; \
if ( (cache_ptr)->pl_len > (cache_ptr)->max_pl_len ) \
(cache_ptr)->max_pl_len = (cache_ptr)->pl_len; \
@@ -830,6 +870,14 @@ if ( ( (entry_ptr) == NULL ) || \
* When modifying these macros, remember to modify the similar macros
* in test/cache.c
*
+ * Changes:
+ *
+ * - Updated existing index macros and sanity check macros to maintain
+ * the clean_index_size and dirty_index_size fields of H5C_t. Also
+ * added macros to allow us to track entry cleans and dirties.
+ *
+ * JRM -- 11/5/08
+ *
***********************************************************************/
/* H5C__HASH_TABLE_LEN is defined in H5Cpkg.h. It must be a power of two. */
@@ -849,7 +897,11 @@ if ( ( (cache_ptr) == NULL ) || \
( (entry_ptr)->ht_prev != NULL ) || \
( (entry_ptr)->size <= 0 ) || \
( (k = H5C__HASH_FCN((entry_ptr)->addr)) < 0 ) || \
- ( k >= H5C__HASH_TABLE_LEN ) ) { \
+ ( k >= H5C__HASH_TABLE_LEN ) || \
+ ( (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + \
+ (cache_ptr)->dirty_index_size) ) ) { \
+ HDassert(0); /* JRM */ \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \
"Pre HT insert SC failed") \
}
@@ -871,13 +923,19 @@ if ( ( (cache_ptr) == NULL ) || \
( (entry_ptr)->ht_prev == NULL ) ) || \
( ( ((cache_ptr)->index)[(H5C__HASH_FCN((entry_ptr)->addr))] == \
(entry_ptr) ) && \
- ( (entry_ptr)->ht_prev != NULL ) ) ) { \
+ ( (entry_ptr)->ht_prev != NULL ) ) || \
+ ( (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + \
+ (cache_ptr)->dirty_index_size) ) ) { \
+ HDassert(0); /* JRM */ \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Pre HT remove SC failed") \
}
#define H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \
if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
+ ( (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
( ! H5F_addr_defined(Addr) ) || \
( H5C__HASH_FCN(Addr) < 0 ) || \
( H5C__HASH_FCN(Addr) >= H5C__HASH_TABLE_LEN ) ) { \
@@ -890,6 +948,8 @@ if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->index_len < 1 ) || \
( (entry_ptr) == NULL ) || \
( (cache_ptr)->index_size < (entry_ptr)->size ) || \
+ ( (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
( H5F_addr_ne((entry_ptr)->addr, (Addr)) ) || \
( (entry_ptr)->size <= 0 ) || \
( ((cache_ptr)->index)[k] == NULL ) || \
@@ -913,7 +973,8 @@ if ( ( (cache_ptr) == NULL ) || \
"Post HT shift to front SC failed") \
}
-#define H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size) \
+#define H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
+ entry_ptr, was_clean) \
if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->index_len <= 0 ) || \
( (cache_ptr)->index_size <= 0 ) || \
@@ -921,22 +982,78 @@ if ( ( (cache_ptr) == NULL ) || \
( (old_size) > (cache_ptr)->index_size ) || \
( (new_size) <= 0 ) || \
( ( (cache_ptr)->index_len == 1 ) && \
- ( (cache_ptr)->index_size != (old_size) ) ) ) { \
+ ( (cache_ptr)->index_size != (old_size) ) ) || \
+ ( (entry_ptr) == NULL ) ) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"Pre HT entry size change SC failed") \
+} \
+HDassert( (cache_ptr)->index_size == \
+ ((cache_ptr)->clean_index_size + \
+ (cache_ptr)->dirty_index_size) ); \
+HDassert( (entry_ptr) != NULL ); \
+HDassert( ( ( was_clean ) && \
+ ( (cache_ptr)->clean_index_size >= (old_size) ) ) || \
+ ( ( ! (was_clean) ) && \
+ ( (cache_ptr)->dirty_index_size >= (old_size) ) ) );
+
+#define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
+ entry_ptr) \
+if ( ( (cache_ptr) == NULL ) || \
+ ( (cache_ptr)->index_len <= 0 ) || \
+ ( (cache_ptr)->index_size <= 0 ) || \
+ ( (new_size) > (cache_ptr)->index_size ) || \
+ ( ( (cache_ptr)->index_len == 1 ) && \
+ ( (cache_ptr)->index_size != (new_size) ) ) ) { \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "Post HT entry size change SC failed") \
+} \
+HDassert( (cache_ptr)->index_size == \
+ ((cache_ptr)->clean_index_size + \
+ (cache_ptr)->dirty_index_size) ); \
+HDassert( ( ( (entry_ptr)->is_dirty ) && \
+ ( (cache_ptr)->dirty_index_size >= (new_size) ) ) || \
+ ( ( ! ((entry_ptr)->is_dirty) ) && \
+ ( (cache_ptr)->clean_index_size >= (new_size) ) ) );
+
+#define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) \
+{ \
+ HDassert( (cache_ptr) != NULL ); \
+ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
+ HDassert( (cache_ptr)->index_len > 0 ); \
+ HDassert( (entry_ptr) != NULL ); \
+ HDassert( (entry_ptr)->is_dirty == FALSE ); \
+ HDassert( (cache_ptr)->index_size >= (entry_ptr)->size ); \
+ HDassert( (cache_ptr)->dirty_index_size >= (entry_ptr)->size ); \
+ HDassert( (cache_ptr)->index_size == \
+ ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ); \
+}
+/* JRM */
+#define H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) \
+{ \
+ HDassert( (cache_ptr) != NULL ); \
+ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
+ HDassert( (cache_ptr)->index_len > 0 ); \
+ HDassert( (entry_ptr) != NULL ); \
+ HDassert( (entry_ptr)->is_dirty == TRUE ); \
+ HDassert( (cache_ptr)->index_size >= (entry_ptr)->size ); \
+ HDassert( (cache_ptr)->clean_index_size >= (entry_ptr)->size ); \
+ HDassert( (cache_ptr)->index_size == \
+ ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ); \
}
-#define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size) \
-if ( ( (cache_ptr) == NULL ) || \
- ( (cache_ptr)->index_len <= 0 ) || \
- ( (cache_ptr)->index_size <= 0 ) || \
- ( (new_size) > (cache_ptr)->index_size ) || \
- ( ( (cache_ptr)->index_len == 1 ) && \
- ( (cache_ptr)->index_size != (new_size) ) ) ) { \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "Post HT entry size change SC failed") \
+#define H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) \
+{ \
+ HDassert( (cache_ptr)->index_size == \
+ ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ); \
}
+#define H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) \
+{ \
+ HDassert( (cache_ptr)->index_size == \
+ ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ); \
+}
+
+
#else /* H5C_DO_SANITY_CHECKS */
#define H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val)
@@ -944,8 +1061,14 @@ if ( ( (cache_ptr) == NULL ) || \
#define H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val)
#define H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k, fail_val)
#define H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val)
-#define H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size)
-#define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size)
+#define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr)
+#define H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr)
+#define H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
+ entry_ptr, was_clean)
+#define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
+ entry_ptr)
+#define H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr)
+#define H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr)
#endif /* H5C_DO_SANITY_CHECKS */
@@ -967,6 +1090,11 @@ if ( ( (cache_ptr) == NULL ) || \
} \
(cache_ptr)->index_len++; \
(cache_ptr)->index_size += (entry_ptr)->size; \
+ if ( (entry_ptr)->is_dirty ) { \
+ (cache_ptr)->dirty_index_size += (entry_ptr)->size; \
+ } else { \
+ (cache_ptr)->clean_index_size += (entry_ptr)->size; \
+ } \
H5C__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr) \
}
@@ -991,6 +1119,11 @@ if ( ( (cache_ptr) == NULL ) || \
(entry_ptr)->ht_prev = NULL; \
(cache_ptr)->index_len--; \
(cache_ptr)->index_size -= (entry_ptr)->size; \
+ if ( (entry_ptr)->is_dirty ) { \
+ (cache_ptr)->dirty_index_size -= (entry_ptr)->size; \
+ } else { \
+ (cache_ptr)->clean_index_size -= (entry_ptr)->size; \
+ } \
H5C__UPDATE_STATS_FOR_HT_DELETION(cache_ptr) \
}
@@ -1059,12 +1192,40 @@ if ( ( (cache_ptr) == NULL ) || \
} \
}
-#define H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size) \
-{ \
- H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size) \
- (cache_ptr)->index_size -= old_size; \
- (cache_ptr)->index_size += new_size; \
- H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size) \
+#define H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr) \
+{ \
+ H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr); \
+ (cache_ptr)->dirty_index_size -= (entry_ptr)->size; \
+ (cache_ptr)->clean_index_size += (entry_ptr)->size; \
+ H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr); \
+}
+
+#define H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr) \
+{ \
+ H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr); \
+ (cache_ptr)->clean_index_size -= (entry_ptr)->size; \
+ (cache_ptr)->dirty_index_size += (entry_ptr)->size; \
+ H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr); \
+}
+
+#define H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size, \
+ entry_ptr, was_clean) \
+{ \
+ H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
+ entry_ptr, was_clean) \
+ (cache_ptr)->index_size -= (old_size); \
+ (cache_ptr)->index_size += (new_size); \
+ if ( was_clean ) { \
+ (cache_ptr)->clean_index_size -= (old_size); \
+ } else { \
+ (cache_ptr)->dirty_index_size -= (old_size); \
+ } \
+ if ( (entry_ptr)->is_dirty ) { \
+ (cache_ptr)->dirty_index_size += (new_size); \
+ } else { \
+ (cache_ptr)->clean_index_size += (new_size); \
+ } \
+ H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, entry_ptr) \
}
@@ -2847,6 +3008,10 @@ done:
* Added initialization for the new flash cache size increase
* related fields of H5C_t.
*
+ * JRM -- 11/5/08
+ * Added initialization for the new clean_index_size and
+ * dirty_index_size fields of H5C_t.
+ *
*-------------------------------------------------------------------------
*/
@@ -2922,6 +3087,8 @@ H5C_create(size_t max_cache_size,
cache_ptr->index_len = 0;
cache_ptr->index_size = (size_t)0;
+ cache_ptr->clean_index_size = (size_t)0;
+ cache_ptr->dirty_index_size = (size_t)0;
cache_ptr->slist_len = 0;
cache_ptr->slist_size = (size_t)0;
@@ -4624,6 +4791,29 @@ done:
* Added initialization for the new free_file_space_on_destroy
* field.
*
+ * JRM -- 11/13/08
+ * Moved test to see if we already have an entry with the
+ * specified address in the cache. This was necessary as
+ * we used to modify some fields in the entry to be inserted
+ * prior to this test, which got the cache confused if the
+ * insertion failed because the entry was already present.
+ *
+ * Also revised the function to call H5C_make_space_in_cache()
+ * if the min_clean_size is not met at present, not just if
+ * there is insufficient space in the cache for the new
+ * entry.
+ *
+ * The purpose of this modification is to avoid "metadata
+ * blizzards" in the write only case. In such instances,
+ * the cache was allowed to fill with dirty metadata. When
+ * we finally needed to evict an entry to make space, we had
+ * to flush out a whole cache full of metadata -- which has
+ * interesting performance effects. We hope to avoid (or
+ * perhaps more accurately hide) this effect by maintaining
+ * the min_clean_size, which should force us to start flushing
+ * entries long before we actually have to evict something
+ * to make space.
+ *
*-------------------------------------------------------------------------
*/
@@ -4644,6 +4834,7 @@ H5C_insert_entry(H5F_t * f,
hbool_t insert_pinned;
hbool_t set_flush_marker;
hbool_t write_permitted = TRUE;
+ size_t empty_space;
H5C_cache_entry_t * entry_ptr;
H5C_cache_entry_t * test_entry_ptr;
@@ -4677,12 +4868,39 @@ H5C_insert_entry(H5F_t * f,
insert_pinned = ( (flags & H5C__PIN_ENTRY_FLAG) != 0 );
entry_ptr = (H5C_cache_entry_t *)thing;
+
+ /* verify that the new entry isn't already in the hash table -- scream
+ * and die if it is.
+ */
+
+ H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL)
+
+ if ( test_entry_ptr != NULL ) {
+
+ if ( test_entry_ptr == entry_ptr ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, \
+ "entry already in cache.")
+
+ } else {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, \
+ "duplicate entry in cache.")
+ }
+ }
+
#ifndef NDEBUG
entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
#endif /* NDEBUG */
entry_ptr->addr = addr;
entry_ptr->type = type;
+ entry_ptr->is_protected = FALSE;
+ entry_ptr->is_read_only = FALSE;
+ entry_ptr->ro_ref_count = 0;
+
+ entry_ptr->is_pinned = insert_pinned;
+
/* newly inserted entries are assumed to be dirty */
entry_ptr->is_dirty = TRUE;
@@ -4730,13 +4948,35 @@ H5C_insert_entry(H5F_t * f,
}
}
- if ( ( cache_ptr->evictions_enabled ) &&
- ( (cache_ptr->index_size + entry_ptr->size) >
- cache_ptr->max_cache_size ) ) {
+ if ( cache_ptr->index_size >= cache_ptr->max_cache_size ) {
+
+ empty_space = 0;
+
+ } else {
+
+ empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
+
+ }
+
+ if ( ( cache_ptr->evictions_enabled )
+ &&
+ ( ( (cache_ptr->index_size + entry_ptr->size) >
+ cache_ptr->max_cache_size
+ )
+ ||
+ (
+ ( ( empty_space + cache_ptr->clean_index_size ) <
+ cache_ptr->min_clean_size )
+ )
+ )
+ ) {
size_t space_needed;
- cache_ptr->cache_full = TRUE;
+ if ( empty_space <= entry_ptr->size ) {
+
+ cache_ptr->cache_full = TRUE;
+ }
if ( cache_ptr->check_write_permitted != NULL ) {
@@ -4803,38 +5043,6 @@ H5C_insert_entry(H5F_t * f,
}
}
- /* verify that the new entry isn't already in the hash table -- scream
- * and die if it is.
- */
-
- H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL)
-
- if ( test_entry_ptr != NULL ) {
-
- if ( test_entry_ptr == entry_ptr ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, \
- "entry already in cache.")
-
- } else {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, \
- "duplicate entry in cache.")
- }
- }
-
- /* we don't initialize the protected field until here as it is
- * possible that the entry is already in the cache, and already
- * protected. If it is, we don't want to make things worse by
- * marking it unprotected.
- */
-
- entry_ptr->is_protected = FALSE;
- entry_ptr->is_read_only = FALSE;
- entry_ptr->ro_ref_count = 0;
-
- entry_ptr->is_pinned = insert_pinned;
-
H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL)
/* New entries are presumed to be dirty, so this if statement is
@@ -5209,6 +5417,19 @@ done:
* appropriate.
* JRM -- 1/11/08
*
+ *
+ * Added code to update the clean_index_size and
+ * dirty_index_size fields of H5C_t in cases where the
+ * the entry was clean on protect, was marked dirty in
+ * this call, and did not change its size. Do this via
+ * a call to H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY().
+ *
+ * If the size changed, this case is already dealt with by
+ * by the pre-existing call to
+ * H5C__UPDATE_INDEX_FOR_SIZE_CHANGE().
+ *
+ * JRM -- 11/5/08
+ *
*-------------------------------------------------------------------------
*/
herr_t
@@ -5219,6 +5440,7 @@ H5C_mark_pinned_entry_dirty(H5C_t * cache_ptr,
{
herr_t ret_value = SUCCEED; /* Return value */
herr_t result;
+ hbool_t was_clean;
size_t size_increase;
H5C_cache_entry_t * entry_ptr;
@@ -5243,6 +5465,9 @@ H5C_mark_pinned_entry_dirty(H5C_t * cache_ptr,
"Entry is protected??")
}
+ /* make note of whether the entry was dirty to begin with */
+ was_clean = ! ( entry_ptr->is_dirty );
+
/* mark the entry as dirty if it isn't already */
entry_ptr->is_dirty = TRUE;
@@ -5278,8 +5503,8 @@ H5C_mark_pinned_entry_dirty(H5C_t * cache_ptr,
(entry_ptr->size), (new_size));
/* update the hash table */
- H5C__UPDATE_INDEX_FOR_SIZE_CHANGE((cache_ptr), (entry_ptr->size),\
- (new_size));
+ H5C__UPDATE_INDEX_FOR_SIZE_CHANGE((cache_ptr), (entry_ptr->size), \
+ (new_size), (entry_ptr), (was_clean));
/* if the entry is in the skip list, update that too */
if ( entry_ptr->in_slist ) {
@@ -5294,6 +5519,10 @@ H5C_mark_pinned_entry_dirty(H5C_t * cache_ptr,
/* finally, update the entry size proper */
entry_ptr->size = new_size;
+
+ } else if ( ( was_clean ) && ( entry_ptr->is_dirty ) ) {
+
+ H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr)
}
if ( ! (entry_ptr->in_slist) ) {
@@ -5341,6 +5570,12 @@ done:
* it once we deal with the problem of entries being protected
* read only, and then dirtied.
*
+ * JRM -- 11/5/08
+ * Added call to H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY() to
+ * update the new clean_index_size and dirty_index_size
+ * fields of H5C_t in the case that the entry was clean
+ * prior to this call, and is pinned and not protected.
+ *
*-------------------------------------------------------------------------
*/
herr_t
@@ -5348,6 +5583,7 @@ H5C_mark_pinned_or_protected_entry_dirty(H5C_t * cache_ptr,
void * thing)
{
herr_t ret_value = SUCCEED; /* Return value */
+ hbool_t was_pinned_unprotected_and_clean;
H5C_cache_entry_t * entry_ptr;
FUNC_ENTER_NOAPI(H5C_mark_pinned_or_protected_entry_dirty, FAIL)
@@ -5367,9 +5603,15 @@ H5C_mark_pinned_or_protected_entry_dirty(H5C_t * cache_ptr,
} else if ( entry_ptr->is_pinned ) {
+ was_pinned_unprotected_and_clean = ! ( entry_ptr->is_dirty );
+
/* mark the entry as dirty if it isn't already */
entry_ptr->is_dirty = TRUE;
+ if ( was_pinned_unprotected_and_clean ) {
+
+ H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr);
+ }
if ( ! (entry_ptr->in_slist) ) {
@@ -5429,6 +5671,11 @@ done:
* Note that in this case H5C_flush_single_entry() will handle
* all these details for us.
*
+ * JRM -- 11/5/08
+ * On review this function looks like no change is needed to
+ * support the new clean_index_size and dirty_index_size
+ * fields of H5C_t.
+ *
*-------------------------------------------------------------------------
*/
@@ -5591,7 +5838,7 @@ done:
* Function: H5C_resize_pinned_entry
*
* Purpose: Resize a pinned entry. The target entry MUST be
- * be pinned, and MUST not be unprotected.
+ * be pinned, and MUST be unprotected.
*
* Resizing an entry dirties it, so if the entry is not
* already dirty, the function places the entry on the
@@ -5608,6 +5855,19 @@ done:
* appropriate.
* JRM -- 1/11/08
*
+ * Added code to update the clean_index_size and
+ * dirty_index_size fields of H5C_t in cases where the
+ * the entry was clean prior to this call, was marked dirty,
+ * and did not change its size. Do this via a call to
+ * H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY().
+ *
+ * If the size changed, this case is already dealt with
+ * by the pre-existing call to
+ * H5C__UPDATE_INDEX_FOR_SIZE_CHANGE().
+ *
+ * JRM -- 11/5/08
+ *
+ *
*-------------------------------------------------------------------------
*/
herr_t
@@ -5618,6 +5878,7 @@ H5C_resize_pinned_entry(H5C_t * cache_ptr,
/* const char * fcn_name = "H5C_resize_pinned_entry()"; */
herr_t ret_value = SUCCEED; /* Return value */
herr_t result;
+ hbool_t was_clean;
H5C_cache_entry_t * entry_ptr;
size_t size_increase;
@@ -5647,6 +5908,9 @@ H5C_resize_pinned_entry(H5C_t * cache_ptr,
"Entry is protected??")
}
+ /* make note of whether the entry was clean to begin with */
+ was_clean = ! ( entry_ptr->is_dirty );
+
/* resizing dirties entries -- mark the entry as dirty if it
* isn't already
*/
@@ -5685,7 +5949,7 @@ H5C_resize_pinned_entry(H5C_t * cache_ptr,
/* update the hash table */
H5C__UPDATE_INDEX_FOR_SIZE_CHANGE((cache_ptr), (entry_ptr->size),\
- (new_size));
+ (new_size), (entry_ptr), (was_clean));
/* if the entry is in the skip list, update that too */
if ( entry_ptr->in_slist ) {
@@ -5700,8 +5964,13 @@ H5C_resize_pinned_entry(H5C_t * cache_ptr,
/* finally, update the entry size proper */
entry_ptr->size = new_size;
+
+ } else if ( was_clean ) {
+
+ H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr)
}
+
if ( ! (entry_ptr->in_slist) ) {
H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
@@ -5872,6 +6141,22 @@ done:
* Added to do a flash cache size increase if appropriate
* when a large entry is loaded.
*
+ * JRM -- 11/13/08
+ * Modified function to call H5C_make_space_in_cache() when
+ * the min_clean_size is violated, not just when there isn't
+ * enough space for and entry that has just been loaded.
+ *
+ * The purpose of this modification is to avoid "metadata
+ * blizzards" in the write only case. In such instances,
+ * the cache was allowed to fill with dirty metadata. When
+ * we finally needed to evict an entry to make space, we had
+ * to flush out a whole cache full of metadata -- which has
+ * interesting performance effects. We hope to avoid (or
+ * perhaps more accurately hide) this effect by maintaining
+ * the min_clean_size, which should force us to start flushing
+ * entries long before we actually have to evict something
+ * to make space.
+ *
*-------------------------------------------------------------------------
*/
@@ -5893,6 +6178,7 @@ H5C_protect(H5F_t * f,
hbool_t read_only = FALSE;
hbool_t write_permitted;
herr_t result;
+ size_t empty_space;
void * thing;
H5C_cache_entry_t * entry_ptr;
void * ret_value; /* Return value */
@@ -5960,16 +6246,41 @@ H5C_protect(H5F_t * f,
}
}
- /* try to free up some space if necessary and if evictions are
- * permitted
+ if ( cache_ptr->index_size >= cache_ptr->max_cache_size ) {
+
+ empty_space = 0;
+
+ } else {
+
+ empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
+
+ }
+
+ /* try to free up some space if necessary and if evictions are permitted.
+ * Note that if evictions are enabled, we will call H5C_make_space_in_cache()
+ * whenever the min_clean_size requirement is not met.
*/
- if ( ( cache_ptr->evictions_enabled ) &&
- ( (cache_ptr->index_size + entry_ptr->size) >
- cache_ptr->max_cache_size ) ) {
+
+ if ( ( cache_ptr->evictions_enabled )
+ &&
+ ( ( (cache_ptr->index_size + entry_ptr->size) >
+ cache_ptr->max_cache_size
+ )
+ ||
+ (
+ ( ( empty_space + cache_ptr->clean_index_size ) <
+ cache_ptr->min_clean_size )
+ )
+ )
+ ) {
size_t space_needed;
- cache_ptr->cache_full = TRUE;
+ if ( empty_space <= entry_ptr->size ) {
+
+ cache_ptr->cache_full = TRUE;
+
+ }
if ( cache_ptr->check_write_permitted != NULL ) {
@@ -6160,10 +6471,31 @@ H5C_protect(H5F_t * f,
/* check to see if the cache is now oversized due to the cache
* size reduction. If it is, try to evict enough entries to
* bring the cache size down to the current maximum cache size.
+ *
+ * Also, if the min_clean_size requirement is not met, we
+ * should also call H5C_make_space_in_cache() to bring us
+ * into compliance.
*/
- if ( cache_ptr->index_size > cache_ptr->max_cache_size ) {
- cache_ptr->cache_full = TRUE;
+ if ( cache_ptr->index_size >= cache_ptr->max_cache_size ) {
+
+ empty_space = 0;
+
+ } else {
+
+ empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
+
+ }
+
+ if ( ( cache_ptr->index_size > cache_ptr->max_cache_size )
+ ||
+ ( ( empty_space + cache_ptr->clean_index_size ) <
+ cache_ptr->min_clean_size) ) {
+
+ if ( cache_ptr->index_size > cache_ptr->max_cache_size ) {
+
+ cache_ptr->cache_full = TRUE;
+ }
result = H5C_make_space_in_cache(f, primary_dxpl_id,
secondary_dxpl_id, cache_ptr,
@@ -6761,6 +7093,15 @@ done:
* Added code supporting the new write_protects,
* read_protects, and max_read_protects fields.
*
+ * JRM -- 11/13/08
+ * Added code displaying the max_clean_index_size and
+ * max_dirty_index_size.
+ *
+ * MAM -- 01/06/09
+ * Added code displaying the calls_to_msic,
+ * total_entries_skipped_in_msic, total_entries_scanned_in_msic,
+ * and max_entries_skipped_in_msic fields.
+ *
*-------------------------------------------------------------------------
*/
@@ -6808,6 +7149,8 @@ H5C_stats(H5C_t * cache_ptr,
double hit_rate;
double average_successful_search_depth = 0.0;
double average_failed_search_depth = 0.0;
+ double average_entries_skipped_per_calls_to_msic = 0.0;
+ double average_entries_scanned_per_calls_to_msic = 0.0;
#endif /* H5C_COLLECT_CACHE_STATS */
FUNC_ENTER_NOAPI(H5C_stats, FAIL)
@@ -6927,6 +7270,14 @@ H5C_stats(H5C_t * cache_ptr,
(long)(cache_ptr->max_index_len));
HDfprintf(stdout,
+ "%s current (max) clean/dirty idx size = %ld (%ld) / %ld (%ld)\n",
+ cache_ptr->prefix,
+ (long)(cache_ptr->clean_index_size),
+ (long)(cache_ptr->max_clean_index_size),
+ (long)(cache_ptr->dirty_index_size),
+ (long)(cache_ptr->max_dirty_index_size));
+
+ HDfprintf(stdout,
"%s current (max) slist size / length = %ld (%ld) / %ld (%ld)\n",
cache_ptr->prefix,
(long)(cache_ptr->slist_size),
@@ -7024,6 +7375,41 @@ H5C_stats(H5C_t * cache_ptr,
(long)total_pinned_flushes,
(long)total_pinned_clears);
+ HDfprintf(stdout, "%s MSIC: (make space in cache) calls = %lld\n",
+ cache_ptr->prefix,
+ (long long)(cache_ptr->calls_to_msic));
+
+ if (cache_ptr->calls_to_msic > 0) {
+ average_entries_skipped_per_calls_to_msic =
+ (((double)(cache_ptr->total_entries_skipped_in_msic)) /
+ ((double)(cache_ptr->calls_to_msic)));
+ }
+
+ HDfprintf(stdout, "%s MSIC: Average/max entries skipped = %lf / %ld\n",
+ cache_ptr->prefix,
+ (float)average_entries_skipped_per_calls_to_msic,
+ (long)(cache_ptr->max_entries_skipped_in_msic));
+
+ if (cache_ptr->calls_to_msic > 0) {
+ average_entries_scanned_per_calls_to_msic =
+ (((double)(cache_ptr->total_entries_scanned_in_msic)) /
+ ((double)(cache_ptr->calls_to_msic)));
+ }
+
+ HDfprintf(stdout, "%s MSIC: Average/max entries scanned = %lf / %ld\n",
+ cache_ptr->prefix,
+ (float)average_entries_scanned_per_calls_to_msic,
+ (long)(cache_ptr->max_entries_scanned_in_msic));
+
+ HDfprintf(stdout, "%s MSIC: Scanned to make space(evict) = %lld\n",
+ cache_ptr->prefix,
+ (long long)(cache_ptr->entries_scanned_to_make_space));
+
+ HDfprintf(stdout, "%s MSIC: Scanned to satisfy min_clean = %lld\n",
+ cache_ptr->prefix,
+ (long long)(cache_ptr->total_entries_scanned_in_msic -
+ cache_ptr->entries_scanned_to_make_space));
+
#if H5C_COLLECT_CACHE_ENTRY_STATS
HDfprintf(stdout, "%s aggregate max / min accesses = %d / %d\n",
@@ -7192,6 +7578,15 @@ done:
* Added initialization for the new write_protects,
* read_protects, and max_read_protects fields.
*
+ * JRM 11/13/08
+ * Added initialization for the new max_clean_index_size and
+ * max_dirty_index_size fields.
+ *
+ * MAM -- 01/06/09
+ * Added code to initialize the calls_to_msic,
+ * total_entries_skipped_in_msic, total_entries_scanned_in_msic,
+ * and max_entries_skipped_in_msic fields.
+ *
*-------------------------------------------------------------------------
*/
@@ -7249,6 +7644,8 @@ H5C_stats__reset(H5C_t UNUSED * cache_ptr)
cache_ptr->max_index_len = 0;
cache_ptr->max_index_size = (size_t)0;
+ cache_ptr->max_clean_index_size = (size_t)0;
+ cache_ptr->max_dirty_index_size = (size_t)0;
cache_ptr->max_slist_len = 0;
cache_ptr->max_slist_size = (size_t)0;
@@ -7259,6 +7656,13 @@ H5C_stats__reset(H5C_t UNUSED * cache_ptr)
cache_ptr->max_pel_len = 0;
cache_ptr->max_pel_size = (size_t)0;
+ cache_ptr->calls_to_msic = 0;
+ cache_ptr->total_entries_skipped_in_msic = 0;
+ cache_ptr->total_entries_scanned_in_msic = 0;
+ cache_ptr->max_entries_skipped_in_msic = 0;
+ cache_ptr->max_entries_scanned_in_msic = 0;
+ cache_ptr->entries_scanned_to_make_space = 0;
+
#if H5C_COLLECT_CACHE_ENTRY_STATS
for ( i = 0; i <= cache_ptr->max_type_id; i++ )
@@ -7437,6 +7841,17 @@ done:
* Separated "destroy entry" concept from "remove entry from
* cache" concept, by adding the 'take_ownership' flag.
*
+ * JRM -- 11/5/08
+ * Added code to update the clean_index_size and
+ * dirty_index_size fields of H5C_t in cases where the
+ * the entry was clean on protect, was marked dirty on
+ * unprotect, and did not change its size. Do this via
+ * a call to H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY().
+ *
+ * If the size changed, this case is already dealt with by
+ * by the pre-existing call to
+ * H5C__UPDATE_INDEX_FOR_SIZE_CHANGE().
+ *
*-------------------------------------------------------------------------
*/
herr_t
@@ -7459,6 +7874,7 @@ H5C_unprotect(H5F_t * f,
hbool_t unpin_entry;
hbool_t free_file_space;
hbool_t take_ownership;
+ hbool_t was_clean;
#ifdef H5_HAVE_PARALLEL
hbool_t clear_entry = FALSE;
#endif /* H5_HAVE_PARALLEL */
@@ -7510,6 +7926,7 @@ H5C_unprotect(H5F_t * f,
* the entry.
*/
dirtied |= entry_ptr->dirtied;
+ was_clean = ! ( entry_ptr->is_dirty );
#if H5C_DO_EXTREME_SANITY_CHECKS
if ( H5C_validate_lru_list(cache_ptr) < 0 ) {
@@ -7646,8 +8063,9 @@ H5C_unprotect(H5F_t * f,
(entry_ptr->size), (new_size));
/* update the hash table */
- H5C__UPDATE_INDEX_FOR_SIZE_CHANGE((cache_ptr), (entry_ptr->size),\
- (new_size));
+ H5C__UPDATE_INDEX_FOR_SIZE_CHANGE((cache_ptr), (entry_ptr->size), \
+ (new_size), (entry_ptr), \
+ (was_clean));
/* if the entry is in the skip list, update that too */
if ( entry_ptr->in_slist ) {
@@ -7663,7 +8081,11 @@ H5C_unprotect(H5F_t * f,
/* finally, update the entry size proper */
entry_ptr->size = new_size;
- }
+
+ } else if ( ( was_clean ) && ( entry_ptr->is_dirty ) ) {
+
+ H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr)
+ }
/* Pin or unpin the entry as requested. */
if ( pin_entry ) {
@@ -9929,6 +10351,8 @@ H5C_flush_invalidate_cache(H5F_t * f,
done = TRUE;
HDassert( cache_ptr->index_size == 0 );
+ HDassert( cache_ptr->clean_index_size == 0 );
+ HDassert( cache_ptr->dirty_index_size == 0 );
HDassert( cache_ptr->slist_len == 0 );
HDassert( cache_ptr->slist_size == 0 );
HDassert( cache_ptr->pel_len == 0 );
@@ -10053,6 +10477,11 @@ done:
* cache" concept, by adding the 'take_ownership' flag and
* the "destroy_entry" variable.
*
+ * JRM -- 11/5/08
+ * Added call to H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN() to
+ * maintain the new clean_index_size and dirty_index_size
+ * fields of H5C_t.
+ *
*-------------------------------------------------------------------------
*/
static herr_t
@@ -10413,6 +10842,7 @@ H5C_flush_single_entry(H5F_t * f,
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
"unable to flush entry")
}
+
#ifdef H5_HAVE_PARALLEL
if ( flush_flags != H5C_CALLBACK__NO_FLAGS_SET ) {
@@ -10455,6 +10885,12 @@ H5C_flush_single_entry(H5F_t * f,
if ( ( ! destroy ) && ( entry_ptr->in_slist ) ) {
H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr)
+
+ }
+
+ if ( ( ! destroy ) && ( was_dirty ) ) {
+
+ H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr);
}
if ( ! destroy ) { /* i.e. if the entry still exists */
@@ -10489,10 +10925,18 @@ H5C_flush_single_entry(H5F_t * f,
HDassert( entry_ptr->size < H5C_MAX_ENTRY_SIZE );
- /* update the hash table for the size change*/
+ /* update the hash table for the size change
+ * We pass TRUE as the was_clean parameter, as we
+ * have already updated the clean and dirty index
+ * size fields for the fact that the entry has
+ * been flushed. (See above call to
+ * H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN()).
+ */
H5C__UPDATE_INDEX_FOR_SIZE_CHANGE((cache_ptr), \
(entry_ptr->size),\
- (new_size));
+ (new_size), \
+ (entry_ptr), \
+ (TRUE));
/* The entry can't be protected since we just flushed it.
* Thus we must update the replacement policy data
@@ -10769,6 +11213,27 @@ done:
* but one can argue that I should just scream and die if I
* ever detect the condidtion.
*
+ * JRM -- 11/13/08
+ * Modified function to always observe the min_clean_size
+ * whether we are maintaining the clean and dirty LRU lists
+ * or not. To do this, we had to add the new clean_index_size
+ * and dirty_index_size fields to H5C_t, and supporting code
+ * as needed throughout the cache.
+ *
+ * The purpose of this modification is to avoid "metadata
+ * blizzards" in the write only case. In such instances,
+ * the cache was allowed to fill with dirty metadata. When
+ * we finally needed to evict an entry to make space, we had
+ * to flush out a whole cache full of metadata -- which has
+ * interesting performance effects. We hope to avoid (or
+ * perhaps more accurately hide) this effect by maintaining
+ * the min_clean_size, which should force us to start flushing
+ * entries long before we actually have to evict something
+ * to make space.
+ *
+ * MAM -- 01/06/09
+ * Added code to maintain the clean_entries_skipped and
+ * total_entries_scanned statistics.
*-------------------------------------------------------------------------
*/
@@ -10783,13 +11248,15 @@ H5C_make_space_in_cache(H5F_t * f,
{
herr_t ret_value = SUCCEED; /* Return value */
herr_t result;
+#if H5C_COLLECT_CACHE_STATS
+ int32_t clean_entries_skipped = 0;
+ int32_t total_entries_scanned = 0;
+#endif /* H5C_COLLECT_CACHE_STATS */
int32_t entries_examined = 0;
int32_t initial_list_len;
-#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
size_t empty_space;
-#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
hbool_t prev_is_dirty = FALSE;
- hbool_t entry_is_epoch_maker = FALSE;
+ hbool_t didnt_flush_entry = FALSE;
H5C_cache_entry_t * entry_ptr;
H5C_cache_entry_t * next_ptr;
H5C_cache_entry_t * prev_ptr;
@@ -10800,16 +11267,36 @@ H5C_make_space_in_cache(H5F_t * f,
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
HDassert( first_flush_ptr != NULL );
HDassert( ( *first_flush_ptr == TRUE ) || ( *first_flush_ptr == FALSE ) );
+ HDassert( cache_ptr->index_size ==
+ (cache_ptr->clean_index_size + cache_ptr->dirty_index_size) );
if ( write_permitted ) {
initial_list_len = cache_ptr->LRU_list_len;
+
entry_ptr = cache_ptr->LRU_tail_ptr;
- while ( ( (cache_ptr->index_size + space_needed)
- >
- cache_ptr->max_cache_size
- )
+ if ( cache_ptr->index_size >= cache_ptr->max_cache_size ) {
+
+ empty_space = 0;
+
+ } else {
+
+ empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
+
+ }
+
+ while ( ( ( (cache_ptr->index_size + space_needed)
+ >
+ cache_ptr->max_cache_size
+ )
+ ||
+ (
+ ( empty_space + cache_ptr->clean_index_size )
+ <
+ ( cache_ptr->min_clean_size )
+ )
+ )
&&
( entries_examined <= (2 * initial_list_len) )
&&
@@ -10830,10 +11317,18 @@ H5C_make_space_in_cache(H5F_t * f,
if ( (entry_ptr->type)->id != H5C__EPOCH_MARKER_TYPE ) {
- entry_is_epoch_maker = FALSE;
+ didnt_flush_entry = FALSE;
if ( entry_ptr->is_dirty ) {
+#if H5C_COLLECT_CACHE_STATS
+ if ( (cache_ptr->index_size + space_needed)
+ >
+ cache_ptr->max_cache_size ) {
+
+ cache_ptr->entries_scanned_to_make_space++;
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
result = H5C_flush_single_entry(f,
primary_dxpl_id,
secondary_dxpl_id,
@@ -10843,7 +11338,12 @@ H5C_make_space_in_cache(H5F_t * f,
H5C__NO_FLAGS_SET,
first_flush_ptr,
FALSE);
- } else {
+ } else if ( (cache_ptr->index_size + space_needed)
+ >
+ cache_ptr->max_cache_size ) {
+#if H5C_COLLECT_CACHE_STATS
+ cache_ptr->entries_scanned_to_make_space++;
+#endif /* H5C_COLLECT_CACHE_STATS */
result = H5C_flush_single_entry(f,
primary_dxpl_id,
@@ -10854,13 +11354,31 @@ H5C_make_space_in_cache(H5F_t * f,
H5C__FLUSH_INVALIDATE_FLAG,
first_flush_ptr,
TRUE);
+ } else {
+
+ /* We have enough space so don't flush clean entry.
+ * Set result to SUCCEED to avoid triggering the error
+ * code below.
+ */
+#if H5C_COLLECT_CACHE_STATS
+ clean_entries_skipped++;
+#endif /* H5C_COLLECT_CACHE_STATS */
+ didnt_flush_entry = TRUE;
+ result = SUCCEED;
+
}
+
+#if H5C_COLLECT_CACHE_STATS
+ total_entries_scanned++;
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+
} else {
/* Skip epoch markers. Set result to SUCCEED to avoid
* triggering the error code below.
*/
- entry_is_epoch_maker = TRUE;
+ didnt_flush_entry = TRUE;
result = SUCCEED;
}
@@ -10882,7 +11400,7 @@ H5C_make_space_in_cache(H5F_t * f,
}
#endif /* NDEBUG */
- if ( entry_is_epoch_maker ) {
+ if ( didnt_flush_entry ) {
entry_ptr = prev_ptr;
@@ -10913,121 +11431,52 @@ H5C_make_space_in_cache(H5F_t * f,
entries_examined++;
- }
-
-#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
-
- entries_examined = 0;
- initial_list_len = cache_ptr->dLRU_list_len;
- entry_ptr = cache_ptr->dLRU_tail_ptr;
-
- if ( cache_ptr->index_size < cache_ptr->max_cache_size ) {
-
- empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
-
- } else {
+ if ( cache_ptr->index_size >= cache_ptr->max_cache_size ) {
- empty_space = 0;
- }
-
- while ( ( (cache_ptr->cLRU_list_size + empty_space)
- < cache_ptr->min_clean_size ) &&
- ( entries_examined <= initial_list_len ) &&
- ( entry_ptr != NULL )
- )
- {
- HDassert( ! (entry_ptr->is_protected) );
- HDassert( ! (entry_ptr->is_read_only) );
- HDassert( (entry_ptr->ro_ref_count) == 0 );
- HDassert( entry_ptr->is_dirty );
- HDassert( entry_ptr->in_slist );
-
- prev_ptr = entry_ptr->aux_prev;
-
- next_ptr = entry_ptr->aux_next;
-
- if ( prev_ptr != NULL ) {
-
- HDassert( prev_ptr->is_dirty );
- }
-
- result = H5C_flush_single_entry(f,
- primary_dxpl_id,
- secondary_dxpl_id,
- cache_ptr,
- entry_ptr->type,
- entry_ptr->addr,
- H5C__NO_FLAGS_SET,
- first_flush_ptr,
- FALSE);
+ empty_space = 0;
- if ( result < 0 ) {
+ } else {
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
- "unable to flush entry")
- }
+ empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
- if ( prev_ptr != NULL ) {
-#ifndef NDEBUG
- if (prev_ptr->magic != H5C__H5C_CACHE_ENTRY_T_MAGIC) {
+ }
+
+ HDassert( cache_ptr->index_size ==
+ (cache_ptr->clean_index_size +
+ cache_ptr->dirty_index_size) );
- /* something horrible has happened to *prev_ptr --
- * scream and die.
- */
+ }
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "*prev_ptr corrupt 2")
+#if H5C_COLLECT_CACHE_STATS
+ cache_ptr->calls_to_msic++;
- } else
-#endif /* #ifndef NDEBUG */
- if ( ( ! ( prev_ptr->is_dirty ) )
- ||
- ( prev_ptr->aux_next != next_ptr )
- ||
- ( prev_ptr->is_protected )
- ||
- ( prev_ptr->is_pinned ) ) {
-
- /* something has happened to the dirty LRU -- start over
- * from the tail.
- */
-#if 0 /* This debuging code may be useful in the future -- keep it for now. */
- if ( ! ( prev_ptr->is_dirty ) ) {
- HDfprintf(stdout, "%s: ! prev_ptr->is_dirty\n",
- fcn_name);
- }
- if ( prev_ptr->aux_next != next_ptr ) {
- HDfprintf(stdout, "%s: prev_ptr->next != next_ptr\n",
- fcn_name);
- }
- if ( prev_ptr->is_protected ) {
- HDfprintf(stdout, "%s: prev_ptr->is_protected\n",
- fcn_name);
- }
- if ( prev_ptr->is_pinned ) {
- HDfprintf(stdout, "%s:prev_ptr->is_pinned\n",
- fcn_name);
- }
+ cache_ptr->total_entries_skipped_in_msic += clean_entries_skipped;
+ cache_ptr->total_entries_scanned_in_msic += total_entries_scanned;
- HDfprintf(stdout, "%s: re-starting scan of dirty list\n",
- fcn_name);
-#endif /* JRM */
- entry_ptr = cache_ptr->dLRU_tail_ptr;
+ if ( clean_entries_skipped > cache_ptr->max_entries_skipped_in_msic ) {
- } else {
+ cache_ptr->max_entries_skipped_in_msic = clean_entries_skipped;
+ }
- entry_ptr = prev_ptr;
+ if ( total_entries_scanned > cache_ptr->max_entries_scanned_in_msic ) {
- }
- } else {
+ cache_ptr->max_entries_scanned_in_msic = total_entries_scanned;
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
- entry_ptr = NULL;
+ HDassert( ( entries_examined > (2 * initial_list_len) ) ||
+ ( (cache_ptr->pl_size + cache_ptr->min_clean_size) >
+ cache_ptr->max_cache_size ) ||
+ ( ( cache_ptr->clean_index_size + empty_space )
+ >= cache_ptr->min_clean_size ) );
- }
+#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
- entries_examined++;
- }
+ HDassert( ( entries_examined > (2 * initial_list_len) ) ||
+ ( cache_ptr->cLRU_list_size <= cache_ptr->clean_index_size ) );
+ HDassert( ( entries_examined > (2 * initial_list_len) ) ||
+ ( cache_ptr->dLRU_list_size <= cache_ptr->dirty_index_size ) );
#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index bdbb25e..5e7b1a7 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -203,6 +203,38 @@
* index_size by two should yield a conservative estimate
* of the cache's memory footprint.
*
+ * clean_index_size: Number of bytes of clean entries currently stored in
+ * the hash table. Note that the index_size field (above)
+ * is also the sum of the sizes of all entries in the cache.
+ * Thus we should have the invariant that clean_index_size +
+ * dirty_index_size == index_size.
+ *
+ * WARNING:
+ *
+ * 1) The clean_index_size field is not maintained by the
+ * index macros, as the hash table doesn't care whether
+ * the entry is clean or dirty. Instead the field is
+ * maintained in the H5C__UPDATE_RP macros.
+ *
+ * 2) The value of the clean_index_size must not be mistaken
+ * for the current clean size of the cache. Rather, the
+ * clean size of the cache is the current value of
+ * clean_index_size plus the amount of empty space (if any)
+ * in the cache.
+ *
+ * dirty_index_size: Number of bytes of dirty entries currently stored in
+ * the hash table. Note that the index_size field (above)
+ * is also the sum of the sizes of all entries in the cache.
+ * Thus we should have the invariant that clean_index_size +
+ * dirty_index_size == index_size.
+ *
+ * WARNING:
+ *
+ * 1) The dirty_index_size field is not maintained by the
+ * index macros, as the hash table doesn't care whether
+ * the entry is clean or dirty. Instead the field is
+ * maintained in the H5C__UPDATE_RP macros.
+ *
* index: Array of pointer to H5C_cache_entry_t of size
* H5C__HASH_TABLE_LEN. At present, this value is a power
* of two, not the usual prime number.
@@ -722,6 +754,12 @@
* max_index_size: Largest value attained by the index_size field in the
* current epoch.
*
+ * max_clean_index_size: Largest value attained by the clean_index_size field
+ * in the current epoch.
+ *
+ * max_dirty_index_size: Largest value attained by the dirty_index_size field
+ * in the current epoch.
+ *
* max_slist_len: Largest value attained by the slist_len field in the
* current epoch.
*
@@ -740,6 +778,23 @@
* max_pel_size: Largest value attained by the pel_size field in the
* current epoch.
*
+ * calls_to_msic: Total number of calls to H5C_make_space_in_cache
+ *
+ * total_entries_skipped_in_msic: Number of clean entries skipped while
+ * enforcing the min_clean_fraction in H5C_make_space_in_cache().
+ *
+ * total_entries_scanned_in_msic: Number of entries scanned while
+ * enforcing the min_clean_fraction in H5C_make_space_in_cache().
+ *
+ * max_entries_skipped_in_msic: Maximum number of clean entries skipped
+ * in any one call to H5C_make_space_in_cache().
+ *
+ * max_entries_scanned_in_msic: Maximum number of entries scanned over
+ * in any one call to H5C_make_space_in_cache().
+ *
+ * entries_scanned_to_make_space: Number of entries scanned only when looking
+ * for entries to evict in order to make space in cache.
+ *
* The remaining stats are collected only when both H5C_COLLECT_CACHE_STATS
* and H5C_COLLECT_CACHE_ENTRY_STATS are true.
*
@@ -830,6 +885,8 @@ struct H5C_t
int32_t index_len;
size_t index_size;
+ size_t clean_index_size;
+ size_t dirty_index_size;
H5C_cache_entry_t * (index[H5C__HASH_TABLE_LEN]);
@@ -923,6 +980,8 @@ struct H5C_t
int32_t max_index_len;
size_t max_index_size;
+ size_t max_clean_index_size;
+ size_t max_dirty_index_size;
int32_t max_slist_len;
size_t max_slist_size;
@@ -933,6 +992,13 @@ struct H5C_t
int32_t max_pel_len;
size_t max_pel_size;
+ int64_t calls_to_msic;
+ int64_t total_entries_skipped_in_msic;
+ int64_t total_entries_scanned_in_msic;
+ int32_t max_entries_skipped_in_msic;
+ int32_t max_entries_scanned_in_msic;
+ int64_t entries_scanned_to_make_space;
+
#if H5C_COLLECT_CACHE_ENTRY_STATS
int32_t max_accesses[H5C__MAX_NUM_TYPE_IDS + 1];
diff --git a/test/cache.c b/test/cache.c
index bc89102..24c1f6c 100644
--- a/test/cache.c
+++ b/test/cache.c
@@ -23,6 +23,12 @@
#include "H5Iprivate.h"
#include "H5ACprivate.h"
#include "cache_common.h"
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <math.h>
+
/* private function declarations: */
@@ -2987,6 +2993,7 @@ static void
check_flush_cache(void)
{
const char * fcn_name = "check_flush_cache";
+ hbool_t show_progress = FALSE;
H5C_t * cache_ptr = NULL;
TESTING("H5C_flush_cache() functionality");
@@ -3000,6 +3007,11 @@ check_flush_cache(void)
if ( pass ) {
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s: reseting entries.\n", fcn_name);
+ }
+
reset_entries();
cache_ptr = setup_cache((size_t)(2 * 1024 * 1024),
@@ -3012,6 +3024,12 @@ check_flush_cache(void)
if ( pass ) {
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s: calling check_flush_cache__empty_cache().\n",
+ fcn_name);
+ }
+
check_flush_cache__empty_cache(cache_ptr);
}
@@ -3021,21 +3039,45 @@ check_flush_cache(void)
if ( pass ) {
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s: calling check_flush_cache__single_entry().\n",
+ fcn_name);
+ }
+
check_flush_cache__single_entry(cache_ptr);
}
if ( pass ) {
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s: calling check_flush_cache__multi_entry().\n",
+ fcn_name);
+ }
+
check_flush_cache__multi_entry(cache_ptr);
}
if ( pass ) {
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s: calling check_flush_cache__flush_ops().\n",
+ fcn_name);
+ }
+
check_flush_cache__flush_ops(cache_ptr);
}
if ( pass ) {
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s: calling takedown_cache().\n",
+ fcn_name);
+ }
+
takedown_cache(cache_ptr, FALSE, FALSE);
}
@@ -9211,9 +9253,12 @@ check_flush_cache__flush_op_test(H5C_t * cache_ptr,
int check_size,
struct fo_flush_entry_check check[])
{
- /* const char * fcn_name = "check_flush_cache__flush_op_test"; */
+ const char * fcn_name = "check_flush_cache__flush_op_test";
static char msg[128];
+ hbool_t show_progress = FALSE;
+ hbool_t verbose = FALSE;
herr_t result;
+ int target_test = -1;
int i;
int j;
test_entry_t * base_addr;
@@ -9224,6 +9269,17 @@ check_flush_cache__flush_op_test(H5C_t * cache_ptr,
test_num);
#endif
+ if ( ( target_test > 0 ) && ( test_num != target_test ) ) {
+
+ show_progress = FALSE;
+ }
+
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s:%d:%d: running sanity checks on entry(1).\n",
+ fcn_name, test_num, (int)pass);
+ }
+
if ( cache_ptr == NULL ) {
pass = FALSE;
@@ -9251,6 +9307,12 @@ check_flush_cache__flush_op_test(H5C_t * cache_ptr,
failure_mssg = msg;
}
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s:%d:%d: running sanity checks on entry(2).\n",
+ fcn_name, test_num, (int)pass);
+ }
+
i = 0;
while ( ( pass ) && ( i < spec_size ) )
{
@@ -9273,6 +9335,12 @@ check_flush_cache__flush_op_test(H5C_t * cache_ptr,
i++;
}
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s:%d:%d: running sanity checks on entry(3).\n",
+ fcn_name, test_num, (int)pass);
+ }
+
i = 0;
while ( ( pass ) && ( i < check_size ) )
{
@@ -9310,18 +9378,54 @@ check_flush_cache__flush_op_test(H5C_t * cache_ptr,
i++;
}
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s:%d:%d: Setting up the test.\n",
+ fcn_name, test_num, (int)pass);
+ }
+
i = 0;
while ( ( pass ) && ( i < spec_size ) )
{
if ( spec[i].insert_flag ) {
+ if ( show_progress ) {
+
+ HDfprintf(stdout,
+ "%s:%d: Inserting entry(%d,%d) with flags 0x%x.\n",
+ fcn_name, test_num,
+ (int)(spec[i].entry_type),
+ (int)(spec[i].entry_index),
+ (unsigned)spec[i].flags);
+ }
+
insert_entry(cache_ptr, spec[i].entry_type, spec[i].entry_index,
TRUE, spec[i].flags);
} else {
+ if ( show_progress ) {
+
+ HDfprintf(stdout,
+ "%s:%d: Protecting entry(%d,%d).\n",
+ fcn_name, test_num,
+ (int)(spec[i].entry_type),
+ (int)(spec[i].entry_index));
+ }
+
protect_entry(cache_ptr, spec[i].entry_type, spec[i].entry_index);
+ if ( show_progress ) {
+
+ HDfprintf(stdout,
+ "%s:%d: Unprotecting entry(%d,%d) with flags 0x%x ns = %d.\n",
+ fcn_name, test_num,
+ (int)(spec[i].entry_type),
+ (int)(spec[i].entry_index),
+ (unsigned)spec[i].flags,
+ (int)(spec[i].new_size));
+ }
+
unprotect_entry_with_size_change(cache_ptr, spec[i].entry_type,
spec[i].entry_index,
spec[i].flags, spec[i].new_size);
@@ -9363,6 +9467,12 @@ check_flush_cache__flush_op_test(H5C_t * cache_ptr,
}
}
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s:%d:%d: Running the test.\n",
+ fcn_name, test_num, (int)pass);
+ }
+
if ( pass ) {
result = H5C_flush_cache(NULL, -1, -1, cache_ptr, flush_flags);
@@ -9377,6 +9487,11 @@ check_flush_cache__flush_op_test(H5C_t * cache_ptr,
}
}
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s:%d:%d: Checking test results(1).\n",
+ fcn_name, test_num, (int)pass);
+ }
i = 0;
while ( ( pass ) && ( i < spec_size ) )
@@ -9415,6 +9530,12 @@ check_flush_cache__flush_op_test(H5C_t * cache_ptr,
i++;
}
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s:%d:%d: Checking test results(2).\n",
+ fcn_name, test_num, (int)pass);
+ }
+
if ( pass ) {
i = 0;
@@ -9533,6 +9654,12 @@ check_flush_cache__flush_op_test(H5C_t * cache_ptr,
}
}
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s:%d:%d: Checking test results(3).\n",
+ fcn_name, test_num, (int)pass);
+ }
+
if ( pass ) {
if ( ( ( (flush_flags & H5C__FLUSH_INVALIDATE_FLAG) == 0 )
@@ -9561,6 +9688,13 @@ check_flush_cache__flush_op_test(H5C_t * cache_ptr,
}
/* clean up the cache to prep for the next test */
+
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s:%d:%d: Cleaning up after test(1).\n",
+ fcn_name, test_num, (int)pass);
+ }
+
if ( pass ) {
result = H5C_flush_cache(NULL, -1, -1, cache_ptr,
@@ -9575,17 +9709,35 @@ check_flush_cache__flush_op_test(H5C_t * cache_ptr,
failure_mssg = msg;
}
else if ( ( cache_ptr->index_len != 0 ) ||
- ( cache_ptr->index_size != 0 ) ) {
+ ( cache_ptr->index_size != 0 ) ||
+ ( cache_ptr->clean_index_size != 0 ) ||
+ ( cache_ptr->dirty_index_size != 0 ) ) {
pass = FALSE;
+
+ if ( verbose ) {
+
+ HDfprintf(stdout, "%s:%d: il/is/cis/dis = %lld/%lld/%lld/%lld.\n",
+ fcn_name, test_num,
+ (long long)(cache_ptr->index_len),
+ (long long)(cache_ptr->index_size),
+ (long long)(cache_ptr->clean_index_size),
+ (long long)(cache_ptr->dirty_index_size));
+ }
HDsnprintf(msg, (size_t)128,
- "Unexpected cache len/size after cleanup in flush op test #%d.",
+ "Unexpected cache len/size/cs/ds after cleanup in flush op test #%d.",
test_num);
failure_mssg = msg;
}
}
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s:%d:%d: Cleaning up after test(2).\n",
+ fcn_name, test_num, (int)pass);
+ }
+
i = 0;
while ( ( pass ) && ( i < spec_size ) )
{
@@ -9602,6 +9754,12 @@ check_flush_cache__flush_op_test(H5C_t * cache_ptr,
i++;
}
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s:%d:%d: Cleaning up after test(3).\n",
+ fcn_name, test_num, (int)pass);
+ }
+
i = 0;
while ( ( pass ) && ( i < check_size ) )
{
@@ -9618,6 +9776,11 @@ check_flush_cache__flush_op_test(H5C_t * cache_ptr,
i++;
}
+ if ( show_progress ) {
+
+ HDfprintf(stdout, "%s:%d:%d: Done.\n", fcn_name, test_num, (int)pass);
+ }
+
return;
} /* check_flush_cache__flush_op_test() */
@@ -9716,30 +9879,34 @@ check_flush_cache__flush_op_eviction_test(H5C_t * cache_ptr)
{ LARGE_ENTRY_TYPE, 13, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE }
};
- if ( cache_ptr == NULL ) {
+ if ( pass ) {
- pass = FALSE;
- failure_mssg = "cache_ptr NULL on entry to flush ops test.";
- }
- else if ( ( cache_ptr->index_len != 0 ) ||
- ( cache_ptr->index_size != 0 ) ) {
+ if ( cache_ptr == NULL ) {
- pass = FALSE;
- failure_mssg = "cache not empty at start of flush ops eviction test.";
- }
- else if ( ( cache_ptr->max_cache_size != (2 * 1024 * 1024 ) ) ||
- ( cache_ptr->min_clean_size != (1 * 1024 * 1024 ) ) ) {
+ pass = FALSE;
+ failure_mssg = "cache_ptr NULL on entry to flush ops test.";
+ }
+ else if ( ( cache_ptr->index_len != 0 ) ||
+ ( cache_ptr->index_size != 0 ) ) {
- pass = FALSE;
- failure_mssg = "unexpected cache config at start of flush op eviction test.";
+ pass = FALSE;
+ failure_mssg = "cache not empty at start of flush ops eviction test.";
+ }
+ else if ( ( cache_ptr->max_cache_size != (2 * 1024 * 1024 ) ) ||
+ ( cache_ptr->min_clean_size != (1 * 1024 * 1024 ) ) ) {
- } else {
+ pass = FALSE;
+ failure_mssg =
+ "unexpected cache config at start of flush op eviction test.";
- /* set min clean size to zero for this test as it simplifies
- * computing the expected cache size after each operation.
- */
+ } else {
+
+ /* set min clean size to zero for this test as it simplifies
+ * computing the expected cache size after each operation.
+ */
- cache_ptr->min_clean_size = 0;
+ cache_ptr->min_clean_size = 0;
+ }
}
if ( pass ) {
@@ -10793,7 +10960,8 @@ check_flush_cache__flush_op_eviction_test(H5C_t * cache_ptr)
static void
check_flush_cache__single_entry(H5C_t * cache_ptr)
{
- /* const char * fcn_name = "check_flush_cache__single_entry"; */
+ const char * fcn_name = "check_flush_cache__single_entry";
+ hbool_t show_progress = FALSE;
if ( cache_ptr == NULL ) {
@@ -10809,6 +10977,10 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 1);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -10824,10 +10996,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 2);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -10843,10 +11023,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ TRUE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 3);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -10862,10 +11050,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 4);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -10881,10 +11077,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 5);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -10900,10 +11104,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ TRUE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 6);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -10919,10 +11131,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ TRUE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 7);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -10938,10 +11158,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 8);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -10957,10 +11185,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 9);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -10977,10 +11213,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 10);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -10997,10 +11241,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 11);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11017,10 +11269,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 12);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11037,10 +11297,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 13);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11057,10 +11325,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ TRUE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 14);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11077,10 +11353,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ TRUE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 15);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11098,10 +11382,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 16);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11119,10 +11411,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 17);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11138,10 +11438,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 18);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11157,10 +11465,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ TRUE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 19);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11176,10 +11492,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 20);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11195,10 +11519,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 21);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11214,10 +11546,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ TRUE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 22);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11233,10 +11573,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ TRUE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 23);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11252,10 +11600,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 24);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11271,10 +11627,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ TRUE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 25);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11291,10 +11655,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 26);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11311,10 +11683,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 27);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11331,10 +11711,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 28);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11351,10 +11739,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 29);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11371,10 +11767,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ TRUE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 30);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11391,10 +11795,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ TRUE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 31);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11412,10 +11824,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 32);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11433,10 +11853,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 33);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11452,10 +11880,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ TRUE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 34);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11471,10 +11907,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ TRUE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 35);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11490,10 +11934,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 36);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11509,10 +11961,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 37);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11528,10 +11988,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ TRUE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 38);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11547,10 +12015,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ TRUE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 39);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11566,10 +12042,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 40);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11585,10 +12069,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 41);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11605,10 +12097,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 42);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11625,10 +12125,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 43);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11645,10 +12153,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 44);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11665,10 +12181,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 45);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11685,10 +12209,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ TRUE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 46);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11705,10 +12237,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ TRUE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 47);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11726,10 +12266,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 48);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11747,10 +12295,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 49);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11766,10 +12322,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ TRUE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 50);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11785,10 +12349,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ TRUE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 51);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11804,10 +12376,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 52);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11823,10 +12403,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 53);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11842,10 +12430,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ TRUE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 54);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11861,10 +12457,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ TRUE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 55);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11880,10 +12484,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ TRUE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 56);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11899,10 +12511,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ TRUE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 57);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11919,10 +12539,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 58);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11939,10 +12567,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 59);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11959,10 +12595,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 60);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11979,10 +12623,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ FALSE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 61);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -11999,10 +12651,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ TRUE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 62);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -12019,10 +12679,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ TRUE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 63);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -12040,10 +12708,18 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
if ( pass ) {
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running test %d.\n", fcn_name, 64);
+ }
+
check_flush_cache__single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -12061,6 +12737,10 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
/* expected_flushed */ FALSE,
/* expected_destroyed */ TRUE
);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: pass = %d.\n", fcn_name, (int)pass);
+ }
}
@@ -12409,6 +13089,11 @@ check_flush_cache__single_entry(H5C_t * cache_ptr)
i = 0;
while ( ( pass ) && ( i < 256 ) )
{
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: running pinned test %d.\n", fcn_name, i);
+ }
+
check_flush_cache__pinned_single_entry_test
(
/* cache_ptr */ cache_ptr,
@@ -15967,7 +16652,7 @@ static void
check_duplicate_insert_err(void)
{
const char * fcn_name = "check_duplicate_insert_err";
- herr_t result;
+ herr_t result = -1;
H5C_t * cache_ptr = NULL;
test_entry_t * base_addr;
test_entry_t * entry_ptr;
@@ -28154,6 +28839,878 @@ check_auto_cache_resize_aux_fcns(void)
/*-------------------------------------------------------------------------
+ * Function: check_metadata_blizzard_absence()
+ *
+ * Purpose: Test to verify that a 'metadata blizzard' can not occur
+ * upon insertion into the cache.
+ *
+ * A 'metadata blizzard' in this context occurs when the cache
+ * gets completely filled with all dirty entries. Upon needing
+ * to make space in the cache, the cache then has no clean
+ * entries ready to evict, and must clean every dirty entry
+ * in the cache first, due to the second chance replacement
+ * policy. (i.e. after cleaning an entry, it is bumped to the
+ * top of the LRU to make a second pass before eviction).
+ * The massive amount of sequential writes to disk while
+ * flushing the entire cache is what constitutes a 'metadata
+ * blizzard'.
+ *
+ * Return: void
+ *
+ * Programmer: Mike McGreevy
+ * <mamcgree@hdfgroup.org>
+ * 12/16/08
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_metadata_blizzard_absence(hbool_t fill_via_insertion)
+{
+ const char * fcn_name = "check_metadata_blizzard_absence";
+ int entry_type = HUGE_ENTRY_TYPE;
+ size_t entry_size = HUGE_ENTRY_SIZE; /* 16 KB */
+ H5C_t * cache_ptr = NULL;
+ hbool_t show_progress = FALSE;
+ int32_t checkpoint = 0;
+ int32_t entry_idx = 0;
+ int32_t i;
+
+ /* Expected loaded status of entries depends on how they get into
+ * the cache. Insertions = not loaded, protect/unprotect = loaded.
+ */
+ hbool_t loaded = !(fill_via_insertion);
+
+ /* Set up the expected array. This is used to maintain a table of the
+ * expected status of every entry used in this test.
+ */
+ struct expected_entry_status expected[150] =
+ {
+ /* entry entry in at main */
+ /* type: index: size: cache: addr: dirty: prot: pinned: loaded: clrd: flshd: dest: */
+ { entry_type, 0, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 1, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 2, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 3, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 4, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 5, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 6, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 7, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 8, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 9, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 10, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 11, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 12, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 13, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 14, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 15, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 16, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 17, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 18, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 19, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 20, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 21, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 22, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 23, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 24, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 25, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 26, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 27, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 28, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 29, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 30, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 31, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 32, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 33, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 34, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 35, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 36, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 37, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 38, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 39, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 40, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 41, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 42, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 43, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 44, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 45, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 46, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 47, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 48, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 49, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 50, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 51, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 52, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 53, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 54, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 55, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 56, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 57, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 58, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 59, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 60, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 61, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 62, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 63, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 64, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 65, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 66, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 67, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 68, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 69, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 70, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 71, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 72, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 73, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 74, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 75, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 76, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 77, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 78, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 79, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 80, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 81, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 82, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 83, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 84, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 85, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 86, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 87, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 88, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 89, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 90, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 91, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 92, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 93, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 94, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 95, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 96, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 97, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 98, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 99, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 100, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 101, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 102, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 103, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 104, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 105, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 106, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 107, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 108, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 109, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 110, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 111, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 112, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 113, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 114, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 115, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 116, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 117, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 118, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 119, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 120, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 121, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 122, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 123, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 124, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 125, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 126, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 127, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 128, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 129, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 130, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 131, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 132, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 133, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 134, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 135, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 136, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 137, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 138, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 139, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 140, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 141, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 142, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 143, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 144, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 145, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 146, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 147, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 148, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE },
+ { entry_type, 149, entry_size, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE }
+ };
+
+ pass = TRUE;
+
+ if (fill_via_insertion) {
+
+ TESTING("to ensure metadata blizzard absence when inserting");
+
+ } else {
+
+ TESTING("to ensure metadata blizzard absence on protect/unprotect");
+ }
+
+ if ( show_progress) /* 0 */
+ HDfprintf(stdout, "\n%s: check point %d -- pass %d\n",
+ fcn_name, checkpoint++, pass);
+
+ if ( pass ) {
+
+ /* Set up the cache.
+ *
+ * The max_cache_size should have room for 50 entries.
+ * The min_clean_size is half of that, or 25 entries.
+ */
+ cache_ptr = setup_cache((size_t)(50 * entry_size), /* max_cache_size */
+ (size_t)(25 * entry_size)); /* min_clean_size */
+
+ if ( cache_ptr == NULL) {
+
+ pass = FALSE;
+ failure_mssg = "bad return from cache intialization.\n";
+ }
+ }
+
+ if ( show_progress) /* 1 */
+ HDfprintf(stdout, "%s: check point %d -- pass %d\n",
+ fcn_name, checkpoint++, pass);
+
+ /* ========================================================================
+ * ========================================================================
+ * Phase 1:
+ *
+ * Inserting dirty entries into an empty cache, until the cache
+ * violates the min_clean_size requirement. The expected result is
+ * that none of the inserted entries during this phase will get
+ * flushed or evicted.
+ *
+ * This verifies that while maintaining min_clean_size, we don't go
+ * overboard and flush entries that we don't need to flush.
+ *
+ * ========================================================================
+ * ========================================================================
+ */
+
+ if ( pass ) {
+
+ /* Insert 26 entries (indexes 0 through 25) into the cache.
+ *
+ * Note that we are inserting 26 entries, and not 25, because the cache
+ * will only try to adhere to the min_clean_size if it's currently
+ * being violated. Thus, on insertion of the 26th entry, since the
+ * min_clean_size will not be violated, it will accept the insertion
+ * without having to make clean space.
+ */
+
+ for (entry_idx = 0; entry_idx < 26; entry_idx++) {
+
+ if (fill_via_insertion) {
+ insert_entry(cache_ptr, /* H5C_t * cache_ptr */
+ entry_type, /* int32_t type */
+ entry_idx, /* int32_t idx */
+ TRUE, /* hbool_t dirty */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+
+ } else {
+ protect_entry(cache_ptr, /* H5C_t * cache_ptr */
+ entry_type, /* int32_t type */
+                              entry_idx);         /* int32_t idx */
+
+ unprotect_entry(cache_ptr, /* H5C_t * cache_ptr */
+ entry_type, /* int32_t type */
+ entry_idx, /* int32_t idx */
+                                TRUE,               /* int32_t dirty */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+ }
+
+ /* Change expected values, and verify the status of the entries
+ * after each insertion
+ */
+ expected[entry_idx].in_cache = TRUE;
+ expected[entry_idx].is_dirty = TRUE;
+ expected[entry_idx].loaded = loaded;
+
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ entry_idx, /* int tag */
+ 150, /* int num_entries */
+                                expected);  /* struct expected_entry_status[] */
+ }
+ }
+
+ if ( show_progress) /* 2 */
+ HDfprintf(stdout, "%s: check point %d -- pass %d\n",
+ fcn_name, checkpoint++, pass);
+
+ /* ========================================================================
+ * ========================================================================
+ * Phase 2:
+ *
+ * Inserting entries into a cache that violates the min_clean_size,
+ * until the cache is full. The expected result is that each insertion
+ * will result in the flushing of a dirty entry in the cache.
+ *
+ * This verifies that we maintain the min_clean_size. By doing so, we
+ * prevent building the situation in which a 'metadata blizzard' would
+ * occur (i.e., the cache being completely filled with dirty entries).
+ *
+ * ========================================================================
+ * ========================================================================
+ */
+
+ if ( pass ) {
+
+ /* Insert the 27th entry (index = 26) into the cache.
+ *
+ * This should cause the cache to flush its least recently used entry
+ * before the insertion because it doesn't satisfy the min_clean_size
+ * constraint.
+ */
+ if (fill_via_insertion) {
+ insert_entry(cache_ptr, /* H5C_t * cache_ptr */
+ entry_type, /* int32_t type */
+ entry_idx++, /* int32_t idx */
+ TRUE, /* hbool_t dirty */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+ } else {
+ protect_entry(cache_ptr, /* H5C_t * cache_ptr */
+ entry_type, /* int32_t type */
+                          entry_idx);         /* int32_t idx */
+
+ unprotect_entry(cache_ptr, /* H5C_t * cache_ptr */
+ entry_type, /* int32_t type */
+ entry_idx++, /* int32_t idx */
+                            TRUE,               /* int32_t dirty */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+ }
+
+ /* Verify the status of the entries.
+ *
+ * Expected status is that there are 27 entries in the cache, and
+ * entry number 0 has been cleaned.
+ *
+ * Changes from last entry verification:
+ * - entry w/ index 0 has now been flushed and is now clean.
+ * - entry w/ index 26 is now in the cache and dirty.
+ */
+
+ /* entry w/ index 0 has now been flushed and is now clean. */
+ expected[0].flushed = TRUE;
+ expected[0].is_dirty = FALSE;
+
+ /* entry w/ index 26 is now in the cache and dirty. */
+ expected[26].in_cache = TRUE;
+ expected[26].is_dirty = TRUE;
+ expected[26].loaded = loaded;
+
+ /* verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ 26, /* int tag */
+ 150, /* int num_entries */
+ expected); /* expected */
+ }
+
+ if ( show_progress) /* 3 */
+ HDfprintf(stdout, "%s: check point %d -- pass %d\n",
+ fcn_name, checkpoint++, pass);
+
+ if ( pass ) {
+
+ /* Insert the 28th entry (index = 27) into the cache.
+ *
+ * This should, once again, cause the cache to flush its least
+ * recently used entry before the insertion as it again does not
+ * satisfy the min_clean_size constraint.
+ */
+ if (fill_via_insertion) {
+ insert_entry(cache_ptr, /* H5C_t * cache_ptr */
+ entry_type, /* int32_t type */
+ entry_idx++, /* int32_t idx */
+ TRUE, /* hbool_t dirty */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+ } else {
+ protect_entry(cache_ptr, /* H5C_t * cache_ptr */
+ entry_type, /* int32_t type */
+                          entry_idx);         /* int32_t idx */
+
+ unprotect_entry(cache_ptr, /* H5C_t * cache_ptr */
+ entry_type, /* int32_t type */
+ entry_idx++, /* int32_t idx */
+                            TRUE,               /* int32_t dirty */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+ }
+
+ /* Verify the status of the entries.
+ *
+ * Expected status is that there are 28 entries in the cache, and
+ * entry numbers 0 and 1 have been cleaned.
+ *
+ * Changes from last entry verification:
+ * - entry w/ index 1 has now been flushed and is now clean.
+ * - entry w/ index 27 is now in the cache and dirty.
+ */
+
+ /* entry w/ index 1 has now been flushed and is now clean. */
+ expected[1].flushed = TRUE;
+ expected[1].is_dirty = FALSE;
+
+ /* entry w/ index 27 is now in the cache and dirty. */
+ expected[27].in_cache = TRUE;
+ expected[27].is_dirty = TRUE;
+ expected[27].loaded = loaded;
+
+ /* verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ 27, /* int tag */
+ 150, /* int num_entries */
+ expected); /* expected */
+ }
+
+ if ( show_progress) /* 4 */
+ HDfprintf(stdout, "%s: check point %d -- pass %d\n",
+ fcn_name, checkpoint++, pass);
+
+ if ( pass ) {
+
+ /* Fill out the rest of the cache with entries */
+ /* Verify expected status of entries after each insertion */
+ for (entry_idx = entry_idx; entry_idx < 50; entry_idx++) {
+
+ if (fill_via_insertion) {
+ insert_entry(cache_ptr, /* H5C_t * cache_ptr */
+ entry_type, /* int32_t type */
+ entry_idx, /* int32_t idx */
+ TRUE, /* hbool_t dirty */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+ } else {
+ protect_entry(cache_ptr, /* H5C_t * cache_ptr */
+ entry_type, /* int32_t type */
+                              entry_idx);         /* int32_t idx */
+
+ unprotect_entry(cache_ptr, /* H5C_t * cache_ptr */
+ entry_type, /* int32_t type */
+ entry_idx, /* int32_t idx */
+                                TRUE,               /* int32_t dirty */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+ }
+
+            /* Expected status is that after each insertion, the entry
+ * inserted 26 insertions ago has been flushed, and the
+ * entry currently getting inserted is now in the cache and
+ * dirty.
+ */
+ expected[entry_idx - 26].flushed = TRUE;
+ expected[entry_idx - 26].is_dirty = FALSE;
+
+ expected[entry_idx].in_cache = TRUE;
+ expected[entry_idx].is_dirty = TRUE;
+ expected[entry_idx].loaded = loaded;
+
+ /* verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ entry_idx, /* int tag */
+ 150, /* int num_entries */
+ expected); /* expected */
+ }
+
+ /* Verify that the cache is now full */
+ if ( cache_ptr->cache_full != TRUE ) {
+
+ pass = FALSE;
+ failure_mssg = "cache not completely filled.\n";
+ }
+ }
+
+ if ( show_progress) /* 5 */
+ HDfprintf(stdout, "%s: check point %d -- pass %d\n",
+ fcn_name, checkpoint++, pass);
+
+ /* ========================================================================
+ * ========================================================================
+ * Phase 3:
+ *
+ * Inserting entries into a cache that is completely full. Insertions
+ * of new entries will force evictions of old entries, but since the
+ * min_clean_size has been maintained, doing so will not result in
+ * the entire cache getting flushed in order to evict a single entry,
+ * as a clean entry will be available to flush reasonably close to
+ * the bottom of the LRU.
+ *
+ * This verifies that with a maintained min_clean_size, a metadata
+ * blizzard does not occur on insertion.
+ *
+ * ========================================================================
+ * ========================================================================
+ */
+
+ if ( pass ) {
+
+ /* Insert the 51st entry (index = 50) into the cache.
+ *
+ * The cache is full prior to the insertion, so it will
+ * have to evict in order to make room for the new entry.
+ */
+ if (fill_via_insertion) {
+ insert_entry(cache_ptr, /* H5C_t * cache_ptr */
+ entry_type, /* int32_t type */
+ entry_idx++, /* int32_t idx */
+ TRUE, /* hbool_t dirty */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+ } else {
+ protect_entry(cache_ptr, /* H5C_t * cache_ptr */
+ entry_type, /* int32_t type */
+                          entry_idx);         /* int32_t idx */
+
+ unprotect_entry(cache_ptr, /* H5C_t * cache_ptr */
+ entry_type, /* int32_t type */
+ entry_idx++, /* int32_t idx */
+                            TRUE,               /* int32_t dirty */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+ }
+
+ /* Verify the status of the entries.
+ *
+ * Changes from last entry verification:
+ * - entry w/ index 0 has been evicted.
+ * - entries w/ indices 24,25 have now been flushed and are clean.
+ * - entry w/ index 50 is now in the cache and dirty.
+ */
+
+ /* entry w/ index 0 has been evicted. */
+ expected[0].in_cache = FALSE;
+ expected[0].destroyed = TRUE;
+
+ /* entries w/ indices 24,25 have now been flushed and are clean. */
+ expected[24].flushed = TRUE;
+ expected[24].is_dirty = FALSE;
+ expected[25].flushed = TRUE;
+ expected[25].is_dirty = FALSE;
+
+ /* entry w/ index 50 is now in the cache and dirty */
+ expected[50].in_cache = TRUE;
+ expected[50].is_dirty = TRUE;
+ expected[50].loaded = loaded;
+
+ /* verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ 50, /* int tag */
+ 150, /* int num_entries */
+ expected); /* expected */
+ }
+
+ if ( show_progress) /* 6 */
+ HDfprintf(stdout, "%s: check point %d -- pass %d\n",
+ fcn_name, checkpoint++, pass);
+
+
+ if ( pass ) {
+
+ /* Insert 49 more entries (indices 51-99) into the cache.
+ *
+ * The cache will be flushing an entry on each insertion, and
+ * evicting an entry on each insertion.
+ *
+ * After each insertion, verify the expected status of the
+ * entries in the cache.
+ */
+ for (entry_idx = entry_idx; entry_idx < 100; entry_idx++) {
+
+ if (fill_via_insertion) {
+ insert_entry(cache_ptr, /* H5C_t * cache_ptr */
+ entry_type, /* int32_t type */
+ entry_idx, /* int32_t idx */
+ TRUE, /* hbool_t dirty */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+ } else {
+ protect_entry(cache_ptr, /* H5C_t * cache_ptr */
+ entry_type, /* int32_t type */
+                              entry_idx);         /* int32_t idx */
+
+ unprotect_entry(cache_ptr, /* H5C_t * cache_ptr */
+ entry_type, /* int32_t type */
+ entry_idx, /* int32_t idx */
+                                TRUE,               /* int32_t dirty */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+ }
+
+ /* This past inserted entry is now in the cache and dirty */
+ expected[entry_idx].in_cache = TRUE;
+ expected[entry_idx].is_dirty = TRUE;
+ expected[entry_idx].loaded = loaded;
+
+ /* The entry inserted 50 insertions ago has been evicted */
+ expected[entry_idx - 50].in_cache = FALSE;
+ expected[entry_idx - 50].destroyed = TRUE;
+
+ /* If the newly inserted entry is among the first 24
+ * insertions in this loop, then the insertion will
+ * have resulted in a flush of the entry inserted
+ * 25 insertions ago. */
+ if (entry_idx < 75) {
+
+ expected[entry_idx - 25].flushed = TRUE;
+ expected[entry_idx - 25].is_dirty = FALSE;
+ }
+ /* If the newly inserted entry is among the last
+ * 25 insertions in this loop, then the insertion will
+ * have resulted in a flush of the entry inserted 26
+             * insertions ago. This switch is because there were two
+ * consecutive clean entries in the cache (due to 51/49
+ * dirty/clean ratio when full), so instead of
+ * flush-then-evict, it switches to evict-then-flush. */
+ else {
+
+ expected[entry_idx - 26].flushed = TRUE;
+ expected[entry_idx - 26].is_dirty = FALSE;
+ }
+
+ /* Verify this expected status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ entry_idx, /* int tag */
+ 150, /* int num_entries */
+ expected); /* expected */
+ }
+ }
+
+ if ( show_progress) /* 7 */
+ HDfprintf(stdout, "%s: check point %d -- pass %d\n",
+ fcn_name, checkpoint++, pass);
+
+ /* ========================================================================
+ * ========================================================================
+ * Phase 4:
+ *
+ * Flushing the entire cache, and then inserting entries into a cache
+ * that is completely full, but all clean.
+ *
+ * Phases 1 and 2 are then repeated. Rather than inserting dirty entries
+ * into an empty cache, we're inserting into a full cache that's all
+ * clean, thus an eviction occurs before each insertion.
+ *
+ * When the cache finally hits the point of violating the
+ * min_clean_size, the bottom half of the LRU will be filled with
+ * clean entries and the top half will be filled with recently inserted
+ * dirty entries. We'll then verify that an insertion will only evict
+ * one entry and flush one entry, and no more.
+ *
+ * ========================================================================
+ * ========================================================================
+ */
+
+ if ( pass ) {
+
+ /* Flush the cache.
+ *
+ * We're doing this so we can repeat the above insertions, but
+ * starting from a cache filled with clean entries as opposed
+ * to an empty cache.
+ */
+
+ flush_cache(cache_ptr, /* H5C_t * cache_ptr */
+                    FALSE,          /* hbool_t destroy_entries */
+ FALSE, /* hbool_t dump_stats */
+ FALSE); /* hbool_t dump_detailed_stats */
+
+ /* Verify that the cache is clean */
+ verify_clean();
+
+ /* Verify the status of the entries.
+ *
+ * Changes from last entry verification:
+ * - entries w/ indices 74-99 have been flushed.
+ */
+
+ /* entries w/ indices 74-99 have been flushed. */
+ for (i = 74; i < 100; i++) {
+
+ expected[i].flushed = TRUE;
+ expected[i].is_dirty = FALSE;
+ }
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ 0, /* int tag */
+ 150, /* int num_entries */
+ expected); /* expected */
+ }
+
+ if ( show_progress) /* 8 */
+ HDfprintf(stdout, "%s: check point %d -- pass %d\n",
+ fcn_name, checkpoint++, pass);
+
+ if ( pass ) {
+
+ /* Insert 26 entries (indexes 100 through 125) into the cache.
+ *
+ * The cache will evict 26 entries since it's currently full with
+ * all clean entries. None of the entries we're inserting now
+ * will get cleaned, however.
+ */
+
+ for (entry_idx = 100; entry_idx < 126; entry_idx++) {
+
+ if (fill_via_insertion) {
+ insert_entry(cache_ptr, /* H5C_t * cache_ptr */
+ entry_type, /* int32_t type */
+ entry_idx, /* int32_t idx */
+ TRUE, /* hbool_t dirty */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+
+ } else {
+ protect_entry(cache_ptr, /* H5C_t * cache_ptr */
+ entry_type, /* int32_t type */
+                              entry_idx);        /* int32_t idx */
+
+ unprotect_entry(cache_ptr, /* H5C_t * cache_ptr */
+ entry_type, /* int32_t type */
+ entry_idx, /* int32_t idx */
+                                TRUE,              /* hbool_t dirty */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+ }
+
+            /* This just-inserted entry is now in the cache and dirty */
+ expected[entry_idx].in_cache = TRUE;
+ expected[entry_idx].is_dirty = TRUE;
+ expected[entry_idx].loaded = loaded;
+
+ /* The entry with ID minus 50 will have been evicted */
+ expected[entry_idx - 50].in_cache = FALSE;
+ expected[entry_idx - 50].destroyed = TRUE;
+
+ /* verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ entry_idx, /* int tag */
+ 150, /* int num_entries */
+                                expected);  /* struct expected_entry_status[] */
+ }
+ }
+
+ if ( show_progress) /* 9 */
+ HDfprintf(stdout, "%s: check point %d -- pass %d\n",
+ fcn_name, checkpoint++, pass);
+
+ if ( pass ) {
+
+ /* Insert the 127th entry (index = 126) into the cache. */
+ if (fill_via_insertion) {
+ insert_entry(cache_ptr, /* H5C_t * cache_ptr */
+ entry_type, /* int32_t type */
+ entry_idx++, /* int32_t idx */
+ TRUE, /* hbool_t dirty */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+ } else {
+ protect_entry(cache_ptr, /* H5C_t * cache_ptr */
+ entry_type, /* int32_t type */
+                          entry_idx);        /* int32_t idx */
+
+ unprotect_entry(cache_ptr, /* H5C_t * cache_ptr */
+ entry_type, /* int32_t type */
+ entry_idx++, /* int32_t idx */
+                            TRUE,              /* hbool_t dirty */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+ }
+
+ /* Verify the status of the entries.
+ *
+ * Changes from last entry verification:
+ * - entry w/ index 76 is evicted.
+ * - entry w/ index 100 is cleaned.
+ * - entry w/ index 126 is now in the cache and dirty.
+ */
+
+ /* entry w/ index 76 has been evicted. */
+ expected[76].in_cache = FALSE;
+ expected[76].destroyed = TRUE;
+
+ /* entry w/ index 100 has now been flushed and is now clean. */
+ expected[100].flushed = TRUE;
+ expected[100].is_dirty = FALSE;
+
+        /* entry w/ index 126 is now in the cache and dirty. */
+ expected[126].in_cache = TRUE;
+ expected[126].is_dirty = TRUE;
+ expected[126].loaded = loaded;
+
+ /* verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ 126, /* int tag */
+ 150, /* int num_entries */
+ expected); /* expected */
+ }
+
+ if ( show_progress) /* 10 */
+ HDfprintf(stdout, "%s: check point %d -- pass %d\n",
+ fcn_name, checkpoint++, pass);
+
+ if ( pass ) {
+
+ /* Insert entries w/ indices 127 through 149 into the cache */
+ for (entry_idx = 127; entry_idx < 150; entry_idx++) {
+
+ if (fill_via_insertion) {
+ insert_entry(cache_ptr, /* H5C_t * cache_ptr */
+ entry_type, /* int32_t type */
+ entry_idx, /* int32_t idx */
+ TRUE, /* hbool_t dirty */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+
+ } else {
+ protect_entry(cache_ptr, /* H5C_t * cache_ptr */
+ entry_type, /* int32_t type */
+                              entry_idx);        /* int32_t idx */
+
+ unprotect_entry(cache_ptr, /* H5C_t * cache_ptr */
+ entry_type, /* int32_t type */
+ entry_idx, /* int32_t idx */
+                                TRUE,              /* hbool_t dirty */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+ }
+
+            /* This just-inserted entry is now in the cache and dirty */
+ expected[entry_idx].in_cache = TRUE;
+ expected[entry_idx].is_dirty = TRUE;
+ expected[entry_idx].loaded = loaded;
+
+ /* The entry with ID minus 50 will have been evicted */
+ expected[entry_idx - 50].in_cache = FALSE;
+ expected[entry_idx - 50].destroyed = TRUE;
+
+ /* The entry with ID minus 26 will now be clean */
+ expected[entry_idx - 26].flushed = TRUE;
+ expected[entry_idx - 26].is_dirty = FALSE;
+
+ /* verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ entry_idx, /* int tag */
+ 150, /* int num_entries */
+                                expected);  /* struct expected_entry_status[] */
+ }
+
+ }
+
+ if ( show_progress) /* 11 */
+ HDfprintf(stdout, "%s: check point %d -- pass %d\n",
+ fcn_name, checkpoint++, pass);
+
+ if ( pass ) {
+
+ /* We're done with testing. We can take down the cache. */
+ takedown_cache(cache_ptr, FALSE, FALSE);
+ reset_entries();
+ }
+
+ if ( show_progress) /* 12 */
+ HDfprintf(stdout, "%s: check point %d -- pass %d\n",
+ fcn_name, checkpoint++, pass);
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ return;
+
+} /* check_metadata_blizzard_absence() */
+
+
+/*-------------------------------------------------------------------------
* Function: main
*
* Purpose: Run tests on the cache code contained in H5C.c
@@ -28231,7 +29788,7 @@ main(void)
check_auto_cache_resize_epoch_markers();
check_auto_cache_resize_input_errs();
check_auto_cache_resize_aux_fcns();
-
+ check_metadata_blizzard_absence(TRUE);
+ check_metadata_blizzard_absence(FALSE);
return(0);
-
-} /* main() */
+}
diff --git a/test/cache_common.c b/test/cache_common.c
index c2ce274..b76e9f9 100644
--- a/test/cache_common.c
+++ b/test/cache_common.c
@@ -1133,7 +1133,7 @@ variable_size(H5F_t * f, void * thing, size_t * size_ptr)
/*-------------------------------------------------------------------------
* Function: add_flush_op
*
- * Purpose: Do noting if pass is FALSE on entry.
+ * Purpose: Do nothing if pass is FALSE on entry.
*
* Otherwise, add the specified flush operation to the
* target instance of test_entry_t.
@@ -1199,7 +1199,7 @@ add_flush_op(int target_type,
/*-------------------------------------------------------------------------
* Function: create_pinned_entry_dependency
*
- * Purpose: Do noting if pass is FALSE on entry.
+ * Purpose: Do nothing if pass is FALSE on entry.
*
* Otherwise, set up a pinned entry dependency so we can
* test the pinned entry modifications to the flush routine.
@@ -2339,7 +2339,9 @@ flush_cache(H5C_t * cache_ptr,
hbool_t dump_stats,
hbool_t dump_detailed_stats)
{
+ const char * fcn_name = "flush_cache()";
herr_t result = 0;
+ hbool_t verbose = TRUE;
HDassert(cache_ptr);
@@ -2369,6 +2371,26 @@ flush_cache(H5C_t * cache_ptr,
pass = FALSE;
failure_mssg = "error in H5C_flush_cache().";
}
+ else if ( ( destroy_entries ) &&
+ ( ( cache_ptr->index_len != 0 ) ||
+ ( cache_ptr->index_size != 0 ) ||
+ ( cache_ptr->clean_index_size != 0 ) ||
+ ( cache_ptr->dirty_index_size != 0 ) ) ) {
+
+ if ( verbose ) {
+ HDfprintf(stdout,
+ "%s: unexpected il/is/cis/dis = %lld/%lld/%lld/%lld.\n",
+ fcn_name,
+ (long long)(cache_ptr->index_len),
+ (long long)(cache_ptr->index_size),
+ (long long)(cache_ptr->clean_index_size),
+ (long long)(cache_ptr->dirty_index_size));
+ }
+ pass = FALSE;
+ failure_mssg =
+ "non zero index len/sizes after H5C_flush_cache() with invalidate.";
+ }
+
return;
@@ -3229,12 +3251,13 @@ unprotect_entry_with_size_change(H5C_t * cache_ptr,
unsigned int flags,
size_t new_size)
{
- /* const char * fcn_name = "unprotect_entry_with_size_change()"; */
+ const char * fcn_name = "unprotect_entry_with_size_change()";
herr_t result;
hbool_t dirty_flag_set;
hbool_t pin_flag_set;
hbool_t unpin_flag_set;
hbool_t size_changed_flag_set;
+ hbool_t verbose = FALSE;
test_entry_t * base_addr;
test_entry_t * entry_ptr;
@@ -3285,6 +3308,40 @@ unprotect_entry_with_size_change(H5C_t * cache_ptr,
( entry_ptr->size != entry_ptr->header.size ) ||
( entry_ptr->addr != entry_ptr->header.addr ) ) {
+ if ( verbose ) {
+
+ if ( result < 0 ) {
+ HDfprintf(stdout, "%s: H5C_unprotect() failed.\n", fcn_name);
+ H5Eprint(H5E_DEFAULT, stdout);
+ }
+
+ if ( entry_ptr->header.is_protected ) {
+ HDfprintf(stdout, "%s: entry still protected?!?.\n",
+ fcn_name);
+ }
+
+ if ( entry_ptr->header.type != &(types[type]) ) {
+ HDfprintf(stdout,
+ "%s: entry has bad type after unprotect.\n",
+ fcn_name);
+ }
+
+ if ( entry_ptr->size != entry_ptr->header.size ) {
+ HDfprintf(stdout,
+ "%s: bad entry size after unprotect. e/a = %d/%d\n",
+ fcn_name,
+ (int)(entry_ptr->size),
+ (int)(entry_ptr->header.size));
+ }
+
+ if ( entry_ptr->addr != entry_ptr->header.addr ) {
+ HDfprintf(stdout,
+ "%s: bad entry addr after unprotect. e/a = 0x%llx/0x%llx\n",
+ fcn_name,
+ (long long)(entry_ptr->addr),
+ (long long)(entry_ptr->header.addr));
+ }
+ }
pass = FALSE;
failure_mssg = "error in H5C_unprotect().";