From 5c415042a3e1d5480ff5309ddce29b4f6be753d7 Mon Sep 17 00:00:00 2001 From: John Mainzer Date: Fri, 17 Dec 2004 20:30:34 -0500 Subject: [svn-r9687] Purpose: Modify the cache code (H5C) to support automatic cache resizing to adapt to the work load at run time. Description: Different applications require different sized caches to maintain an acceptable hit rate. This set of changes attempts to provide the ability to adjust to circumstances automatically. Solution: Added highly configurable code to allow the user to either set a fixed cache size, or allow the cache to grow and shrink according to conditions. If enabled, cache size increases are triggered when the hit rate drops below a user specified threshold in a user specified interval. Cache size reductions (if enabled) are triggered when either the hit rate exceeds some user specified threshold over a user specified interval, when the cache contains "enough" entries that haven't been accessed for a user specified interval, or some mix of the above. See the header comments on the H5C_auto_size_ctl_t structure in H5Cprivate.h for further details. At present, the cache resize configuration options are not accessible via the user API. Must add this. Platforms tested: h5committested, heping (serial), and copper (parallel) Misc. update: --- src/H5AC.c | 78 +- src/H5C.c | 3403 ++++++++++++---- src/H5Cpkg.h | 686 ++++ src/H5Cprivate.h | 355 +- src/Makefile.in | 17 +- test/cache.c | 11191 ++++++++++++++++++++++++++++++++++++++++++++++++++--- 6 files changed, 14344 insertions(+), 1386 deletions(-) create mode 100644 src/H5Cpkg.h diff --git a/src/H5AC.c b/src/H5AC.c index a051460..c0c22fc 100644 --- a/src/H5AC.c +++ b/src/H5AC.c @@ -352,8 +352,60 @@ const char * H5AC_entry_type_names[H5AC_NTYPES] = int H5AC_create(const H5F_t *f, int UNUSED size_hint) { - H5AC_t *cache = NULL; - int ret_value=SUCCEED; /* Return value */ + H5AC_t * cache_ptr = NULL; + int ret_value = SUCCEED; /* Return value */ +#if 1 /* JRM */ /* test code -- remove when done */ + H5C_auto_size_ctl_t auto_size_ctl = + { + /* int32_t version = */ H5C__CURR_AUTO_SIZE_CTL_VER, +#if 1 + /* H5C_auto_resize_report_fcn rpt_fcn = */ NULL, +#else + /* H5C_auto_resize_report_fcn rpt_fcn = */ H5C_def_auto_resize_rpt_fcn, +#endif + /* hbool_t set_initial_size = */ TRUE, + /* size_t initial_size = */ (1 * 1024 * 1024), + + /* double min_clean_fraction = */ 0.25, + + /* size_t max_size = */ (32 * 1024 * 1024), + /* size_t min_size = */ ( 1 * 1024 * 1024), + + /* int64_t epoch_length = */ 50000, + +#if 0 + /* enum H5C_cache_incr_mode incr_mode = */ H5C_incr__off, +#else + /* enum H5C_cache_incr_mode incr_mode = */ H5C_incr__threshold, +#endif + /* double lower_hr_threshold = */ 0.75, + + /* double increment = */ 2.0, + + /* hbool_t apply_max_increment = */ TRUE, + /* size_t max_increment = */ (8 * 1024 * 1024), + +#if 0 + /* enum H5C_cache_decr_mode decr_mode = */ H5C_decr__off, +#else + /* enum H5C_cache_decr_mode decr_mode = */ + H5C_decr__age_out_with_threshold, +#endif + + /* double upper_hr_threshold = */ 0.999, + + /* double decrement = */ 0.9, + + /* hbool_t apply_max_decrement = */ TRUE, + /* size_t max_decrement = */ (1 * 1024 * 1024), + + /* int32_t epochs_before_eviction = */ 3, + + /* hbool_t apply_empty_reserve = */ TRUE, + /* double empty_reserve = */ 0.1 + }; + +#endif /* JRM */ FUNC_ENTER_NOAPI(H5AC_create, FAIL) @@ -364,29 +416,41 @@ H5AC_create(const H5F_t *f, int UNUSED size_hint) * in proper size hints. 
* -- JRM */ - cache = H5C_create(H5C__DEFAULT_MAX_CACHE_SIZE, + cache_ptr = H5C_create(H5C__DEFAULT_MAX_CACHE_SIZE, H5C__DEFAULT_MIN_CLEAN_SIZE, (H5AC_NTYPES - 1), (const char **)H5AC_entry_type_names, H5AC_check_if_write_permitted); - if ( NULL == cache ) { + if ( NULL == cache_ptr ) { HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed") } else { - f->shared->cache = cache; + f->shared->cache = cache_ptr; } +#if 1 /* JRM */ /* test code -- remove when done */ + if ( cache_ptr ) { + + if ( H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl) + != SUCCEED ) { + + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \ + "auto resize config test failed") + } + } +#endif /* JRM */ + done: if ( ret_value < 0 ) { - if ( cache != NULL ) { + if ( cache_ptr != NULL ) { - H5C_dest_empty(cache); + H5C_dest_empty(cache_ptr); f->shared->cache = NULL; } /* end if */ diff --git a/src/H5C.c b/src/H5C.c index af96125..1e2f92a 100644 --- a/src/H5C.c +++ b/src/H5C.c @@ -75,12 +75,14 @@ * * Tests: * - * - Trim execution time. + * - Trim execution time. (This is no longer a major issue with the + * shift from the TBBT to a hash table for indexing.) * * - Add random tests. * **************************************************************************/ +#define H5C_PACKAGE /*suppress error about including H5Cpkg */ #define H5F_PACKAGE /*suppress error about including H5Fpkg */ /* Pablo information */ @@ -88,7 +90,7 @@ #define PABLO_MASK H5C_mask #include "H5private.h" /* Generic Functions */ -#include "H5Cprivate.h" /* Cache */ +#include "H5Cpkg.h" /* Cache */ #include "H5Dprivate.h" /* Dataset functions */ #include "H5Eprivate.h" /* Error handling */ #include "H5Fpkg.h" /* Files */ @@ -120,6 +122,28 @@ * do not reference the sanity checking macros. * JRM - 5/5/04 * + * Changes: + * + * - Removed the line: + * + * ( ( (Size) == (entry_ptr)->size ) && ( (len) != 1 ) ) || + * + * from the H5C__DLL_PRE_REMOVE_SC macro. With the addition of the + * epoch markers used in the age out based cache size reduction algorithm, + * this invarient need not hold, as the epoch markers are of size 0. + * + * One could argue that I should have given the epoch markers a positive + * size, but this would break the index_size = LRU_list_size + pl_size + * invarient. + * + * Alternatively, I could pass the current decr_mode in to the macro, + * and just skip the check whenever epoch markers may be in use. + * + * However, any size errors should be caught when the cache is flushed + * and destroyed. Until we are tracking such an error, this should be + * good enough. + * JRM - 12/9/04 + * ****************************************************************************/ #if H5C_DO_SANITY_CHECKS @@ -130,7 +154,6 @@ if ( ( (head_ptr) == NULL ) || \ ( (entry_ptr) == NULL ) || \ ( (len) <= 0 ) || \ ( (Size) < (entry_ptr)->size ) || \ - ( ( (Size) == (entry_ptr)->size ) && ( (len) != 1 ) ) || \ ( ( (entry_ptr)->prev == NULL ) && ( (head_ptr) != (entry_ptr) ) ) || \ ( ( (entry_ptr)->next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) || \ ( ( (len) == 1 ) && \ @@ -416,8 +439,18 @@ if ( ( (entry_ptr) == NULL ) || \ * The following macros must handle stats collection when this collection * is enabled, and evaluate to the empty string when it is not. * + * The sole exception to this rule is + * H5C__UPDATE_CACHE_HIT_RATE_STATS(), which is always active as + * the cache hit rate stats are always collected and available. 
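For illustration only (not part of this patch; the helper name is hypothetical): the two always-on counters mentioned above, cache_accesses and cache_hits, are the only inputs to the hit rate used by the resize logic. A minimal sketch of that computation, mirroring H5C_get_cache_hit_rate() defined later in this patch:

    static double
    example_hit_rate(H5C_t * cache_ptr)
    {
        /* guard against division by zero when there have been no accesses */
        if ( cache_ptr->cache_accesses > 0 ) {

            return ( ((double)(cache_ptr->cache_hits)) /
                     ((double)(cache_ptr->cache_accesses)) );
        }

        return 0.0;
    }
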
+ * ***********************************************************************/ +#define H5C__UPDATE_CACHE_HIT_RATE_STATS(cache_ptr, hit) \ + (cache_ptr->cache_accesses)++; \ + if ( hit ) { \ + (cache_ptr->cache_hits)++; \ + } \ + #if H5C_COLLECT_CACHE_STATS #define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) \ @@ -581,7 +614,7 @@ if ( ( (entry_ptr) == NULL ) || \ * ***********************************************************************/ -#define H5C__HASH_TABLE_LEN (32 * 1024) /* must be a power of 2 */ +/* H5C__HASH_TABLE_LEN is defined in H5Cpkg.h. It mut be a power of two. */ #define H5C__HASH_MASK ((size_t)(H5C__HASH_TABLE_LEN - 1) << 3) @@ -807,8 +840,10 @@ if ( ( (cache_ptr) == NULL ) || \ HDassert( H5F_addr_defined((entry_ptr)->addr) ); \ HDassert( !((entry_ptr)->in_slist) ); \ \ - if ( H5SL_insert((cache_ptr)->slist_ptr, &entry_ptr->addr, entry_ptr) < 0 ) \ - HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Can't insert entry in skip list") \ + if ( H5SL_insert((cache_ptr)->slist_ptr, &(entry_ptr)->addr, entry_ptr) \ + < 0 ) \ + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, \ + "Can't insert entry in skip list") \ \ (entry_ptr)->in_slist = TRUE; \ (cache_ptr)->slist_len++; \ @@ -1554,556 +1589,202 @@ if ( ( (cache_ptr) == NULL ) || \ #endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ +/* + * Private file-scope variables. + */ + +/* Declare a free list to manage the H5C_t struct */ +H5FL_DEFINE_STATIC(H5C_t); + +/* + * Private file-scope function declarations: + */ + +static herr_t H5C__auto_adjust_cache_size(H5C_t * cache_ptr, + H5F_t * f, + hid_t primary_dxpl_id, + hid_t secondary_dxpl_id, + hbool_t write_permitted, + hbool_t * first_flush_ptr); + +static herr_t H5C__autoadjust__ageout(H5C_t * cache_ptr, + double hit_rate, + enum H5C_resize_status * status_ptr, + size_t * new_max_cache_size_ptr, + H5F_t * f, + hid_t primary_dxpl_id, + hid_t secondary_dxpl_id, + hbool_t write_permitted, + hbool_t * first_flush_ptr); + +static herr_t H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t * cache_ptr); + +static herr_t H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f, + hid_t primary_dxpl_id, + hid_t secondary_dxpl_id, + H5C_t * cache_ptr, + hbool_t write_permitted, + hbool_t * first_flush_ptr); + +static herr_t H5C__autoadjust__ageout__insert_new_marker(H5C_t * cache_ptr); + +static herr_t H5C__autoadjust__ageout__remove_all_markers(H5C_t * cache_ptr); + +static herr_t H5C__autoadjust__ageout__remove_excess_markers(H5C_t * cache_ptr); + +static herr_t H5C_flush_single_entry(H5F_t * f, + hid_t primary_dxpl_id, + hid_t secondary_dxpl_id, + H5C_t * cache_ptr, + const H5C_class_t * type_ptr, + haddr_t addr, + unsigned flags, + hbool_t * first_flush_ptr, + hbool_t del_entry_from_slist_on_destroy); + +static void * H5C_load_entry(H5F_t * f, + hid_t dxpl_id, + const H5C_class_t * type, + haddr_t addr, + const void * udata1, + void * udata2, + hbool_t skip_file_checks); + +static herr_t H5C_make_space_in_cache(H5F_t * f, + hid_t primary_dxpl_id, + hid_t secondary_dxpl_id, + H5C_t * cache_ptr, + size_t space_needed, + hbool_t write_permitted, + hbool_t * first_flush_ptr); + + /**************************************************************************** * - * structure H5C_t - * - * Catchall structure for all variables specific to an instance of the cache. - * - * While the individual fields of the structure are discussed below, the - * following overview may be helpful. - * - * Entries in the cache are stored in an instance of H5TB_TREE, indexed on - * the entry's disk address. 
While the H5TB_TREE is less efficient than - * hash table, it keeps the entries in address sorted order. As flushes - * in parallel mode are more efficient if they are issued in increasing - * address order, this is a significant benefit. Also the H5TB_TREE code - * was readily available, which reduced development time. - * - * While the cache was designed with multiple replacement policies in mind, - * at present only a modified form of LRU is supported. - * - * JRM - 4/26/04 - * - * Profiling has indicated that searches in the instance of H5TB_TREE are - * too expensive. To deal with this issue, I have augmented the cache - * with a hash table in which all entries will be stored. Given the - * advantages of flushing entries in increasing address order, the TBBT - * is retained, but only dirty entries are stored in it. At least for - * now, we will leave entries in the TBBT after they are flushed. - * - * Note that index_size and index_len now refer to the total size of - * and number of entries in the hash table. - * - * JRM - 7/19/04 - * - * Note that the dirty entries are now stored in a skip list, instead of - * the threaded, balanced binary tree (TBBT). - * - * QAK - 11/27/04 - * - * ********************************************* - * - * WARNING: A copy of H5C_t is in tst/cache.c (under the name "local_H5C_t" - * to allow the test code to access the internal fields of the - * cache. If you modify H5C_t, be sure to update local_H5C_t - * in cache.c as well. - * - * ********************************************* - * - * magic: Unsigned 32 bit integer always set to H5C__H5C_T_MAGIC. This - * field is used to validate pointers to instances of H5C_t. - * - * max_type_id: Integer field containing the maximum type id number assigned - * to a type of entry in the cache. All type ids from 0 to - * max_type_id inclusive must be defined. The names of the - * types are stored in the type_name_table discussed below, and - * indexed by the ids. - * - * type_name_table_ptr: Pointer to an array of pointer to char of length - * max_type_id + 1. The strings pointed to by the entries - * in the array are the names of the entry types associated - * with the indexing type IDs. - * - * max_cache_size: Nominal maximum number of bytes that may be stored in the - * cache. This value should be viewed as a soft limit, as the - * cache can exceed this value under the following circumstances: - * - * a) All entries in the cache are protected, and the cache is - * asked to insert a new entry. In this case the new entry - * will be created. If this causes the cache to exceed - * max_cache_size, it will do so. The cache will attempt - * to reduce its size as entries are unprotected. - * - * b) When running in parallel mode, the cache may not be - * permitted to flush a dirty entry in response to a read. - * If there are no clean entries available to evict, the - * cache will exceed its maximum size. Again the cache - * will attempt to reduce its size to the max_cache_size - * limit on the next cache write. - * - * min_clean_size: Nominal minimum number of clean bytes in the cache. - * The cache attempts to maintain this number of bytes of - * clean data so as to avoid case b) above. Again, this is - * a soft limit. - * - * - * In addition to the call back functions required for each entry, the - * cache requires the following call back functions for this instance of - * the cache as a whole: - * - * check_write_permitted: In certain applications, the cache may not - * be allowed to write to disk at certain time. 
If specified, - * the check_write_permitted function is used to determine if - * a write is permissible at any given point in time. - * - * If no such function is specified (i.e. this field is NULL), - * the cache will presume that writes are always permissable. - * - * - * The cache requires an index to facilitate searching for entries. The - * following fields support that index. - * - * index_len: Number of entries currently in the hash table used to index - * the cache. - * - * index_size: Number of bytes of cache entries currently stored in the - * hash table used to index the cache. - * - * This value should not be mistaken for footprint of the - * cache in memory. The average cache entry is small, and - * the cache has a considerable overhead. Multiplying the - * index_size by two should yield a conservative estimate - * of the cache's memory footprint. - * - * index: Array of pointer to H5C_cache_entry_t of size - * H5C__HASH_TABLE_LEN. At present, this value is a power - * of two, not the usual prime number. - * - * I hope that the variable size of cache elements, the large - * hash table size, and the way in which HDF5 allocates space - * will combine to avoid problems with periodicity. If so, we - * can use a trivial hash function (a bit-and and a 3 bit left - * shift) with some small savings. - * - * If not, it will become evident in the statistics. Changing - * to the usual prime number length hash table will require - * changing the H5C__HASH_FCN macro and the deletion of the - * H5C__HASH_MASK #define. No other changes should be required. - * - * - * When we flush the cache, we need to write entries out in increasing - * address order. An instance of a skip list is used to store dirty entries in - * sorted order. Whether it is cheaper to sort the dirty entries as needed, - * or to maintain the list is an open question. At a guess, it depends - * on how frequently the cache is flushed. We will see how it goes. - * - * For now at least, I will not remove dirty entries from the list as they - * are flushed. - * - * slist_len: Number of entries currently in the skip list - * used to maintain a sorted list of dirty entries in the - * cache. - * - * slist_size: Number of bytes of cache entries currently stored in the - * skip list used to maintain a sorted list of - * dirty entries in the cache. - * - * slist_ptr: pointer to the instance of H5SL_t used maintain a sorted - * list of dirty entries in the cache. This sorted list has - * two uses: - * - * a) It allows us to flush dirty entries in increasing address - * order, which results in significant savings. - * - * b) It facilitates checking for adjacent dirty entries when - * attempting to evict entries from the cache. While we - * don't use this at present, I hope that this will allow - * some optimizations when I get to it. - * - * - * When a cache entry is protected, it must be removed from the LRU - * list(s) as it cannot be either flushed or evicted until it is unprotected. - * The following fields are used to implement the protected list (pl). - * - * pl_len: Number of entries currently residing on the protected list. - * - * pl_size: Number of bytes of cache entries currently residing on the - * protected list. - * - * pl_head_ptr: Pointer to the head of the doubly linked list of protected - * entries. Note that cache entries on this list are linked - * by their next and prev fields. - * - * This field is NULL if the list is empty. 
- * - * pl_tail_ptr: Pointer to the tail of the doubly linked list of protected - * entries. Note that cache entries on this list are linked - * by their next and prev fields. - * - * This field is NULL if the list is empty. - * - * - * The cache must have a replacement policy, and the fields supporting this - * policy must be accessible from this structure. - * - * While there has been interest in several replacement policies for - * this cache, the initial development schedule is tight. Thus I have - * elected to support only a modified LRU policy for the first cut. - * - * To further simplify matters, I have simply included the fields needed - * by the modified LRU in this structure. When and if we add support for - * other policies, it will probably be easiest to just add the necessary - * fields to this structure as well -- we only create one instance of this - * structure per file, so the overhead is not excessive. - * - * - * Fields supporting the modified LRU policy: - * - * See most any OS text for a discussion of the LRU replacement policy. - * - * When operating in parallel mode, we must ensure that a read does not - * cause a write. If it does, the process will hang, as the write will - * be collective and the other processes will not know to participate. - * - * To deal with this issue, I have modified the usual LRU policy by adding - * clean and dirty LRU lists to the usual LRU list. - * - * The clean LRU list is simply the regular LRU list with all dirty cache - * entries removed. - * - * Similarly, the dirty LRU list is the regular LRU list with all the clean - * cache entries removed. - * - * When reading in parallel mode, we evict from the clean LRU list only. - * This implies that we must try to ensure that the clean LRU list is - * reasonably well stocked at all times. - * - * We attempt to do this by trying to flush enough entries on each write - * to keep the cLRU_list_size >= min_clean_size. - * - * Even if we start with a completely clean cache, a sequence of protects - * without unprotects can empty the clean LRU list. In this case, the - * cache must grow temporarily. At the next write, we will attempt to - * evict enough entries to reduce index_size to less than max_cache_size. - * While this will usually be possible, all bets are off if enough entries - * are protected. - * - * Discussions of the individual fields used by the modified LRU replacement - * policy follow: - * - * LRU_list_len: Number of cache entries currently on the LRU list. - * - * Observe that LRU_list_len + pl_len must always equal - * index_len. - * - * LRU_list_size: Number of bytes of cache entries currently residing on the - * LRU list. - * - * Observe that LRU_list_size + pl_size must always equal - * index_size. - * - * LRU_head_ptr: Pointer to the head of the doubly linked LRU list. Cache - * entries on this list are linked by their next and prev fields. - * - * This field is NULL if the list is empty. - * - * LRU_tail_ptr: Pointer to the tail of the doubly linked LRU list. Cache - * entries on this list are linked by their next and prev fields. - * - * This field is NULL if the list is empty. - * - * cLRU_list_len: Number of cache entries currently on the clean LRU list. - * - * Observe that cLRU_list_len + dLRU_list_len must always - * equal LRU_list_len. - * - * cLRU_list_size: Number of bytes of cache entries currently residing on - * the clean LRU list. - * - * Observe that cLRU_list_size + dLRU_list_size must always - * equal LRU_list_size. 
- * - * cLRU_head_ptr: Pointer to the head of the doubly linked clean LRU list. - * Cache entries on this list are linked by their aux_next and - * aux_prev fields. - * - * This field is NULL if the list is empty. + * #defines and declarations for epoch marker cache entries. * - * cLRU_tail_ptr: Pointer to the tail of the doubly linked clean LRU list. - * Cache entries on this list are linked by their aux_next and - * aux_prev fields. - * - * This field is NULL if the list is empty. - * - * dLRU_list_len: Number of cache entries currently on the dirty LRU list. - * - * Observe that cLRU_list_len + dLRU_list_len must always - * equal LRU_list_len. - * - * dLRU_list_size: Number of cache entries currently on the dirty LRU list. - * - * Observe that cLRU_list_len + dLRU_list_len must always - * equal LRU_list_len. - * - * dLRU_head_ptr: Pointer to the head of the doubly linked dirty LRU list. - * Cache entries on this list are linked by their aux_next and - * aux_prev fields. - * - * This field is NULL if the list is empty. - * - * dLRU_tail_ptr: Pointer to the tail of the doubly linked dirty LRU list. - * Cache entries on this list are linked by their aux_next and - * aux_prev fields. - * - * This field is NULL if the list is empty. - * - * - * Statistics collection fields: - * - * When enabled, these fields are used to collect statistics as described - * below. The first set are collected only when H5C_COLLECT_CACHE_STATS - * is true. - * - * hits: Array of int64 of length H5C__MAX_NUM_TYPE_IDS. The cells - * are used to record the number of times an entry with type id - * equal to the array index has been in cache when requested in - * the current epoch. - * - * misses: Array of int64 of length H5C__MAX_NUM_TYPE_IDS. The cells - * are used to record the number of times an entry with type id - * equal to the array index has not been in cache when - * requested in the current epoch. - * - * insertions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS. The cells - * are used to record the number of times an entry with type - * id equal to the array index has been inserted into the - * cache in the current epoch. - * - * clears: Array of int64 of length H5C__MAX_NUM_TYPE_IDS. The cells - * are used to record the number of times an entry with type - * id equal to the array index has been cleared in the current - * epoch. - * - * flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS. The cells - * are used to record the number of times an entry with type id - * equal to the array index has been written to disk in the - * current epoch. - * - * evictions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS. The cells - * are used to record the number of times an entry with type id - * equal to the array index has been evicted from the cache in - * the current epoch. - * - * renames: Array of int64 of length H5C__MAX_NUM_TYPE_IDS. The cells - * are used to record the number of times an entry with type - * id equal to the array index has been renamed in the current - * epoch. - * - * total_ht_insertions: Number of times entries have been inserted into the - * hash table in the current epoch. - * - * total_ht_deletions: Number of times entries have been deleted from the - * hash table in the current epoch. - * - * successful_ht_searches: int64 containing the total number of successful - * searches of the hash table in the current epoch. 
- * - * total_successful_ht_search_depth: int64 containing the total number of - * entries other than the targets examined in successful - * searches of the hash table in the current epoch. - * - * failed_ht_searches: int64 containing the total number of unsuccessful - * searches of the hash table in the current epoch. - * - * total_failed_ht_search_depth: int64 containing the total number of - * entries examined in unsuccessful searches of the hash - * table in the current epoch. - * - * max_index_len: Largest value attained by the index_len field in the - * current epoch. - * - * max_index_size: Largest value attained by the index_size field in the - * current epoch. - * - * max_slist_len: Largest value attained by the slist_len field in the - * current epoch. - * - * max_slist_size: Largest value attained by the slist_size field in the - * current epoch. - * - * max_pl_len: Largest value attained by the pl_len field in the - * current epoch. - * - * max_pl_size: Largest value attained by the pl_size field in the - * current epoch. - * - * The remaining stats are collected only when both H5C_COLLECT_CACHE_STATS - * and H5C_COLLECT_CACHE_ENTRY_STATS are true. - * - * max_accesses: Array of int32 of length H5C__MAX_NUM_TYPE_IDS. The cells - * are used to record the maximum number of times any single - * entry with type id equal to the array index has been - * accessed in the current epoch. - * - * min_accesses: Array of int32 of length H5C__MAX_NUM_TYPE_IDS. The cells - * are used to record the minimum number of times any single - * entry with type id equal to the array index has been - * accessed in the current epoch. - * - * max_clears: Array of int32 of length H5C__MAX_NUM_TYPE_IDS. The cells - * are used to record the maximum number of times any single - * entry with type id equal to the array index has been cleared - * in the current epoch. - * - * max_flushes: Array of int32 of length H5C__MAX_NUM_TYPE_IDS. The cells - * are used to record the maximum number of times any single - * entry with type id equal to the array index has been - * flushed in the current epoch. - * - * max_size: Array of size_t of length H5C__MAX_NUM_TYPE_IDS. The cells - * are used to record the maximum size of any single entry - * with type id equal to the array index that has resided in - * the cache in the current epoch. - * - * - * Fields supporting testing: - * - * For test purposes, it is useful to turn off some asserts and sanity - * checks. The following flags support this. - * - * skip_file_checks: Boolean flag used to skip sanity checks on file - * parameters passed to the cache. In the test bed, there - * is no reason to have a file open, as the cache proper - * just passes these parameters through without using them. - * - * When this flag is set, all sanity checks on the file - * parameters are skipped. The field defaults to FALSE. - * - * skip_dxpl_id_checks: Boolean flag used to skip sanity checks on the - * dxpl_id parameters passed to the cache. These are not - * used directly by the cache, so skipping the checks - * simplifies the test bed. - * - * When this flag is set, all sanity checks on the dxpl_id - * parameters are skipped. The field defaults to FALSE. + * As a strategy for automatic cache size reduction, the cache may insert + * marker entries in the LRU list at the end of each epoch. These markers + * are then used to identify entries that have not been accessed for n + * epochs so that they can be evicted from the cache. 
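The following self-contained toy (not part of the patch; types and names are hypothetical, and it assumes markers are placed at the most-recently-used end of the list at each epoch boundary) illustrates the age-out idea described above: walking from the MRU end toward the LRU tail, once N markers have been passed, every remaining entry has gone at least N epochs without being accessed and is a candidate for eviction. The real code keeps its markers in a ring buffer and cycles them; this sketch shows only the scan.

    #include <stdio.h>

    struct toy_entry {
        int               is_marker;  /* non-zero for an epoch marker      */
        int               id;         /* entry id (or marker epoch number) */
        struct toy_entry *next;       /* next node toward the LRU tail     */
    };

    /* Report every real entry that lies behind the `epochs`-th marker
     * from the MRU end of the list.
     */
    static void
    toy_report_aged_out(const struct toy_entry *mru_head, int epochs)
    {
        int                     markers_passed = 0;
        const struct toy_entry *p;

        for ( p = mru_head; p != NULL; p = p->next ) {

            if ( p->is_marker ) {

                markers_passed++;

            } else if ( markers_passed >= epochs ) {

                printf("entry %d aged out\n", p->id);
            }
        }
    }

With epochs_before_eviction set to 3, for example, only entries behind the third marker from the MRU end are reported.
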
* ****************************************************************************/ -#define H5C__H5C_T_MAGIC 0x005CAC0E -#define H5C__MAX_NUM_TYPE_IDS 9 +/* Note that H5C__MAX_EPOCH_MARKERS is defined in H5Cpkg.h, not here because + * it is needed to dimension an array in H5C_t. + */ -struct H5C_t -{ - uint32_t magic; +#define H5C__EPOCH_MARKER_TYPE H5C__MAX_NUM_TYPE_IDS - int32_t max_type_id; - const char * (* type_name_table_ptr); +static void *H5C_epoch_marker_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, + const void *udata1, void *udata2); +static herr_t H5C_epoch_marker_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest, + haddr_t addr, void *thing); +static herr_t H5C_epoch_marker_dest(H5F_t *f, void *thing); +static herr_t H5C_epoch_marker_clear(H5F_t *f, void *thing, hbool_t dest); +static herr_t H5C_epoch_marker_size(H5F_t *f, void *thing, size_t *size_ptr); - size_t max_cache_size; - size_t min_clean_size; +const H5C_class_t epoch_marker_class = +{ + /* id = */ H5C__EPOCH_MARKER_TYPE, + /* load = */ &H5C_epoch_marker_load, + /* flush = */ &H5C_epoch_marker_flush, + /* dest = */ &H5C_epoch_marker_dest, + /* clear = */ &H5C_epoch_marker_clear, + /* size = */ &H5C_epoch_marker_size +}; - H5C_write_permitted_func_t check_write_permitted; +/*************************************************************************** + * Class functions for H5C__EPOCH_MAKER_TYPE: + * + * None of these functions should ever be called, so there is no point in + * documenting them separately. + * JRM - 11/16/04 + * + ***************************************************************************/ - int32_t index_len; - size_t index_size; - H5C_cache_entry_t * (index[H5C__HASH_TABLE_LEN]); +static void * +H5C_epoch_marker_load(UNUSED H5F_t *f, + UNUSED hid_t dxpl_id, + UNUSED haddr_t addr, + UNUSED const void *udata1, + UNUSED void *udata2) +{ + void * ret_value = NULL; /* Return value */ + FUNC_ENTER_NOAPI(H5C_epoch_marker_load, NULL) - int32_t slist_len; - size_t slist_size; - H5SL_t * slist_ptr; + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "called unreachable fcn.") - int32_t pl_len; - size_t pl_size; - H5C_cache_entry_t * pl_head_ptr; - H5C_cache_entry_t * pl_tail_ptr; +done: - int32_t LRU_list_len; - size_t LRU_list_size; - H5C_cache_entry_t * LRU_head_ptr; - H5C_cache_entry_t * LRU_tail_ptr; + FUNC_LEAVE_NOAPI(ret_value) +} - int32_t cLRU_list_len; - size_t cLRU_list_size; - H5C_cache_entry_t * cLRU_head_ptr; - H5C_cache_entry_t * cLRU_tail_ptr; +static herr_t +H5C_epoch_marker_flush(UNUSED H5F_t *f, + UNUSED hid_t dxpl_id, + UNUSED hbool_t dest, + UNUSED haddr_t addr, + UNUSED void *thing) +{ + herr_t ret_value = FAIL; /* Return value */ - int32_t dLRU_list_len; - size_t dLRU_list_size; - H5C_cache_entry_t * dLRU_head_ptr; - H5C_cache_entry_t * dLRU_tail_ptr; + FUNC_ENTER_NOAPI(H5C_epoch_marker_flush, FAIL) -#if H5C_COLLECT_CACHE_STATS + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "called unreachable fcn.") - /* stats fields */ - int64_t hits[H5C__MAX_NUM_TYPE_IDS]; - int64_t misses[H5C__MAX_NUM_TYPE_IDS]; - int64_t insertions[H5C__MAX_NUM_TYPE_IDS]; - int64_t clears[H5C__MAX_NUM_TYPE_IDS]; - int64_t flushes[H5C__MAX_NUM_TYPE_IDS]; - int64_t evictions[H5C__MAX_NUM_TYPE_IDS]; - int64_t renames[H5C__MAX_NUM_TYPE_IDS]; +done: - int64_t total_ht_insertions; - int64_t total_ht_deletions; - int64_t successful_ht_searches; - int64_t total_successful_ht_search_depth; - int64_t failed_ht_searches; - int64_t total_failed_ht_search_depth; + FUNC_LEAVE_NOAPI(ret_value) +} - int32_t max_index_len; - size_t max_index_size; +static 
herr_t +H5C_epoch_marker_dest(UNUSED H5F_t *f, + UNUSED void *thing) +{ + herr_t ret_value = FAIL; /* Return value */ - int32_t max_slist_len; - size_t max_slist_size; + FUNC_ENTER_NOAPI(H5C_epoch_marker_dest, FAIL) - int32_t max_pl_len; - size_t max_pl_size; + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "called unreachable fcn.") -#if H5C_COLLECT_CACHE_ENTRY_STATS +done: - int32_t max_accesses[H5C__MAX_NUM_TYPE_IDS]; - int32_t min_accesses[H5C__MAX_NUM_TYPE_IDS]; - int32_t max_clears[H5C__MAX_NUM_TYPE_IDS]; - int32_t max_flushes[H5C__MAX_NUM_TYPE_IDS]; - size_t max_size[H5C__MAX_NUM_TYPE_IDS]; + FUNC_LEAVE_NOAPI(ret_value) +} -#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */ +static herr_t +H5C_epoch_marker_clear(UNUSED H5F_t *f, + UNUSED void *thing, + UNUSED hbool_t dest) +{ + herr_t ret_value = FAIL; /* Return value */ -#endif /* H5C_COLLECT_CACHE_STATS */ + FUNC_ENTER_NOAPI(H5C_epoch_marker_clear, FAIL) - hbool_t skip_file_checks; - hbool_t skip_dxpl_id_checks; + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "called unreachable fcn.") -}; +done: - -/* - * Private file-scope variables. - */ + FUNC_LEAVE_NOAPI(ret_value) +} -/* Declare a free list to manage the H5C_t struct */ -H5FL_DEFINE_STATIC(H5C_t); +static herr_t +H5C_epoch_marker_size(UNUSED H5F_t *f, + UNUSED void *thing, + UNUSED size_t *size_ptr) +{ + herr_t ret_value = FAIL; /* Return value */ -/* - * Private file-scope function declarations: - */ + FUNC_ENTER_NOAPI(H5C_epoch_marker_size, FAIL) -static herr_t H5C_flush_single_entry(H5F_t * f, - hid_t primary_dxpl_id, - hid_t secondary_dxpl_id, - H5C_t * cache_ptr, - const H5C_class_t * type_ptr, - haddr_t addr, - unsigned flags, - hbool_t * first_flush_ptr, - hbool_t del_entry_from_slist_on_destroy); + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "called unreachable fcn.") -static void * H5C_load_entry(H5F_t * f, - hid_t dxpl_id, - const H5C_class_t * type, - haddr_t addr, - const void * udata1, - void * udata2, - hbool_t skip_file_checks); +done: -static herr_t H5C_make_space_in_cache(H5F_t * f, - hid_t primary_dxpl_id, - hid_t secondary_dxpl_id, - H5C_t * cache_ptr, - size_t space_needed, - hbool_t write_permitted); + FUNC_LEAVE_NOAPI(ret_value) +} /*------------------------------------------------------------------------- @@ -2133,6 +1814,18 @@ static herr_t H5C_make_space_in_cache(H5F_t * f, * JRM -- 7/20/04 * Updated for the addition of the hash table. * + * JRM -- 10/5/04 + * Added call to H5C_reset_cache_hit_rate_stats(). Also + * added initialization for cache_is_full flag and for + * resize_ctl. + * + * JRM -- 11/12/04 + * Added initialization for the new size_decreased field. + * + * JRM -- 11/17/04 + * Added/updated initialization for the automatic cache + * size control data structures. + * *------------------------------------------------------------------------- */ @@ -2179,51 +1872,118 @@ H5C_create(size_t max_cache_size, * the fields. 
*/ - cache_ptr->magic = H5C__H5C_T_MAGIC; + cache_ptr->magic = H5C__H5C_T_MAGIC; - cache_ptr->max_type_id = max_type_id; - cache_ptr->type_name_table_ptr = type_name_table_ptr; + cache_ptr->max_type_id = max_type_id; + cache_ptr->type_name_table_ptr = type_name_table_ptr; - cache_ptr->max_cache_size = max_cache_size; - cache_ptr->min_clean_size = min_clean_size; + cache_ptr->max_cache_size = max_cache_size; + cache_ptr->min_clean_size = min_clean_size; - cache_ptr->check_write_permitted = check_write_permitted; + cache_ptr->check_write_permitted = check_write_permitted; - cache_ptr->index_len = 0; - cache_ptr->index_size = (size_t)0; + cache_ptr->index_len = 0; + cache_ptr->index_size = (size_t)0; cache_ptr->slist_len = 0; - cache_ptr->slist_size = (size_t)0; + cache_ptr->slist_size = (size_t)0; for ( i = 0; i < H5C__HASH_TABLE_LEN; i++ ) { (cache_ptr->index)[i] = NULL; } - cache_ptr->pl_len = 0; - cache_ptr->pl_size = (size_t)0; - cache_ptr->pl_head_ptr = NULL; - cache_ptr->pl_tail_ptr = NULL; - - cache_ptr->LRU_list_len = 0; - cache_ptr->LRU_list_size = (size_t)0; - cache_ptr->LRU_head_ptr = NULL; - cache_ptr->LRU_tail_ptr = NULL; + cache_ptr->pl_len = 0; + cache_ptr->pl_size = (size_t)0; + cache_ptr->pl_head_ptr = NULL; + cache_ptr->pl_tail_ptr = NULL; + + cache_ptr->LRU_list_len = 0; + cache_ptr->LRU_list_size = (size_t)0; + cache_ptr->LRU_head_ptr = NULL; + cache_ptr->LRU_tail_ptr = NULL; + + cache_ptr->cLRU_list_len = 0; + cache_ptr->cLRU_list_size = (size_t)0; + cache_ptr->cLRU_head_ptr = NULL; + cache_ptr->cLRU_tail_ptr = NULL; + + cache_ptr->dLRU_list_len = 0; + cache_ptr->dLRU_list_size = (size_t)0; + cache_ptr->dLRU_head_ptr = NULL; + cache_ptr->dLRU_tail_ptr = NULL; + + cache_ptr->size_increase_possible = FALSE; + cache_ptr->size_decrease_possible = FALSE; + cache_ptr->resize_enabled = FALSE; + cache_ptr->cache_full = FALSE; + cache_ptr->size_decreased = FALSE; + + (cache_ptr->resize_ctl).version = H5C__CURR_AUTO_SIZE_CTL_VER; + (cache_ptr->resize_ctl).rpt_fcn = NULL; + (cache_ptr->resize_ctl).set_initial_size = FALSE; + (cache_ptr->resize_ctl).initial_size = H5C__DEF_AR_INIT_SIZE; + (cache_ptr->resize_ctl).min_clean_fraction = H5C__DEF_AR_MIN_CLEAN_FRAC; + (cache_ptr->resize_ctl).max_size = H5C__DEF_AR_MAX_SIZE; + (cache_ptr->resize_ctl).min_size = H5C__DEF_AR_MIN_SIZE; + (cache_ptr->resize_ctl).epoch_length = H5C__DEF_AR_EPOCH_LENGTH; + + (cache_ptr->resize_ctl).incr_mode = H5C_incr__off; + (cache_ptr->resize_ctl).lower_hr_threshold = H5C__DEF_AR_LOWER_THRESHHOLD; + (cache_ptr->resize_ctl).increment = H5C__DEF_AR_INCREMENT; + (cache_ptr->resize_ctl).apply_max_increment = TRUE; + (cache_ptr->resize_ctl).max_increment = H5C__DEF_AR_MAX_INCREMENT; + + (cache_ptr->resize_ctl).decr_mode = H5C_decr__off; + (cache_ptr->resize_ctl).upper_hr_threshold = H5C__DEF_AR_UPPER_THRESHHOLD; + (cache_ptr->resize_ctl).decrement = H5C__DEF_AR_DECREMENT; + (cache_ptr->resize_ctl).apply_max_decrement = TRUE; + (cache_ptr->resize_ctl).max_decrement = H5C__DEF_AR_MAX_DECREMENT; + (cache_ptr->resize_ctl).epochs_before_eviction = H5C__DEF_AR_EPCHS_B4_EVICT; + (cache_ptr->resize_ctl).apply_empty_reserve = TRUE; + (cache_ptr->resize_ctl).empty_reserve = H5C__DEF_AR_EMPTY_RESERVE; + + cache_ptr->epoch_markers_active = 0; + + /* no need to initialize the ring buffer itself */ + cache_ptr->epoch_marker_ringbuf_first = 1; + cache_ptr->epoch_marker_ringbuf_last = 0; + cache_ptr->epoch_marker_ringbuf_size = 0; + + for ( i = 0; i < H5C__MAX_EPOCH_MARKERS; i++ ) + { + (cache_ptr->epoch_marker_active)[i] = 
FALSE; + + ((cache_ptr->epoch_markers)[i]).addr = (haddr_t)i; + ((cache_ptr->epoch_markers)[i]).size = (size_t)0; + ((cache_ptr->epoch_markers)[i]).type = &epoch_marker_class; + ((cache_ptr->epoch_markers)[i]).is_dirty = FALSE; + ((cache_ptr->epoch_markers)[i]).is_protected = FALSE; + ((cache_ptr->epoch_markers)[i]).in_slist = FALSE; + ((cache_ptr->epoch_markers)[i]).ht_next = NULL; + ((cache_ptr->epoch_markers)[i]).ht_prev = NULL; + ((cache_ptr->epoch_markers)[i]).next = NULL; + ((cache_ptr->epoch_markers)[i]).prev = NULL; + ((cache_ptr->epoch_markers)[i]).aux_next = NULL; + ((cache_ptr->epoch_markers)[i]).aux_prev = NULL; +#if H5C_COLLECT_CACHE_ENTRY_STATS + ((cache_ptr->epoch_markers)[i]).accesses = 0; + ((cache_ptr->epoch_markers)[i]).clears = 0; + ((cache_ptr->epoch_markers)[i]).flushes = 0; +#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */ + } - cache_ptr->cLRU_list_len = 0; - cache_ptr->cLRU_list_size = (size_t)0; - cache_ptr->cLRU_head_ptr = NULL; - cache_ptr->cLRU_tail_ptr = NULL; + if ( H5C_reset_cache_hit_rate_stats(cache_ptr) != SUCCEED ) { - cache_ptr->dLRU_list_len = 0; - cache_ptr->dLRU_list_size = (size_t)0; - cache_ptr->dLRU_head_ptr = NULL; - cache_ptr->dLRU_tail_ptr = NULL; + /* this should be impossible... */ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, \ + "H5C_reset_cache_hit_rate_stats failed.") + } H5C_stats__reset(cache_ptr); - cache_ptr->skip_file_checks = FALSE; - cache_ptr->skip_dxpl_id_checks = FALSE; + cache_ptr->skip_file_checks = FALSE; + cache_ptr->skip_dxpl_id_checks = FALSE; /* Set return value */ ret_value = cache_ptr; @@ -2251,19 +2011,171 @@ done: /*------------------------------------------------------------------------- - * Function: H5C_dest + * Function: H5C_def_auto_resize_rpt_fcn * - * Purpose: Flush all data to disk and destroy the cache. + * Purpose: Print results of a automatic cache resize. * - * This function fails if any object are protected since the - * resulting file might not be consistent. + * This function should only be used where HDprintf() behaves + * well -- i.e. not on Windows. + * + * Return: void * - * The primary_dxpl_id and secondary_dxpl_id parameters - * specify the dxpl_ids used on the first write occasioned - * by the destroy (primary_dxpl_id), and on all subsequent - * writes (secondary_dxpl_id). This is useful in the metadata - * cache, but may not be needed elsewhere. If so, just use the - * same dxpl_id for both parameters. + * Programmer: John Mainzer + * 10/27/04 + * + * Modifications: + * + * JRM -- 11/22/04 + * Reworked function to adapt it to the addition of the + * ageout method of cache size reduction. + * + *------------------------------------------------------------------------- + */ +void +H5C_def_auto_resize_rpt_fcn(H5C_t * cache_ptr, + int32_t version, + double hit_rate, + enum H5C_resize_status status, + size_t old_max_cache_size, + size_t new_max_cache_size, + size_t old_min_clean_size, + size_t new_min_clean_size) +{ + HDassert( cache_ptr != NULL ); + HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC ); + HDassert( version == H5C__CURR_AUTO_RESIZE_RPT_FCN_VER ); + + switch ( status ) + { + case in_spec: + HDfprintf(stdout, "Auto cache resize -- no change. 
"); + HDfprintf(stdout, "(hit rate = %lf)\n", hit_rate); + break; + + case increase: + HDassert( hit_rate < (cache_ptr->resize_ctl).lower_hr_threshold ); + HDassert( old_max_cache_size < new_max_cache_size ); + + HDfprintf(stdout, "Auto cache resize -- hit rate (%lf) ", hit_rate); + + HDfprintf(stdout, "out of bounds low (%6.5lf).\n", + (cache_ptr->resize_ctl).lower_hr_threshold); + + HDfprintf(stdout, + " cache size increased from (%Zu/%Zu) to (%Zu/%Zu).\n", + old_max_cache_size, + old_min_clean_size, + new_max_cache_size, + new_min_clean_size); + break; + + case decrease: + HDassert( old_max_cache_size > new_max_cache_size ); + + switch ( (cache_ptr->resize_ctl).decr_mode ) + { + case H5C_decr__threshold: + HDassert( hit_rate > + (cache_ptr->resize_ctl).upper_hr_threshold ); + + HDfprintf(stdout, + "Auto cache resize -- decrease by threshold. "); + + HDfprintf(stdout, "HR = %lf > %6.5lf\n", + hit_rate, + (cache_ptr->resize_ctl).upper_hr_threshold); + + HDfprintf(stdout, "out of bounds high (%6.5lf).\n", + (cache_ptr->resize_ctl).upper_hr_threshold); + break; + + case H5C_decr__age_out: + HDfprintf(stdout, + "Auto cache resize -- decrease by ageout. "); + HDfprintf(stdout, "HR = %lf\n", hit_rate); + break; + + case H5C_decr__age_out_with_threshold: + HDassert( hit_rate > + (cache_ptr->resize_ctl).upper_hr_threshold ); + + HDfprintf(stdout, + "Auto cache resize -- decrease by ageout with "); + HDfprintf(stdout, "threshold. HR = %lf > %6.5lf\n", + hit_rate, + (cache_ptr->resize_ctl).upper_hr_threshold); + break; + + default: + HDfprintf(stdout, + "Auto cache resize -- decrease by unknown mode."); + HDfprintf(stdout, " HR = %lf\n", hit_rate); + } + + HDfprintf(stdout, + " cache size decreased from (%Zu/%Zu) to (%Zu/%Zu).\n", + old_max_cache_size, + old_min_clean_size, + new_max_cache_size, + new_min_clean_size); + break; + + case at_max_size: + HDfprintf(stdout, "Auto cache resize -- hit rate (%lf) ", hit_rate); + HDfprintf(stdout, "out of bounds low (%6.5lf).\n", + (cache_ptr->resize_ctl).lower_hr_threshold); + HDfprintf(stdout, " cache already at maximum size so no change.\n"); + break; + + case at_min_size: + HDfprintf(stdout, "Auto cache resize -- hit rate (%lf) ", hit_rate); + HDfprintf(stdout, "-- can't decrease.\n"); + HDfprintf(stdout, " cache already at minimum size.\n"); + break; + + case increase_disabled: + HDfprintf(stdout, "Auto cache resize -- increase disabled -- "); + HDfprintf(stdout, "HR = %lf.", hit_rate); + break; + + case decrease_disabled: + HDfprintf(stdout, "Auto cache resize -- decrease disabled -- "); + HDfprintf(stdout, "HR = %lf.\n", hit_rate); + break; + + case not_full: + HDassert( hit_rate < (cache_ptr->resize_ctl).lower_hr_threshold ); + + HDfprintf(stdout, "Auto cache resize -- hit rate (%lf) ", hit_rate); + HDfprintf(stdout, "out of bounds low (%6.5lf).\n", + (cache_ptr->resize_ctl).lower_hr_threshold); + HDfprintf(stdout, " cache not full so no increase in size.\n"); + break; + + default: + HDfprintf(stdout, "Auto cache resize -- unknown status code.\n"); + break; + } + + return; + +} /* H5C_def_auto_resize_rpt_fcn() */ + + +/*------------------------------------------------------------------------- + * Function: H5C_dest + * + * Purpose: Flush all data to disk and destroy the cache. + * + * This function fails if any object are protected since the + * resulting file might not be consistent. 
+ * + * The primary_dxpl_id and secondary_dxpl_id parameters + * specify the dxpl_ids used on the first write occasioned + * by the destroy (primary_dxpl_id), and on all subsequent + * writes (secondary_dxpl_id). This is useful in the metadata + * cache, but may not be needed elsewhere. If so, just use the + * same dxpl_id for both parameters. * * Note that *cache_ptr has been freed upon successful return. * @@ -2274,6 +2186,8 @@ done: * * Modifications: * + * None. + * *------------------------------------------------------------------------- */ herr_t @@ -2282,7 +2196,7 @@ H5C_dest(H5F_t * f, hid_t secondary_dxpl_id, H5C_t * cache_ptr) { - herr_t ret_value=SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(H5C_dest, FAIL) @@ -2328,6 +2242,8 @@ done: * * Modifications: * + * None. + * *------------------------------------------------------------------------- */ herr_t @@ -2394,6 +2310,13 @@ done: * JRM -- 7/20/04 * Modified the function for the addition of the hash table. * + * JRM -- 11/22/04 + * Added code to remove all epoch markers (if any) from the + * LRU list before a destroy. Strictly speaking, this isn't + * necessary, as the marker entries reside only in the LRU + * list, never in the index or in the tree. However, it + * never hurts to tidy up. + * *------------------------------------------------------------------------- */ herr_t @@ -2423,6 +2346,18 @@ H5C_flush_cache(H5F_t * f, HDassert( cache_ptr->skip_file_checks || f ); HDassert( cache_ptr->slist_ptr ); + if ( ( destroy ) && ( cache_ptr->epoch_markers_active > 0 ) ) { + + status = H5C__autoadjust__ageout__remove_all_markers(cache_ptr); + + if ( status != SUCCEED ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ + "error removing all epoch markers.") + } + } + + if ( cache_ptr->slist_len == 0 ) { node_ptr = NULL; @@ -2464,7 +2399,7 @@ H5C_flush_cache(H5F_t * f, &first_flush, FALSE); if ( status < 0 ) { - + /* This shouldn't happen -- if it does, we are toast so * just scream and die. */ @@ -2532,7 +2467,7 @@ H5C_flush_cache(H5F_t * f, &first_flush, FALSE); if ( status < 0 ) { - + /* This shouldn't happen -- if it does, we are toast so * just scream and die. */ @@ -2585,6 +2520,179 @@ done: /*------------------------------------------------------------------------- + * Function: H5C_get_cache_auto_resize_config + * + * Purpose: Copy the current configuration of the cache automatic + * re-sizing function into the instance of H5C_auto_size_ctl_t + * pointed to by config_ptr. + * + * Return: SUCCEED on success, and FAIL on failure. + * + * Programmer: John Mainzer + * 10/8/04 + * + * Modifications: + * + * None. 
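A minimal usage sketch for this accessor (the helper name is hypothetical and not part of the patch; error handling is elided). As the function body below shows, the returned initial_size is the cache's current maximum size and set_initial_size is reported as FALSE:

    static void
    example_print_resize_config(H5C_t * cache_ptr)
    {
        H5C_auto_size_ctl_t cfg;

        if ( H5C_get_cache_auto_resize_config(cache_ptr, &cfg) >= 0 ) {

            HDfprintf(stdout, "cache size bounds: %Zu .. %Zu\n",
                      cfg.min_size, cfg.max_size);
            HDfprintf(stdout, "incr_mode = %d, decr_mode = %d\n",
                      (int)(cfg.incr_mode), (int)(cfg.decr_mode));
        }
    }
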
+ * + *------------------------------------------------------------------------- + */ + +herr_t +H5C_get_cache_auto_resize_config(H5C_t * cache_ptr, + H5C_auto_size_ctl_t *config_ptr) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(H5C_get_cache_auto_resize_config, FAIL) + + if ( ( cache_ptr == NULL ) || ( cache_ptr->magic != H5C__H5C_T_MAGIC ) ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry.") + } + + if ( config_ptr == NULL ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad config_ptr on entry.") + } + + *config_ptr = cache_ptr->resize_ctl; + + config_ptr->set_initial_size = FALSE; + config_ptr->initial_size = cache_ptr->max_cache_size; + +done: + + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5C_get_cache_auto_resize_config() */ + + +/*------------------------------------------------------------------------- + * Function: H5C_get_cache_size + * + * Purpose: Return the cache maximum size, the minimum clean size, the + * current size, and the current number of entries in + * *max_size_ptr, *min_clean_size_ptr, *cur_size_ptr, and + * *cur_num_entries_ptr respectively. If any of these + * parameters are NULL, skip that value. + * + * Return: SUCCEED on success, and FAIL on failure. + * + * Programmer: John Mainzer + * 10/8/04 + * + * Modifications: + * + * None. + * + *------------------------------------------------------------------------- + */ + +herr_t +H5C_get_cache_size(H5C_t * cache_ptr, + size_t * max_size_ptr, + size_t * min_clean_size_ptr, + size_t * cur_size_ptr, + int32_t * cur_num_entries_ptr) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(H5C_get_cache_size, FAIL) + + if ( ( cache_ptr == NULL ) || ( cache_ptr->magic != H5C__H5C_T_MAGIC ) ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry.") + } + + if ( max_size_ptr != NULL ) { + + *max_size_ptr = cache_ptr->max_cache_size; + } + + if ( min_clean_size_ptr != NULL ) { + + *min_clean_size_ptr = cache_ptr->min_clean_size; + } + + if ( cur_size_ptr != NULL ) { + + *cur_size_ptr = cache_ptr->index_size; + } + + if ( cur_num_entries_ptr != NULL ) { + + *cur_num_entries_ptr = cache_ptr->index_len; + } + +done: + + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5C_get_cache_size() */ + + +/*------------------------------------------------------------------------- + * Function: H5C_get_cache_hit_rate + * + * Purpose: Compute and return the current cache hit rate in + * *hit_rate_ptr. If there have been no accesses since the + * last time the cache hit rate stats were reset, set + * *hit_rate_ptr to 0.0. On error, *hit_rate_ptr is + * undefined. + * + * Return: SUCCEED on success, and FAIL on failure. + * + * Programmer: John Mainzer + * 10/7/04 + * + * Modifications: + * + * None. 
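A sketch (hypothetical helper, not part of the patch) of how a caller might poll the hit rate over an interval using the two new entry points -- read the rate, then reset the counters to start a fresh interval:

    static void
    example_poll_hit_rate(H5C_t * cache_ptr)
    {
        double hit_rate = 0.0;

        if ( H5C_get_cache_hit_rate(cache_ptr, &hit_rate) >= 0 ) {

            HDfprintf(stdout, "hit rate over last interval = %lf\n", hit_rate);
        }

        /* restart the measurement interval */
        (void)H5C_reset_cache_hit_rate_stats(cache_ptr);
    }
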
+ * + *------------------------------------------------------------------------- + */ + +herr_t +H5C_get_cache_hit_rate(H5C_t * cache_ptr, + double * hit_rate_ptr) + +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(H5C_get_cache_hit_rate, FAIL) + + if ( ( cache_ptr == NULL ) || ( cache_ptr->magic != H5C__H5C_T_MAGIC ) ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry.") + } + + if ( hit_rate_ptr == NULL ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad hit_rate_ptr on entry.") + } + + HDassert( cache_ptr->cache_hits >= 0 ); + HDassert( cache_ptr->cache_accesses >= cache_ptr->cache_hits ); + + if ( cache_ptr->cache_accesses > 0 ) { + + *hit_rate_ptr = ((double)(cache_ptr->cache_hits)) / + ((double)(cache_ptr->cache_accesses)); + + } else { + + *hit_rate_ptr = 0.0; + } + +done: + + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5C_get_cache_hit_rate() */ + + +/*------------------------------------------------------------------------- * Function: H5C_insert_entry * * Purpose: Adds the specified thing to the cache. The thing need not @@ -2614,6 +2722,14 @@ done: * JRM -- 7/21/04 * Updated function for the addition of the hash table. * + * JRM -- 10/28/04 + * Added code to set the cache_full flag to TRUE when ever + * we need to make space in the cache. + * + * JRM --11/22/04 + * Updated function for the addition of the first_flush_ptr + * parameter to H5C_make_space_in_cache(). + * *------------------------------------------------------------------------- */ @@ -2628,6 +2744,7 @@ H5C_insert_entry(H5F_t * f, { herr_t result; herr_t ret_value = SUCCEED; /* Return value */ + hbool_t first_flush = TRUE; hbool_t write_permitted = TRUE; H5C_cache_entry_t * entry_ptr; H5C_cache_entry_t * test_entry_ptr; @@ -2673,6 +2790,8 @@ H5C_insert_entry(H5F_t * f, size_t space_needed; + cache_ptr->cache_full = TRUE; + if ( cache_ptr->check_write_permitted != NULL ) { result = (cache_ptr->check_write_permitted)(f, @@ -2720,7 +2839,8 @@ H5C_insert_entry(H5F_t * f, secondary_dxpl_id, cache_ptr, space_needed, - write_permitted); + write_permitted, + &first_flush); if ( result < 0 ) { @@ -2917,26 +3037,39 @@ done: * * Modifications: * - * JRM - 7/21/04 + * JRM -- 7/21/04 * Updated for the addition of the hash table. * + * JRM -- 10/28/04 + * Added code to set cache_full to TRUE whenever we try to + * make space in the cache. + * + * JRM -- 11/12/04 + * Added code to call to H5C_make_space_in_cache() after the + * call to H5C__auto_adjust_cache_size() if that function + * sets the size_decreased flag is TRUE. + * *------------------------------------------------------------------------- */ void * H5C_protect(H5F_t * f, - hid_t primary_dxpl_id, - hid_t secondary_dxpl_id, - H5C_t * cache_ptr, - const H5C_class_t * type, - haddr_t addr, - const void * udata1, - void * udata2) + hid_t primary_dxpl_id, + hid_t secondary_dxpl_id, + H5C_t * cache_ptr, + const H5C_class_t * type, + haddr_t addr, + const void * udata1, + void * udata2) { hbool_t hit = FALSE; + hbool_t first_flush = TRUE; + hbool_t have_write_permitted = FALSE; + hbool_t write_permitted = TRUE; + herr_t result; void * thing = NULL; H5C_cache_entry_t * entry_ptr; - void * ret_value; /* Return value */ + void * ret_value = NULL; /* Return value */ FUNC_ENTER_NOAPI(H5C_protect, NULL) @@ -2960,6 +3093,7 @@ H5C_protect(H5F_t * f, } else { /* must try to load the entry from disk. 
*/ hit = FALSE; + thing = H5C_load_entry(f, primary_dxpl_id, type, addr, udata1, udata2, cache_ptr->skip_file_checks); @@ -2974,10 +3108,10 @@ H5C_protect(H5F_t * f, if ( (cache_ptr->index_size + entry_ptr->size) > cache_ptr->max_cache_size ) { - hbool_t write_permitted = TRUE; - herr_t result; size_t space_needed; + cache_ptr->cache_full = TRUE; + if ( cache_ptr->check_write_permitted != NULL ) { result = (cache_ptr->check_write_permitted)(f, @@ -2987,8 +3121,15 @@ H5C_protect(H5F_t * f, if ( result < 0 ) { HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \ - "Can't get write_permitted") + "Can't get write_permitted 1") + + } else { + + have_write_permitted = TRUE; } + } else { + + have_write_permitted = TRUE; } HDassert( entry_ptr->size <= H5C_MAX_ENTRY_SIZE ); @@ -3022,12 +3163,13 @@ H5C_protect(H5F_t * f, result = H5C_make_space_in_cache(f, primary_dxpl_id, secondary_dxpl_id, cache_ptr, - space_needed, write_permitted); + space_needed, write_permitted, + &first_flush); if ( result < 0 ) { HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \ - "H5C_make_space_in_cache failed.") + "H5C_make_space_in_cache failed 1.") } } @@ -3059,8 +3201,80 @@ H5C_protect(H5F_t * f, ret_value = thing; + H5C__UPDATE_CACHE_HIT_RATE_STATS(cache_ptr, hit) + H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) + if ( ( cache_ptr->resize_enabled ) && + ( cache_ptr->cache_accesses >= + (cache_ptr->resize_ctl).epoch_length ) ) { + + if ( ! have_write_permitted ) { + + if ( cache_ptr->check_write_permitted != NULL ) { + + result = (cache_ptr->check_write_permitted)(f, + primary_dxpl_id, + &write_permitted); + + if ( result < 0 ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \ + "Can't get write_permitted 2") + + } else { + + have_write_permitted = TRUE; + } + } else { + + have_write_permitted = TRUE; + } + } + + result = H5C__auto_adjust_cache_size(cache_ptr, + f, + primary_dxpl_id, + secondary_dxpl_id, + write_permitted, + &first_flush); + + if ( result != SUCCEED ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \ + "Cache auto-resize failed.") + + } else if ( cache_ptr->size_decreased ) { + + cache_ptr->size_decreased = FALSE; + + /* check to see if the cache is now oversized due to the cache + * size reduction. If it is, try to evict enough entries to + * bring the cache size down to the current maximum cache size. + */ + if ( cache_ptr->index_size > cache_ptr->max_cache_size ) { + + size_t space_needed; + + cache_ptr->cache_full = TRUE; + + space_needed = cache_ptr->index_size - + cache_ptr->max_cache_size + 1; + + result = H5C_make_space_in_cache(f, primary_dxpl_id, + secondary_dxpl_id, cache_ptr, + space_needed, write_permitted, + &first_flush); + + if ( result < 0 ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \ + "H5C_make_space_in_cache failed 2.") + } + } + } + } + done: FUNC_LEAVE_NOAPI(ret_value) @@ -3069,39 +3283,14 @@ done: /*------------------------------------------------------------------------- - * Function: H5C_unprotect - * - * Purpose: Undo an H5C_protect() call -- specifically, mark the - * entry as unprotected, remove it from the protected list, - * and give it back to the replacement policy. - * - * The TYPE and ADDR arguments must be the same as those in - * the corresponding call to H5C_protect() and the THING - * argument must be the value returned by that call to - * H5C_protect(). 
- * - * The primary_dxpl_id and secondary_dxpl_id parameters - * specify the dxpl_ids used on the first write occasioned - * by the unprotect (primary_dxpl_id), and on all subsequent - * writes (secondary_dxpl_id). Since an uprotect cannot - * occasion a write at present, all this is moot for now. - * However, things change, and in any case, - * H5C_flush_single_entry() needs primary_dxpl_id and - * secondary_dxpl_id in its parameter list. - * - * The function can't cause a read either, so the dxpl_id - * parameters are moot in this case as well. * - * Return: Non-negative on success/Negative on failure + * Function: H5C_reset_cache_hit_rate_stats() * - * If the deleted flag is TRUE, simply remove the target entry - * from the cache, clear it, and free it without writing it to - * disk. + * Purpose: Reset the cache hit rate computation fields. * - * Return: Non-negative on success/Negative on failure + * Return: SUCCEED on success, and FAIL on failure. * - * Programmer: John Mainzer - * 6/2/04 + * Programmer: John Mainzer, 10/5/04 * * Modifications: * @@ -3112,109 +3301,410 @@ done: * *------------------------------------------------------------------------- */ + herr_t -H5C_unprotect(H5F_t * f, - hid_t primary_dxpl_id, - hid_t secondary_dxpl_id, - H5C_t * cache_ptr, - const H5C_class_t * type, - haddr_t addr, - void * thing, - hbool_t deleted) +H5C_reset_cache_hit_rate_stats(H5C_t * cache_ptr) { - herr_t ret_value = SUCCEED; /* Return value */ - H5C_cache_entry_t * entry_ptr; - H5C_cache_entry_t * test_entry_ptr; + herr_t ret_value = SUCCEED; /* Return value */ - FUNC_ENTER_NOAPI(H5C_unprotect, FAIL) + FUNC_ENTER_NOAPI(H5C_reset_cache_hit_rate_stats, FAIL) - HDassert( cache_ptr ); - HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC ); - HDassert( cache_ptr->skip_file_checks || f ); - HDassert( type ); - HDassert( type->clear ); - HDassert( type->flush ); - HDassert( H5F_addr_defined(addr) ); - HDassert( thing ); + if ( ( cache_ptr == NULL ) || ( cache_ptr->magic != H5C__H5C_T_MAGIC ) ) { - entry_ptr = (H5C_cache_entry_t *)thing; + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry.") + } - HDassert( entry_ptr->addr == addr ); - HDassert( entry_ptr->type == type ); + cache_ptr->cache_hits = 0; + cache_ptr->cache_accesses = 0; - if ( ! (entry_ptr->is_protected) ) { +done: - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \ - "Entry already unprotected??") + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5C_reset_cache_hit_rate_stats() */ + + +/*------------------------------------------------------------------------- + * Function: H5C_set_cache_auto_resize_config + * + * Purpose: Set the cache automatic resize configuration to the + * provided values if they are in range, and fail if they + * are not. + * + * If the new configuration enables automatic cache resizing, + * coerce the cache max size and min clean size into agreement + * with the new policy and re-set the full cache hit rate + * stats. + * + * Return: SUCCEED on success, and FAIL on failure. + * + * Programmer: John Mainzer + * 10/8/04 + * + * Modifications: + * + * JRM -- 11/18/04 + * Reworked function to match major changes in + * H5C_auto_size_ctl_t. 
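The test code in H5AC_create() above builds a full H5C_auto_size_ctl_t by hand; a gentler pattern is to fetch the current configuration, adjust only the fields of interest, and hand it back, as in this sketch (hypothetical helper; the threshold and increment values are examples only, and must satisfy the range checks enforced below):

    static herr_t
    example_enable_threshold_increase(H5C_t * cache_ptr)
    {
        H5C_auto_size_ctl_t cfg;

        if ( H5C_get_cache_auto_resize_config(cache_ptr, &cfg) < 0 ) {

            return FAIL;
        }

        /* grow the cache when the hit rate over an epoch drops below 90%,
         * doubling max_cache_size (subject to max_increment / max_size).
         */
        cfg.incr_mode          = H5C_incr__threshold;
        cfg.lower_hr_threshold = 0.9;
        cfg.increment          = 2.0;

        return H5C_set_cache_auto_resize_config(cache_ptr, &cfg);
    }
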
+ * + *------------------------------------------------------------------------- + */ + +herr_t +H5C_set_cache_auto_resize_config(H5C_t * cache_ptr, + H5C_auto_size_ctl_t *config_ptr) +{ + herr_t ret_value = SUCCEED; /* Return value */ + herr_t result; + size_t new_max_cache_size; + size_t new_min_clean_size; + + FUNC_ENTER_NOAPI(H5C_set_cache_auto_resize_config, FAIL) + + if ( ( cache_ptr == NULL ) || ( cache_ptr->magic != H5C__H5C_T_MAGIC ) ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry.") } - H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, FAIL) + if ( config_ptr == NULL ) { - entry_ptr->is_protected = FALSE; + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry.") + } + + if ( config_ptr->version != H5C__CURR_AUTO_SIZE_CTL_VER ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown config version.") + } + + /* check general configuration section of the config: */ + if ( ( config_ptr->max_size > H5C__MAX_MAX_CACHE_SIZE ) + || + ( config_ptr->max_size < config_ptr->min_size ) + || + ( config_ptr->min_size < H5C__MIN_MAX_CACHE_SIZE ) + || + ( ( config_ptr->set_initial_size ) && + ( config_ptr->initial_size > config_ptr->max_size ) + ) + || + ( ( config_ptr->set_initial_size ) && + ( config_ptr->initial_size < config_ptr->min_size ) + ) + || + ( config_ptr->min_clean_fraction > 1.0 ) + || + ( config_ptr->min_clean_fraction < 0.0 ) + || + ( config_ptr->epoch_length < H5C__MIN_AR_EPOCH_LENGTH ) + || + ( config_ptr->epoch_length > H5C__MAX_AR_EPOCH_LENGTH ) + ) { + + HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, \ + "error in general configuration fields of new config.") + } + + /* check size increase control fields of the config: */ + if ( ( ( config_ptr->incr_mode != H5C_incr__off ) + && + ( config_ptr->incr_mode != H5C_incr__threshold ) + ) + || + ( ( config_ptr->incr_mode == H5C_incr__threshold ) + && + ( ( config_ptr->lower_hr_threshold < 0.0 ) + || + ( config_ptr->lower_hr_threshold > 1.0 ) + || + ( config_ptr->increment < 1.0 ) + /* no need to check max_increment, as it is a size_t, + * and thus must be non-negative. + */ + ) + ) + ) { + + HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, \ + "error in the size increase control fields of new config.") + } + + /* check size decrease control fields of the config: */ + if ( ( ( config_ptr->decr_mode != H5C_decr__off ) + && + ( config_ptr->decr_mode != H5C_decr__threshold ) + && + ( config_ptr->decr_mode != H5C_decr__age_out ) + && + ( config_ptr->decr_mode != H5C_decr__age_out_with_threshold ) + ) + || + ( ( config_ptr->decr_mode == H5C_decr__threshold ) + && + ( ( config_ptr->upper_hr_threshold > 1.0 ) + || + ( config_ptr->decrement > 1.0 ) + || + ( config_ptr->decrement < 0.0 ) + /* no need to check max_decrement as it is a size_t + * and thus must be non-negative. + */ + ) + ) + || + ( ( ( config_ptr->decr_mode == H5C_decr__age_out ) + || + ( config_ptr->decr_mode == H5C_decr__age_out_with_threshold ) + ) + && + ( + ( config_ptr->epochs_before_eviction < 1 ) + || + ( config_ptr->epochs_before_eviction > H5C__MAX_EPOCH_MARKERS ) + || + ( ( config_ptr->apply_empty_reserve ) + && + ( config_ptr->empty_reserve < 0.0 ) + ) + || + ( ( config_ptr->apply_empty_reserve ) + && + ( config_ptr->empty_reserve > 1.0 ) + ) + /* no need to check max_decrement as it is a size_t + * and thus must be non-negative. 
+ */ + ) + ) + || + ( ( config_ptr->decr_mode == H5C_decr__age_out_with_threshold ) + && + ( config_ptr->upper_hr_threshold > 1.0 ) + ) + ) { + + HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, \ + "error in the size decrease control fields of new config.") + } + + /* check for conflicts between size increase and size decrease controls: */ + if ( ( config_ptr->incr_mode == H5C_incr__threshold ) + && + ( ( config_ptr->decr_mode == H5C_decr__threshold ) + || + ( config_ptr->decr_mode == H5C_decr__age_out_with_threshold ) + ) + && + ( config_ptr->lower_hr_threshold >= config_ptr->upper_hr_threshold ) + ) { + + HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, \ + "conflicting threshold fields in new config.") + } + + cache_ptr->size_increase_possible = TRUE; /* will set to FALSE if needed */ + cache_ptr->size_decrease_possible = TRUE; /* will set to FALSE if needed */ + + switch ( config_ptr->incr_mode ) + { + case H5C_incr__off: + cache_ptr->size_increase_possible = FALSE; + break; + + case H5C_incr__threshold: + if ( ( config_ptr->lower_hr_threshold <= 0.0 ) || + ( config_ptr->increment <= 1.0 ) || + ( ( config_ptr->apply_max_increment ) && + ( config_ptr->max_increment <= 0 ) ) ) { + + cache_ptr->size_increase_possible = FALSE; + } + break; + + default: /* should be unreachable */ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown incr_mode?!?!?.") + break; + } + + switch ( config_ptr->decr_mode ) + { + case H5C_decr__off: + cache_ptr->size_decrease_possible = FALSE; + break; + + case H5C_decr__threshold: + if ( ( config_ptr->upper_hr_threshold >= 1.0 ) || + ( config_ptr->decrement >= 1.0 ) || + ( ( config_ptr->apply_max_decrement ) && + ( config_ptr->max_decrement <= 0 ) ) ) { + + cache_ptr->size_decrease_possible = FALSE; + } + break; + + case H5C_decr__age_out: + if ( ( ( config_ptr->apply_empty_reserve ) && + ( config_ptr->empty_reserve >= 1.0 ) ) || + ( ( config_ptr->apply_max_decrement ) && + ( config_ptr->max_decrement <= 0 ) ) ) { - /* add the entry to the skip list if it is dirty, and it isn't already in - * the list. + cache_ptr->size_decrease_possible = FALSE; + } + break; + + case H5C_decr__age_out_with_threshold: + if ( ( ( config_ptr->apply_empty_reserve ) && + ( config_ptr->empty_reserve >= 1.0 ) ) || + ( ( config_ptr->apply_max_decrement ) && + ( config_ptr->max_decrement <= 0 ) ) || + ( config_ptr->upper_hr_threshold >= 1.0 ) ) { + + cache_ptr->size_decrease_possible = FALSE; + } + break; + + default: /* should be unreachable */ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown decr_mode?!?!?.") + break; + } + + if ( config_ptr->max_size == config_ptr->min_size ) { + + cache_ptr->size_increase_possible = FALSE; + cache_ptr->size_decrease_possible = FALSE; + } + + cache_ptr->resize_enabled = cache_ptr->size_increase_possible || + cache_ptr->size_decrease_possible; + + cache_ptr->resize_ctl = *config_ptr; + + /* Resize the cache to the supplied initial value if requested, or as + * necessary to force it within the bounds of the current automatic + * cache resizing configuration. + * + * Note that the min_clean_fraction may have changed, so we + * go through the exercise even if the current size is within + * range and an initial size has not been provided. */ + if ( (cache_ptr->resize_ctl).set_initial_size ) { + + new_max_cache_size = (cache_ptr->resize_ctl).initial_size; + } + else if ( cache_ptr->max_cache_size > (cache_ptr->resize_ctl).max_size ) { - if ( ( entry_ptr->is_dirty ) && ( ! 
(entry_ptr->in_slist) ) ) { + new_max_cache_size = (cache_ptr->resize_ctl).max_size; + } + else if ( cache_ptr->max_cache_size < (cache_ptr->resize_ctl).min_size ) { - H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr) + new_max_cache_size = (cache_ptr->resize_ctl).min_size; + + } else { + + new_max_cache_size = cache_ptr->max_cache_size; } - /* this implementation of the "deleted" option is a bit inefficient, as - * we re-insert the entry to be deleted into the replacement policy - * data structures, only to remove them again. Depending on how often - * we do this, we may want to optimize a bit. + new_min_clean_size = (size_t) + ((double)new_max_cache_size * + ((cache_ptr->resize_ctl).min_clean_fraction)); + + + /* since new_min_clean_size is of type size_t, we have * - * On the other hand, this implementation is reasonably clean, and - * makes good use of existing code. - * JRM - 5/19/04 + * ( 0 <= new_min_clean_size ) + * + * by definition. */ - if ( deleted ) { + HDassert( new_min_clean_size <= new_max_cache_size ); + HDassert( (cache_ptr->resize_ctl).min_size <= new_max_cache_size ); + HDassert( new_max_cache_size <= (cache_ptr->resize_ctl).max_size ); - /* the following first flush flag will never be used as we are - * calling H5C_flush_single_entry with both the H5F_FLUSH_CLEAR_ONLY - * and H5F_FLUSH_INVALIDATE flags. However, it is needed for the - * function call. - */ - hbool_t dummy_first_flush = TRUE; + cache_ptr->max_cache_size = new_max_cache_size; + cache_ptr->min_clean_size = new_min_clean_size; - /* verify that the target entry is in the cache. */ + if ( H5C_reset_cache_hit_rate_stats(cache_ptr) != SUCCEED ) { - H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL) + /* this should be impossible... */ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ + "H5C_reset_cache_hit_rate_stats failed.") + } - if ( test_entry_ptr == NULL ) { + /* remove excess epoch markers if any */ + if ( ( config_ptr->decr_mode == H5C_decr__age_out_with_threshold ) || + ( config_ptr->decr_mode == H5C_decr__age_out ) ) { - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \ - "entry not in hash table?!?.") - } - else if ( test_entry_ptr != entry_ptr ) { + if ( cache_ptr->epoch_markers_active > + (cache_ptr->resize_ctl).epochs_before_eviction ) { - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \ - "hash table contains multiple entries for addr?!?.") + result = + H5C__autoadjust__ageout__remove_excess_markers(cache_ptr); + + if ( result != SUCCEED ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ + "can't remove excess epoch markers.") + } } + } else if ( cache_ptr->epoch_markers_active > 0 ) { - if ( H5C_flush_single_entry(f, - primary_dxpl_id, - secondary_dxpl_id, - cache_ptr, - type, - addr, - (H5F_FLUSH_CLEAR_ONLY|H5F_FLUSH_INVALIDATE), - &dummy_first_flush, - TRUE) < 0 ) { + result = H5C__autoadjust__ageout__remove_all_markers(cache_ptr); - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't flush.") + if ( result != SUCCEED ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ + "error removing all epoch markers.") } } - H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr) - done: FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_unprotect() */ +} /* H5C_set_cache_auto_resize_config() */ + + +/*------------------------------------------------------------------------- + * Function: H5C_set_skip_flags + * + * Purpose: Set the values of the skip sanity check flags. + * + * This function and the skip sanity check flags were created + * for the convenience of the test bed. 
However it is + * possible that there may be other uses for the flags. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: John Mainzer + * 6/11/04 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +herr_t +H5C_set_skip_flags(H5C_t * cache_ptr, + hbool_t skip_file_checks, + hbool_t skip_dxpl_id_checks) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(H5C_set_skip_flags, FAIL) + + /* This would normally be an assert, but we need to use an HGOTO_ERROR + * call to shut up the compiler. + */ + if ( ( ! cache_ptr ) || ( cache_ptr->magic != H5C__H5C_T_MAGIC ) ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr") + } + + cache_ptr->skip_file_checks = skip_file_checks; + cache_ptr->skip_dxpl_id_checks = skip_dxpl_id_checks; + +done: + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5C_set_skip_flags() */ /*------------------------------------------------------------------------- @@ -3355,7 +3845,7 @@ H5C_stats(H5C_t * cache_ptr, (long)(cache_ptr->max_index_len)); HDfprintf(stdout, - " current (max) skip list size / length = %ld (%ld) / %ld (%ld)\n", + " current (max) slist size / length = %ld (%ld) / %ld (%ld)\n", (long)(cache_ptr->slist_size), (long)(cache_ptr->max_slist_size), (long)(cache_ptr->slist_len), @@ -3378,234 +3868,1400 @@ H5C_stats(H5C_t * cache_ptr, (long)(cache_ptr->cLRU_list_size), (long)(cache_ptr->cLRU_list_len)); - HDfprintf(stdout, - " current dirty LRU size / length = %ld / %ld\n", - (long)(cache_ptr->dLRU_list_size), - (long)(cache_ptr->dLRU_list_len)); + HDfprintf(stdout, + " current dirty LRU size / length = %ld / %ld\n", + (long)(cache_ptr->dLRU_list_size), + (long)(cache_ptr->dLRU_list_len)); + + HDfprintf(stdout, + " Total hits / misses / hit_rate = %ld / %ld / %f\n", + (long)total_hits, + (long)total_misses, + hit_rate); + + HDfprintf(stdout, + " Total clears / flushes / evictions = %ld / %ld / %ld\n", + (long)total_clears, + (long)total_flushes, + (long)total_evictions); + + HDfprintf(stdout, " Total insertions / renames = %ld / %ld\n", + (long)total_insertions, + (long)total_renames); + +#if H5C_COLLECT_CACHE_ENTRY_STATS + + HDfprintf(stdout, " aggregate max / min accesses = %d / %d\n", + (int)aggregate_max_accesses, + (int)aggregate_min_accesses); + + HDfprintf(stdout, " aggregate max_clears / max_flushes = %d / %d\n", + (int)aggregate_max_clears, + (int)aggregate_max_flushes); + + HDfprintf(stdout, " aggregate max_size = %d\n", + (int)aggregate_max_size); + + +#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */ + + if ( display_detailed_stats ) + { + + for ( i = 0; i <= cache_ptr->max_type_id; i++ ) { + + HDfprintf(stdout, "\n"); + + HDfprintf(stdout, " Stats on %s:\n", + ((cache_ptr->type_name_table_ptr))[i]); + + if ( ( cache_ptr->hits[i] > 0 ) || ( cache_ptr->misses[i] > 0 ) ) { + + hit_rate = 100.0 * ((double)(cache_ptr->hits[i])) / + ((double)(cache_ptr->hits[i] + cache_ptr->misses[i])); + } else { + hit_rate = 0.0; + } + + HDfprintf(stdout, + " hits / misses / hit_rate = %ld / %ld / %f\n", + (long)(cache_ptr->hits[i]), + (long)(cache_ptr->misses[i]), + hit_rate); + + HDfprintf(stdout, + " clears / flushes / evictions = %ld / %ld / %ld\n", + (long)(cache_ptr->clears[i]), + (long)(cache_ptr->flushes[i]), + (long)(cache_ptr->evictions[i])); + + HDfprintf(stdout, + " insertions / renames = %ld / %ld\n", + (long)(cache_ptr->insertions[i]), + (long)(cache_ptr->renames[i])); + +#if H5C_COLLECT_CACHE_ENTRY_STATS + + HDfprintf(stdout, + " entry max / min accesses = 
%d / %d\n", + cache_ptr->max_accesses[i], + cache_ptr->min_accesses[i]); + + HDfprintf(stdout, + " entry max_clears / max_flushes = %d / %d\n", + cache_ptr->max_clears[i], + cache_ptr->max_flushes[i]); + + HDfprintf(stdout, + " entry max_size = %d\n", + (int)(cache_ptr->max_size[i])); + + +#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */ + + } + } + + HDfprintf(stdout, "\n"); + +#endif /* H5C_COLLECT_CACHE_STATS */ + +done: + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5C_stats() */ + + +/*------------------------------------------------------------------------- + * + * Function: H5C_stats__reset + * + * Purpose: Reset the stats fields to their initial values. + * + * Return: void + * + * Programmer: John Mainzer, 4/28/04 + * + * Modifications: + * + * JRM - 7/21/04 + * Updated for hash table related statistics. + * + *------------------------------------------------------------------------- + */ + +void +H5C_stats__reset(H5C_t * cache_ptr) +{ +#if H5C_COLLECT_CACHE_STATS + int i; +#endif /* H5C_COLLECT_CACHE_STATS */ + + HDassert( cache_ptr ); + HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC ); + +#if H5C_COLLECT_CACHE_STATS + for ( i = 0; i <= cache_ptr->max_type_id; i++ ) + { + cache_ptr->hits[i] = 0; + cache_ptr->misses[i] = 0; + cache_ptr->insertions[i] = 0; + cache_ptr->clears[i] = 0; + cache_ptr->flushes[i] = 0; + cache_ptr->evictions[i] = 0; + cache_ptr->renames[i] = 0; + } + + cache_ptr->total_ht_insertions = 0; + cache_ptr->total_ht_deletions = 0; + cache_ptr->successful_ht_searches = 0; + cache_ptr->total_successful_ht_search_depth = 0; + cache_ptr->failed_ht_searches = 0; + cache_ptr->total_failed_ht_search_depth = 0; + + cache_ptr->max_index_len = 0; + cache_ptr->max_index_size = (size_t)0; + + cache_ptr->max_slist_len = 0; + cache_ptr->max_slist_size = (size_t)0; + + cache_ptr->max_pl_len = 0; + cache_ptr->max_pl_size = (size_t)0; + +#if H5C_COLLECT_CACHE_ENTRY_STATS + + for ( i = 0; i <= cache_ptr->max_type_id; i++ ) + { + cache_ptr->max_accesses[i] = 0; + cache_ptr->min_accesses[i] = 1000000; + cache_ptr->max_clears[i] = 0; + cache_ptr->max_flushes[i] = 0; + cache_ptr->max_size[i] = (size_t)0; + } + +#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */ +#endif /* H5C_COLLECT_CACHE_STATS */ + + return; + +} /* H5C_stats__reset() */ + + +/*------------------------------------------------------------------------- + * Function: H5C_unprotect + * + * Purpose: Undo an H5C_protect() call -- specifically, mark the + * entry as unprotected, remove it from the protected list, + * and give it back to the replacement policy. + * + * The TYPE and ADDR arguments must be the same as those in + * the corresponding call to H5C_protect() and the THING + * argument must be the value returned by that call to + * H5C_protect(). + * + * The primary_dxpl_id and secondary_dxpl_id parameters + * specify the dxpl_ids used on the first write occasioned + * by the unprotect (primary_dxpl_id), and on all subsequent + * writes (secondary_dxpl_id). Since an uprotect cannot + * occasion a write at present, all this is moot for now. + * However, things change, and in any case, + * H5C_flush_single_entry() needs primary_dxpl_id and + * secondary_dxpl_id in its parameter list. + * + * The function can't cause a read either, so the dxpl_id + * parameters are moot in this case as well. + * + * Return: Non-negative on success/Negative on failure + * + * If the deleted flag is TRUE, simply remove the target entry + * from the cache, clear it, and free it without writing it to + * disk. 
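The per-type max/min access statistics printed above rely on H5C_stats__reset() seeding the per-type minimum with a large sentinel, so that the first real observation always replaces it. A toy update routine shows the running min/max pattern in isolation.

/* Sketch only: the running min/max update that motivates seeding
 * min_accesses[] with a large sentinel in H5C_stats__reset().
 */
static void toy_update_access_stats(int accesses, int *max_p, int *min_p)
{
    if (accesses > *max_p)
        *max_p = accesses;

    if (accesses < *min_p)      /* sentinel guarantees this fires once */
        *min_p = accesses;
}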
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: John Mainzer + * 6/2/04 + * + * Modifications: + * + * JRM -- 7/21/04 + * Updated the function for the addition of the hash table. + * In particular, we now add dirty entries to the tree if + * they aren't in the tree already. + * + *------------------------------------------------------------------------- + */ +herr_t +H5C_unprotect(H5F_t * f, + hid_t primary_dxpl_id, + hid_t secondary_dxpl_id, + H5C_t * cache_ptr, + const H5C_class_t * type, + haddr_t addr, + void * thing, + hbool_t deleted) +{ + herr_t ret_value = SUCCEED; /* Return value */ + H5C_cache_entry_t * entry_ptr; + H5C_cache_entry_t * test_entry_ptr; + + FUNC_ENTER_NOAPI(H5C_unprotect, FAIL) + + HDassert( cache_ptr ); + HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC ); + HDassert( cache_ptr->skip_file_checks || f ); + HDassert( type ); + HDassert( type->clear ); + HDassert( type->flush ); + HDassert( H5F_addr_defined(addr) ); + HDassert( thing ); + + entry_ptr = (H5C_cache_entry_t *)thing; + + HDassert( entry_ptr->addr == addr ); + HDassert( entry_ptr->type == type ); + + if ( ! (entry_ptr->is_protected) ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \ + "Entry already unprotected??") + } + + H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, FAIL) + + entry_ptr->is_protected = FALSE; + + /* add the entry to the tree if it is dirty, and it isn't already in + * the tree. + */ + + if ( ( entry_ptr->is_dirty ) && ( ! (entry_ptr->in_slist) ) ) { + + H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr) + } + + /* this implementation of the "deleted" option is a bit inefficient, as + * we re-insert the entry to be deleted into the replacement policy + * data structures, only to remove them again. Depending on how often + * we do this, we may want to optimize a bit. + * + * On the other hand, this implementation is reasonably clean, and + * makes good use of existing code. + * JRM - 5/19/04 + */ + if ( deleted ) { + + /* the following first flush flag will never be used as we are + * calling H5C_flush_single_entry with both the H5F_FLUSH_CLEAR_ONLY + * and H5F_FLUSH_INVALIDATE flags. However, it is needed for the + * function call. + */ + hbool_t dummy_first_flush = TRUE; + + /* verify that the target entry is in the cache. */ + + H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL) + + if ( test_entry_ptr == NULL ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \ + "entry not in hash table?!?.") + } + else if ( test_entry_ptr != entry_ptr ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \ + "hash table contains multiple entries for addr?!?.") + } + + if ( H5C_flush_single_entry(f, + primary_dxpl_id, + secondary_dxpl_id, + cache_ptr, + type, + addr, + (H5F_FLUSH_CLEAR_ONLY|H5F_FLUSH_INVALIDATE), + &dummy_first_flush, + TRUE) < 0 ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't flush.") + } + } + + H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr) + +done: + + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5C_unprotect() */ + + +/*************************************************************************/ +/**************************** Private Functions: *************************/ +/*************************************************************************/ + +/*------------------------------------------------------------------------- + * + * Function: H5C__auto_adjust_cache_size + * + * Purpose: Obtain the current full cache hit rate, and compare it + * with the hit rate thresholds for modifying cache size. 
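The rules enforced by H5C_unprotect() above can be restated compactly: a newly unprotected dirty entry enters the dirty skip list exactly once, and an entry unprotected with deleted == TRUE leaves the cache without being written. The toy routine below captures just that logic; the real code uses the H5C index, slist, and replacement policy macros rather than these flags.

typedef struct {
    int is_protected;
    int is_dirty;
    int in_slist;
    int in_cache;
} toy_entry;

static int toy_unprotect(toy_entry *e, int deleted)
{
    if (!e->is_protected)
        return -1;                 /* "Entry already unprotected??" */

    e->is_protected = 0;

    if (e->is_dirty && !e->in_slist)
        e->in_slist = 1;           /* H5C__INSERT_ENTRY_IN_SLIST    */

    if (deleted) {                 /* clear and evict, no write     */
        e->in_slist = 0;
        e->is_dirty = 0;
        e->in_cache = 0;
    }

    return 0;
}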
+ * If one of the thresholds has been crossed, adjusts the + * size of the cache accordingly. + * + * The function then resets the full cache hit rate + * statistics, and exits. + * + * Return: Non-negative on success/Negative on failure or if there was + * an attempt to flush a protected item. + * + * + * Programmer: John Mainzer, 10/7/04 + * + * Modifications: + * + * JRM -- 11/18/04 + * Major re-write to support ageout method of cache size + * reduction, and to adjust to changes in the + * H5C_auto_size_ctl_t structure. + * + *------------------------------------------------------------------------- + */ + +static herr_t +H5C__auto_adjust_cache_size(H5C_t * cache_ptr, + H5F_t * f, + hid_t primary_dxpl_id, + hid_t secondary_dxpl_id, + hbool_t write_permitted, + hbool_t * first_flush_ptr) +{ + herr_t ret_value = SUCCEED; /* Return value */ + herr_t result; + hbool_t inserted_epoch_marker = FALSE; + size_t new_max_cache_size = 0; + size_t old_max_cache_size = 0; + size_t new_min_clean_size = 0; + size_t old_min_clean_size = 0; + double hit_rate; + enum H5C_resize_status status = in_spec; /* will change if needed */ + + FUNC_ENTER_NOAPI_NOINIT(H5C__auto_adjust_cache_size) + + HDassert( cache_ptr ); + HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC ); + HDassert( cache_ptr->cache_accesses >= + (cache_ptr->resize_ctl).epoch_length ); + HDassert( 0.0 <= (cache_ptr->resize_ctl).min_clean_fraction ); + HDassert( (cache_ptr->resize_ctl).min_clean_fraction <= 100.0 ); + + if ( !cache_ptr->resize_enabled ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Auto cache resize disabled.") + } + + HDassert( ( (cache_ptr->resize_ctl).incr_mode != H5C_incr__off ) || \ + ( (cache_ptr->resize_ctl).decr_mode != H5C_decr__off ) ); + + if ( H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate.") + } + + HDassert( ( 0.0 <= hit_rate ) && ( hit_rate <= 1.0 ) ); + + switch ( (cache_ptr->resize_ctl).incr_mode ) + { + case H5C_incr__off: + if ( cache_ptr->size_increase_possible ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ + "size_increase_possible but H5C_incr__off?!?!?") + } + break; + + case H5C_incr__threshold: + if ( hit_rate < (cache_ptr->resize_ctl).lower_hr_threshold ) { + + if ( ! cache_ptr->size_increase_possible ) { + + status = increase_disabled; + + } else if ( cache_ptr->max_cache_size >= + (cache_ptr->resize_ctl).max_size ) { + + HDassert( cache_ptr->max_cache_size == \ + (cache_ptr->resize_ctl).max_size ); + status = at_max_size; + + } else if ( ! cache_ptr->cache_full ) { + + status = not_full; + + } else { + + new_max_cache_size = (size_t) + (((double)(cache_ptr->max_cache_size)) * + (cache_ptr->resize_ctl).increment); + + /* clip to max size if necessary */ + if ( new_max_cache_size > + (cache_ptr->resize_ctl).max_size ) { + + new_max_cache_size = (cache_ptr->resize_ctl).max_size; + } + + /* clip to max increment if necessary */ + if ( ( (cache_ptr->resize_ctl).apply_max_increment ) && + ( (cache_ptr->max_cache_size + + (cache_ptr->resize_ctl).max_increment) < + new_max_cache_size ) ) { + + new_max_cache_size = cache_ptr->max_cache_size + + (cache_ptr->resize_ctl).max_increment; + } + + status = increase; + } + } + break; + + default: + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode.") + break; + } + + /* If the decr_mode is either age out or age out with threshold, we + * must run the marker maintenance code, whether we run the size + * reduction code or not. 
We do this in two places -- here we + * insert a new marker if the number of active epoch markers is + * is less than the the current epochs before eviction, and after + * the ageout call, we cycle the markers. + * + * However, we can't call the ageout code or cycle the markers + * unless there was a full complement of markers in place on + * entry. The inserted_epoch_marker flag is used to track this. + */ + + if ( ( ( (cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out ) + || + ( (cache_ptr->resize_ctl).decr_mode == + H5C_decr__age_out_with_threshold + ) + ) + && + ( cache_ptr->epoch_markers_active < + (cache_ptr->resize_ctl).epochs_before_eviction + ) + ) { + + result = H5C__autoadjust__ageout__insert_new_marker(cache_ptr); + + if ( result != SUCCEED ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ + "can't insert new epoch marker.") + + } else { + + inserted_epoch_marker = TRUE; + } + } + + /* don't run the cache size decrease code unless the cache size + * increase code is disabled, or the size increase code sees no need + * for action. In either case, status == in_spec at this point. + */ + + if ( status == in_spec ) { + + switch ( (cache_ptr->resize_ctl).decr_mode ) + { + case H5C_decr__off: + break; + + case H5C_decr__threshold: + if ( hit_rate > (cache_ptr->resize_ctl).upper_hr_threshold ) { + + if ( ! cache_ptr->size_decrease_possible ) { + + status = decrease_disabled; + + } else if ( cache_ptr->max_cache_size <= + (cache_ptr->resize_ctl).min_size ) { + + HDassert( cache_ptr->max_cache_size == + (cache_ptr->resize_ctl).min_size ); + status = at_min_size; + + } else { + + new_max_cache_size = (size_t) + (((double)(cache_ptr->max_cache_size)) * + (cache_ptr->resize_ctl).decrement); + + /* clip to min size if necessary */ + if ( new_max_cache_size < + (cache_ptr->resize_ctl).min_size ) { + + new_max_cache_size = + (cache_ptr->resize_ctl).min_size; + } + + /* clip to max decrement if necessary */ + if ( ( (cache_ptr->resize_ctl).apply_max_decrement ) && + ( ((cache_ptr->resize_ctl).max_decrement + + new_max_cache_size) < + cache_ptr->max_cache_size ) ) { + + new_max_cache_size = cache_ptr->max_cache_size - + (cache_ptr->resize_ctl).max_decrement; + } + + status = decrease; + } + } + break; + + case H5C_decr__age_out_with_threshold: + case H5C_decr__age_out: + if ( ! inserted_epoch_marker ) { + + if ( ! cache_ptr->size_decrease_possible ) { + + status = decrease_disabled; + + } else { + + result = H5C__autoadjust__ageout(cache_ptr, + hit_rate, + &status, + &new_max_cache_size, + f, + primary_dxpl_id, + secondary_dxpl_id, + write_permitted, + first_flush_ptr); + + if ( result != SUCCEED ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ + "ageout code failed.") + } + } + } + break; + + default: + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode.") + break; + } + } + + /* cycle the epoch markers here if appropriate */ + if ( ( ( (cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out ) + || + ( (cache_ptr->resize_ctl).decr_mode == + H5C_decr__age_out_with_threshold + ) + ) + && + ( ! 
inserted_epoch_marker ) + ) { + + /* move last epoch marker to the head of the LRU list */ + result = H5C__autoadjust__ageout__cycle_epoch_marker(cache_ptr); + + if ( result != SUCCEED ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ + "error cycling epoch marker.") + } + } + + if ( ( status == increase ) || ( status == decrease ) ) { + + old_max_cache_size = cache_ptr->max_cache_size; + old_min_clean_size = cache_ptr->min_clean_size; + + new_min_clean_size = (size_t) + ((double)new_max_cache_size * + ((cache_ptr->resize_ctl).min_clean_fraction)); + + /* new_min_clean_size is of size_t, and thus must be non-negative. + * Hence we have + * + * ( 0 <= new_min_clean_size ). + * + * by definition. + */ + HDassert( new_min_clean_size <= new_max_cache_size ); + HDassert( (cache_ptr->resize_ctl).min_size <= new_max_cache_size ); + HDassert( new_max_cache_size <= (cache_ptr->resize_ctl).max_size ); + + cache_ptr->max_cache_size = new_max_cache_size; + cache_ptr->min_clean_size = new_min_clean_size; + + if ( status == increase ) { + + cache_ptr->cache_full = FALSE; + + } else if ( status == decrease ) { + + cache_ptr->size_decreased = TRUE; + } + } + + if ( (cache_ptr->resize_ctl).rpt_fcn != NULL ) { + + (*((cache_ptr->resize_ctl).rpt_fcn)) + (cache_ptr, + H5C__CURR_AUTO_RESIZE_RPT_FCN_VER, + hit_rate, + status, + old_max_cache_size, + new_max_cache_size, + old_min_clean_size, + new_min_clean_size); + } + + if ( H5C_reset_cache_hit_rate_stats(cache_ptr) != SUCCEED ) { + + /* this should be impossible... */ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ + "H5C_reset_cache_hit_rate_stats failed.") + } + +done: + + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5C__auto_adjust_cache_size() */ + + +/*------------------------------------------------------------------------- + * + * Function: H5C__autoadjust__ageout + * + * Purpose: Implement the ageout automatic cache size decrement + * algorithm. Note that while this code evicts aged out + * entries, the code does not change the maximum cache size. + * Instead, the function simply computes the new value (if + * any change is indicated) and reports this value in + * *new_max_cache_size_ptr. + * + * Return: Non-negative on success/Negative on failure or if there was + * an attempt to flush a protected item. + * + * + * Programmer: John Mainzer, 11/18/04 + * + * Modifications: + * + * None. 
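The rpt_fcn call above passes the cache pointer, the report version, the epoch hit rate, the resize status, and the old and new size limits. A possible reporting callback is sketched below; it assumes the callback typedef in H5Cprivate.h takes the arguments in the order they are passed above and returns void, and it is not the library's default report function.

static void
toy_resize_rpt_fcn(H5C_t * cache_ptr,
                   int32_t version,
                   double hit_rate,
                   enum H5C_resize_status status,
                   size_t old_max_cache_size,
                   size_t new_max_cache_size,
                   size_t old_min_clean_size,
                   size_t new_min_clean_size)
{
    if ( ( cache_ptr == NULL ) ||
         ( version != H5C__CURR_AUTO_RESIZE_RPT_FCN_VER ) ) {

        return;   /* nothing sensible to report */
    }

    HDfprintf(stdout,
              "auto resize: hit rate = %f, status = %d\n",
              hit_rate, (int)status);

    HDfprintf(stdout,
              "  max size: %d -> %d, min clean size: %d -> %d\n",
              (int)old_max_cache_size, (int)new_max_cache_size,
              (int)old_min_clean_size, (int)new_min_clean_size);
}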
+ * + *------------------------------------------------------------------------- + */ + +static herr_t +H5C__autoadjust__ageout(H5C_t * cache_ptr, + double hit_rate, + enum H5C_resize_status * status_ptr, + size_t * new_max_cache_size_ptr, + H5F_t * f, + hid_t primary_dxpl_id, + hid_t secondary_dxpl_id, + hbool_t write_permitted, + hbool_t * first_flush_ptr) +{ + herr_t ret_value = SUCCEED; /* Return value */ + herr_t result; + size_t test_size; + + FUNC_ENTER_NOAPI_NOINIT(H5C__autoadjust__ageout) + + HDassert( cache_ptr ); + HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC ); + HDassert( ( status_ptr ) && ( *status_ptr == in_spec ) ); + HDassert( ( new_max_cache_size_ptr ) && ( *new_max_cache_size_ptr == 0 ) ); + + /* remove excess epoch markers if any */ + if ( cache_ptr->epoch_markers_active > + (cache_ptr->resize_ctl).epochs_before_eviction ) { + + result = H5C__autoadjust__ageout__remove_excess_markers(cache_ptr); + + if ( result != SUCCEED ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ + "can't remove excess epoch markers.") + } + } + + if ( ( (cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out ) + || + ( ( (cache_ptr->resize_ctl).decr_mode == + H5C_decr__age_out_with_threshold + ) + && + ( hit_rate >= (cache_ptr->resize_ctl).upper_hr_threshold ) + ) + ) { + + if ( cache_ptr->max_cache_size > (cache_ptr->resize_ctl).min_size ){ + + /* evict aged out cache entries if appropriate... */ + result = H5C__autoadjust__ageout__evict_aged_out_entries + ( + f, + primary_dxpl_id, + secondary_dxpl_id, + cache_ptr, + write_permitted, + first_flush_ptr + ); + + if ( result != SUCCEED ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ + "error flushing aged out entries.") + } + + /* ... and then reduce cache size if appropriate */ + if ( cache_ptr->index_size < cache_ptr->max_cache_size ) { + + if ( (cache_ptr->resize_ctl).apply_empty_reserve ) { + + test_size = (size_t)(((double)cache_ptr->index_size) / + (1 - (cache_ptr->resize_ctl).empty_reserve)); + + if ( test_size < cache_ptr->max_cache_size ) { + + *status_ptr = decrease; + *new_max_cache_size_ptr = test_size; + } + } else { + + *status_ptr = decrease; + *new_max_cache_size_ptr = cache_ptr->index_size; + } + + if ( *status_ptr == decrease ) { + + /* clip to min size if necessary */ + if ( *new_max_cache_size_ptr < + (cache_ptr->resize_ctl).min_size ) { + + *new_max_cache_size_ptr = + (cache_ptr->resize_ctl).min_size; + } + + /* clip to max decrement if necessary */ + if ( ( (cache_ptr->resize_ctl).apply_max_decrement ) && + ( ((cache_ptr->resize_ctl).max_decrement + + *new_max_cache_size_ptr) < + cache_ptr->max_cache_size ) ) { + + *new_max_cache_size_ptr = cache_ptr->max_cache_size - + (cache_ptr->resize_ctl).max_decrement; + } + } + } + } else { + + *status_ptr = at_min_size; + } + } + +done: + + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5C__autoadjust__ageout() */ + + +/*------------------------------------------------------------------------- + * + * Function: H5C__autoadjust__ageout__cycle_epoch_marker + * + * Purpose: Remove the oldest epoch marker from the LRU list, + * and reinsert it at the head of the LRU list. Also + * remove the epoch marker's index from the head of the + * ring buffer, and re-insert it at the tail of the ring + * buffer. + * + * Return: SUCCEED on success/FAIL on failure. + * + * Programmer: John Mainzer, 11/22/04 + * + * Modifications: + * + * None. 
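The size proposal made by the ageout code above reduces to a single computation: size the cache so that empty_reserve of the new maximum is left unused, then clip to the configured minimum size and to the maximum single-step decrement. A standalone sketch of that arithmetic, with toy parameters in place of the resize_ctl fields:

#include <stddef.h>

static size_t
toy_ageout_new_max(size_t index_size,       /* bytes left after eviction  */
                   size_t cur_max,          /* current max_cache_size     */
                   size_t min_size,         /* resize_ctl.min_size        */
                   size_t max_decrement,    /* resize_ctl.max_decrement   */
                   int    apply_max_decrement,
                   double empty_reserve)    /* 0.0 <= empty_reserve < 1.0 */
{
    size_t new_max = (size_t)((double)index_size / (1.0 - empty_reserve));

    /* clip to min size if necessary */
    if (new_max < min_size)
        new_max = min_size;

    /* clip to max decrement if necessary */
    if (apply_max_decrement && ((max_decrement + new_max) < cur_max))
        new_max = cur_max - max_decrement;

    return new_max;
}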
+ * + *------------------------------------------------------------------------- + */ + +static herr_t +H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t * cache_ptr) +{ + herr_t ret_value = SUCCEED; /* Return value */ + int i; + + FUNC_ENTER_NOAPI_NOINIT(H5C__autoadjust__ageout__cycle_epoch_marker) + + HDassert( cache_ptr ); + HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC ); + + if ( cache_ptr->epoch_markers_active <= 0 ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ + "No active epoch markers on entry?!?!?.") + } + + /* remove the last marker from both the ring buffer and the LRU list */ + + i = cache_ptr->epoch_marker_ringbuf[cache_ptr->epoch_marker_ringbuf_first]; + + cache_ptr->epoch_marker_ringbuf_first = + (cache_ptr->epoch_marker_ringbuf_first + 1) % + (H5C__MAX_EPOCH_MARKERS + 1); + + cache_ptr->epoch_marker_ringbuf_size -= 1; + + if ( cache_ptr->epoch_marker_ringbuf_size < 0 ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow.") + } + + if ( (cache_ptr->epoch_marker_active)[i] != TRUE ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?") + } + + H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), \ + (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, \ + (FAIL)) + + /* now, re-insert it at the head of the LRU list, and at the tail of + * the ring buffer. + */ + + HDassert( ((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i ); + HDassert( ((cache_ptr->epoch_markers)[i]).next == NULL ); + HDassert( ((cache_ptr->epoch_markers)[i]).prev == NULL ); + + cache_ptr->epoch_marker_ringbuf_last = + (cache_ptr->epoch_marker_ringbuf_last + 1) % + (H5C__MAX_EPOCH_MARKERS + 1); + + (cache_ptr->epoch_marker_ringbuf)[cache_ptr->epoch_marker_ringbuf_last] = i; + + cache_ptr->epoch_marker_ringbuf_size += 1; + + if ( cache_ptr->epoch_marker_ringbuf_size > H5C__MAX_EPOCH_MARKERS ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow.") + } + + H5C__DLL_PREPEND((&((cache_ptr->epoch_markers)[i])), \ + (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, \ + (FAIL)) +done: + + FUNC_LEAVE_NOAPI(ret_value) + +} /* H5C__autoadjust__ageout__cycle_epoch_marker() */ + + +/*------------------------------------------------------------------------- + * + * Function: H5C__autoadjust__ageout__evict_aged_out_entries + * + * Purpose: Evict clean entries in the cache that haven't + * been accessed for at least + * (cache_ptr->resize_ctl).epochs_before_eviction epochs, + * and flush dirty entries that haven't been accessed for + * that amount of time. + * + * Depending on configuration, the function will either + * flush or evict all such entries, or all such entries it + * encounters until it has freed the maximum amount of space + * allowed under the maximum decrement. + * + * If we are running in parallel mode, writes may not be + * permitted. If so, the function simply skips any dirty + * entries it may encounter. + * + * The function makes no attempt to maintain the minimum + * clean size, as there is no guarantee that the cache size + * will be changed. + * + * If there is no cache size change, the minimum clean size + * constraint will be met through a combination of clean + * entries and free space in the cache. + * + * If there is a cache size reduction, the minimum clean size + * will be re-calculated, and will be enforced the next time + * we have to make space in the cache. 
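The epoch marker bookkeeping above is a fixed-size ring buffer of marker indices, with the first/last positions advanced modulo one more than the maximum marker count. The toy version below isolates that push/pop arithmetic; TOY_N stands in for H5C__MAX_EPOCH_MARKERS, and the size checks that the cache code performs are left to the caller here.

#define TOY_N 10

typedef struct {
    int buf[TOY_N + 1];
    int first;   /* index of the oldest element      */
    int last;    /* index of the most recent element */
    int size;    /* number of elements in the ring   */
} toy_ring;

static int toy_ring_pop_oldest(toy_ring *r)           /* caller checks size > 0 */
{
    int v = r->buf[r->first];

    r->first = (r->first + 1) % (TOY_N + 1);
    r->size -= 1;

    return v;
}

static void toy_ring_push_newest(toy_ring *r, int v)  /* caller checks size <= TOY_N */
{
    r->last = (r->last + 1) % (TOY_N + 1);
    r->buf[r->last] = v;
    r->size += 1;
}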
+ * + * The primary_dxpl_id and secondary_dxpl_id parameters + * specify the dxpl_ids used depending on the value of + * *first_flush_ptr. The idea is to use the primary_dxpl_id + * on the first write in a sequence of writes, and to use + * the secondary_dxpl_id on all subsequent writes. + * + * This is useful in the metadata cache, but may not be + * needed elsewhere. If so, just use the same dxpl_id for + * both parameters. + * + * Observe that this function cannot occasion a read. + * + * Return: Non-negative on success/Negative on failure. + * + * Programmer: John Mainzer, 11/22/04 + * + * Modifications: + * + * None. + * + *------------------------------------------------------------------------- + */ + +static herr_t +H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f, + hid_t primary_dxpl_id, + hid_t secondary_dxpl_id, + H5C_t * cache_ptr, + hbool_t write_permitted, + hbool_t * first_flush_ptr) +{ + herr_t ret_value = SUCCEED; /* Return value */ + herr_t result; + size_t eviction_size_limit; + size_t bytes_evicted = 0; + H5C_cache_entry_t * entry_ptr; + H5C_cache_entry_t * prev_ptr; + + FUNC_ENTER_NOAPI_NOINIT(H5C__autoadjust__ageout__evict_aged_out_entries) + + HDassert( cache_ptr ); + HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC ); + + /* if there is a limit on the amount that the cache size can be decrease + * in any one round of the cache size reduction algorithm, load that + * limit into eviction_size_limit. Otherwise, set eviction_size_limit + * to the equivalent of infinity. The current size of the index will + * do nicely. + */ + if ( (cache_ptr->resize_ctl).apply_max_decrement ) { + + eviction_size_limit = (cache_ptr->resize_ctl).max_decrement; + + } else { + + eviction_size_limit = cache_ptr->index_size; /* i.e. infinity */ + } + + if ( write_permitted ) { + + entry_ptr = cache_ptr->LRU_tail_ptr; + + while ( ( entry_ptr != NULL ) && + ( (entry_ptr->type)->id != H5C__EPOCH_MARKER_TYPE ) && + ( bytes_evicted < eviction_size_limit ) ) + { + HDassert( ! (entry_ptr->is_protected) ); + + prev_ptr = entry_ptr->prev; + + if ( entry_ptr->is_dirty ) { + + result = H5C_flush_single_entry(f, + primary_dxpl_id, + secondary_dxpl_id, + cache_ptr, + entry_ptr->type, + entry_ptr->addr, + (unsigned)0, + first_flush_ptr, + FALSE); + } else { + + bytes_evicted += entry_ptr->size; + + result = H5C_flush_single_entry(f, + primary_dxpl_id, + secondary_dxpl_id, + cache_ptr, + entry_ptr->type, + entry_ptr->addr, + H5F_FLUSH_INVALIDATE, + first_flush_ptr, + TRUE); + } + + if ( result < 0 ) { + + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \ + "unable to flush entry") + } + + entry_ptr = prev_ptr; + + } /* end while */ + + /* for now at least, don't bother to maintain the minimum clean size, + * as the cache should now be less than its maximum size. Due to + * the vaguries of the cache size reduction algorthim, we may not + * reduce the size of the cache. + * + * If we do, we will calculate a new minimum clean size, which will + * be enforced the next time we try to make space in the cache. + * + * If we don't, no action is necessary, as we have just evicted and/or + * or flushed a bunch of entries and therefore the sum of the clean + * and free space in the cache must be greater than or equal to the + * min clean space requirement (assuming that requirement was met on + * entry). + */ + + } else /* ! 
write_permitted */ { + + /* since we are not allowed to write, all we can do is evict + * any clean entries that we may encounter before we either + * hit the eviction size limit, or encounter the epoch marker. + * + * If we are operating read only, this isn't an issue, as there + * will not be any dirty entries. + * + * If we are operating in R/W mode, all the dirty entries we + * skip will be flushed the next time we attempt to make space + * when writes are permitted. This may have some local + * performance implications, but it shouldn't cause any net + * slowdown. + */ + + HDassert( H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS ); + + entry_ptr = cache_ptr->LRU_tail_ptr; + + while ( ( entry_ptr != NULL ) && + ( (entry_ptr->type)->id != H5C__EPOCH_MARKER_TYPE ) && + ( bytes_evicted < eviction_size_limit ) ) + { + HDassert( ! (entry_ptr->is_protected) ); + + prev_ptr = entry_ptr->prev; + + if ( ! (entry_ptr->is_dirty) ) { + + result = H5C_flush_single_entry(f, + primary_dxpl_id, + secondary_dxpl_id, + cache_ptr, + entry_ptr->type, + entry_ptr->addr, + H5F_FLUSH_INVALIDATE, + first_flush_ptr, + TRUE); + + if ( result < 0 ) { - HDfprintf(stdout, - " Total hits / misses / hit_rate = %ld / %ld / %f\n", - (long)total_hits, - (long)total_misses, - hit_rate); + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \ + "unable to flush clean entry") + } + } + /* just skip the entry if it is dirty, as we can't do + * anything with it now since we can't write. + */ - HDfprintf(stdout, - " Total clears / flushes / evictions = %ld / %ld / %ld\n", - (long)total_clears, - (long)total_flushes, - (long)total_evictions); + entry_ptr = prev_ptr; - HDfprintf(stdout, " Total insertions / renames = %ld / %ld\n", - (long)total_insertions, - (long)total_renames); + } /* end while */ + } -#if H5C_COLLECT_CACHE_ENTRY_STATS + if ( cache_ptr->index_size < cache_ptr->max_cache_size ) { - HDfprintf(stdout, " aggregate max / min accesses = %d / %d\n", - (int)aggregate_max_accesses, - (int)aggregate_min_accesses); + cache_ptr->cache_full = FALSE; + } - HDfprintf(stdout, " aggregate max_clears / max_flushes = %d / %d\n", - (int)aggregate_max_clears, - (int)aggregate_max_flushes); +done: - HDfprintf(stdout, " aggregate max_size = %d\n", - (int)aggregate_max_size); + FUNC_LEAVE_NOAPI(ret_value) +} /* H5C__autoadjust__ageout__evict_aged_out_entries() */ -#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */ + +/*------------------------------------------------------------------------- + * + * Function: H5C__autoadjust__ageout__insert_new_marker + * + * Purpose: Find an unused marker cache entry, mark it as used, and + * insert it at the head of the LRU list. Also add the + * marker's index in the epoch_markers array. + * + * Return: SUCCEED on success/FAIL on failure. + * + * Programmer: John Mainzer, 11/19/04 + * + * Modifications: + * + * None. 
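The eviction pass above is a bounded walk from the LRU tail: it stops at the first epoch marker or once the eviction budget is spent, evicts clean entries, and flushes (or, when writes are forbidden, skips) dirty ones. A toy rendition of that loop, with plain flags in place of the cache entry type and the flush call elided:

#include <stddef.h>

typedef struct toy_lru_entry {
    struct toy_lru_entry *prev;
    size_t size;
    int    is_dirty;
    int    is_epoch_marker;
} toy_lru_entry;

static size_t
toy_evict_aged(toy_lru_entry *tail, size_t budget, int write_permitted)
{
    size_t evicted = 0;
    toy_lru_entry *e = tail;

    while ((e != NULL) && (!e->is_epoch_marker) && (evicted < budget)) {

        toy_lru_entry *prev = e->prev;   /* grab before e is destroyed */

        if (!e->is_dirty) {
            evicted += e->size;          /* evict the clean entry      */
        } else if (write_permitted) {
            /* flush the dirty entry in place; as in the code above,
             * a flush does not count against the eviction budget.
             */
        }
        /* dirty entries are simply skipped when writes are forbidden */

        e = prev;
    }

    return evicted;
}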
+ * + *------------------------------------------------------------------------- + */ - if ( display_detailed_stats ) - { +static herr_t +H5C__autoadjust__ageout__insert_new_marker(H5C_t * cache_ptr) +{ + herr_t ret_value = SUCCEED; /* Return value */ + int i; - for ( i = 0; i <= cache_ptr->max_type_id; i++ ) { + FUNC_ENTER_NOAPI_NOINIT(H5C__autoadjust__ageout__insert_new_marker) - HDfprintf(stdout, "\n"); + HDassert( cache_ptr ); + HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC ); - HDfprintf(stdout, " Stats on %s:\n", - ((cache_ptr->type_name_table_ptr))[i]); + if ( cache_ptr->epoch_markers_active >= + (cache_ptr->resize_ctl).epochs_before_eviction ) { - if ( ( cache_ptr->hits[i] > 0 ) || ( cache_ptr->misses[i] > 0 ) ) { + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ + "Already have a full complement of markers.") + } - hit_rate = 100.0 * ((double)(cache_ptr->hits[i])) / - ((double)(cache_ptr->hits[i] + cache_ptr->misses[i])); - } else { - hit_rate = 0.0; - } + /* find an unused marker */ + i = 0; + while ( ( (cache_ptr->epoch_marker_active)[i] ) && + ( i < H5C__MAX_EPOCH_MARKERS ) ) + { + i++; + } - HDfprintf(stdout, - " hits / misses / hit_rate = %ld / %ld / %f\n", - (long)(cache_ptr->hits[i]), - (long)(cache_ptr->misses[i]), - hit_rate); + HDassert( i < H5C__MAX_EPOCH_MARKERS ); - HDfprintf(stdout, - " clears / flushes / evictions = %ld / %ld / %ld\n", - (long)(cache_ptr->clears[i]), - (long)(cache_ptr->flushes[i]), - (long)(cache_ptr->evictions[i])); + if ( (cache_ptr->epoch_marker_active)[i] != FALSE ) { - HDfprintf(stdout, - " insertions / renames = %ld / %ld\n", - (long)(cache_ptr->insertions[i]), - (long)(cache_ptr->renames[i])); + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't find unused marker.") + } -#if H5C_COLLECT_CACHE_ENTRY_STATS + HDassert( ((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i ); + HDassert( ((cache_ptr->epoch_markers)[i]).next == NULL ); + HDassert( ((cache_ptr->epoch_markers)[i]).prev == NULL ); - HDfprintf(stdout, - " entry max / min accesses = %d / %d\n", - cache_ptr->max_accesses[i], - cache_ptr->min_accesses[i]); + (cache_ptr->epoch_marker_active)[i] = TRUE; - HDfprintf(stdout, - " entry max_clears / max_flushes = %d / %d\n", - cache_ptr->max_clears[i], - cache_ptr->max_flushes[i]); + cache_ptr->epoch_marker_ringbuf_last = + (cache_ptr->epoch_marker_ringbuf_last + 1) % + (H5C__MAX_EPOCH_MARKERS + 1); - HDfprintf(stdout, - " entry max_size = %d\n", - (int)(cache_ptr->max_size[i])); + (cache_ptr->epoch_marker_ringbuf)[cache_ptr->epoch_marker_ringbuf_last] = i; + cache_ptr->epoch_marker_ringbuf_size += 1; -#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */ + if ( cache_ptr->epoch_marker_ringbuf_size > H5C__MAX_EPOCH_MARKERS ) { - } + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow.") } - HDfprintf(stdout, "\n"); + H5C__DLL_PREPEND((&((cache_ptr->epoch_markers)[i])), \ + (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, \ + (FAIL)) -#endif /* H5C_COLLECT_CACHE_STATS */ + cache_ptr->epoch_markers_active += 1; done: + FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_stats() */ +} /* H5C__autoadjust__ageout__insert_new_marker() */ /*------------------------------------------------------------------------- * - * Function: H5C_stats__reset + * Function: H5C__autoadjust__ageout__remove_all_markers * - * Purpose: Reset the stats fields to their initial values. + * Purpose: Remove all epoch markers from the LRU list and mark them + * as inactive. 
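The unused-slot scan above tests the active flag before the bounds test; the guard at the top of the function means a free slot always exists when the loop runs, so this is harmless, but a toy variant with the tests reordered reads more defensively and returns an explicit "not found" value.

static int toy_find_unused_slot(const int *active, int n_slots)
{
    int i = 0;

    while ((i < n_slots) && active[i])
        i++;

    return (i < n_slots) ? i : -1;   /* -1: no free slot */
}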
* - * Return: void + * Return: SUCCEED on success/FAIL on failure. * - * Programmer: John Mainzer, 4/28/04 + * Programmer: John Mainzer, 11/22/04 * * Modifications: * - * JRM - 7/21/04 - * Updated for hash table related statistics. + * None. * *------------------------------------------------------------------------- */ -void -H5C_stats__reset(H5C_t * cache_ptr) +static herr_t +H5C__autoadjust__ageout__remove_all_markers(H5C_t * cache_ptr) { -#if H5C_COLLECT_CACHE_STATS + herr_t ret_value = SUCCEED; /* Return value */ int i; -#endif /* H5C_COLLECT_CACHE_STATS */ + int ring_buf_index; + + FUNC_ENTER_NOAPI_NOINIT(H5C__autoadjust__ageout__remove_all_markers) HDassert( cache_ptr ); HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC ); -#if H5C_COLLECT_CACHE_STATS - for ( i = 0; i <= cache_ptr->max_type_id; i++ ) + while ( cache_ptr->epoch_markers_active > 0 ) { - cache_ptr->hits[i] = 0; - cache_ptr->misses[i] = 0; - cache_ptr->insertions[i] = 0; - cache_ptr->clears[i] = 0; - cache_ptr->flushes[i] = 0; - cache_ptr->evictions[i] = 0; - cache_ptr->renames[i] = 0; - } + /* get the index of the last epoch marker in the LRU list + * and remove it from the ring buffer. + */ - cache_ptr->total_ht_insertions = 0; - cache_ptr->total_ht_deletions = 0; - cache_ptr->successful_ht_searches = 0; - cache_ptr->total_successful_ht_search_depth = 0; - cache_ptr->failed_ht_searches = 0; - cache_ptr->total_failed_ht_search_depth = 0; + ring_buf_index = cache_ptr->epoch_marker_ringbuf_first; + i = (cache_ptr->epoch_marker_ringbuf)[ring_buf_index]; - cache_ptr->max_index_len = 0; - cache_ptr->max_index_size = (size_t)0; + cache_ptr->epoch_marker_ringbuf_first = + (cache_ptr->epoch_marker_ringbuf_first + 1) % + (H5C__MAX_EPOCH_MARKERS + 1); - cache_ptr->max_slist_len = 0; - cache_ptr->max_slist_size = (size_t)0; + cache_ptr->epoch_marker_ringbuf_size -= 1; - cache_ptr->max_pl_len = 0; - cache_ptr->max_pl_size = (size_t)0; + if ( cache_ptr->epoch_marker_ringbuf_size < 0 ) { -#if H5C_COLLECT_CACHE_ENTRY_STATS + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow.") + } - for ( i = 0; i <= cache_ptr->max_type_id; i++ ) - { - cache_ptr->max_accesses[i] = 0; - cache_ptr->min_accesses[i] = 1000000; - cache_ptr->max_clears[i] = 0; - cache_ptr->max_flushes[i] = 0; - cache_ptr->max_size[i] = (size_t)0; + if ( (cache_ptr->epoch_marker_active)[i] != TRUE ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?") + } + + /* remove the epoch marker from the LRU list */ + H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), \ + (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, \ + (FAIL)) + + /* mark the epoch marker as unused. */ + (cache_ptr->epoch_marker_active)[i] = FALSE; + + HDassert( ((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i ); + HDassert( ((cache_ptr->epoch_markers)[i]).next == NULL ); + HDassert( ((cache_ptr->epoch_markers)[i]).prev == NULL ); + + /* decrement the number of active epoch markers */ + cache_ptr->epoch_markers_active -= 1; + + HDassert( cache_ptr->epoch_markers_active == \ + cache_ptr->epoch_marker_ringbuf_size ); } -#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */ -#endif /* H5C_COLLECT_CACHE_STATS */ +done: - return; + FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_stats__reset() */ +} /* H5C__autoadjust__ageout__remove_all_markers() */ /*------------------------------------------------------------------------- - * Function: H5C_set_skip_flags * - * Purpose: Set the values of the skip sanity check flags. 
- * - * This function and the skip sanity check flags were created - * for the convenience of the test bed. However it is - * possible that there may be other uses for the flags. + * Function: H5C__autoadjust__ageout__remove_excess_markers * - * Return: Non-negative on success/Negative on failure + * Purpose: Remove epoch markers from the end of the LRU list and + * mark them as inactive until the number of active markers + * equals the the current value of + * (cache_ptr->resize_ctl).epochs_before_eviction. * - * Programmer: John Mainzer - * 6/11/04 + * Return: SUCCEED on success/FAIL on failure. + * + * Programmer: John Mainzer, 11/19/04 * * Modifications: * + * None. + * *------------------------------------------------------------------------- */ -herr_t -H5C_set_skip_flags(H5C_t * cache_ptr, - hbool_t skip_file_checks, - hbool_t skip_dxpl_id_checks) +static herr_t +H5C__autoadjust__ageout__remove_excess_markers(H5C_t * cache_ptr) { - herr_t ret_value = SUCCEED; /* Return value */ + herr_t ret_value = SUCCEED; /* Return value */ + int i; + int ring_buf_index; - FUNC_ENTER_NOAPI(H5C_set_skip_flags, FAIL) + FUNC_ENTER_NOAPI_NOINIT(H5C__autoadjust__ageout__remove_excess_markers) - /* This would normally be an assert, but we need to use an HGOTO_ERROR - * call to shut up the compiler. - */ - if ( ( ! cache_ptr ) || ( cache_ptr->magic != H5C__H5C_T_MAGIC ) ) { + HDassert( cache_ptr ); + HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC ); - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr") + if ( cache_ptr->epoch_markers_active <= + (cache_ptr->resize_ctl).epochs_before_eviction ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "no excess markers on entry.") } - cache_ptr->skip_file_checks = skip_file_checks; - cache_ptr->skip_dxpl_id_checks = skip_dxpl_id_checks; + while ( cache_ptr->epoch_markers_active > + (cache_ptr->resize_ctl).epochs_before_eviction ) + { + /* get the index of the last epoch marker in the LRU list + * and remove it from the ring buffer. + */ + + ring_buf_index = cache_ptr->epoch_marker_ringbuf_first; + i = (cache_ptr->epoch_marker_ringbuf)[ring_buf_index]; + + cache_ptr->epoch_marker_ringbuf_first = + (cache_ptr->epoch_marker_ringbuf_first + 1) % + (H5C__MAX_EPOCH_MARKERS + 1); + + cache_ptr->epoch_marker_ringbuf_size -= 1; + + if ( cache_ptr->epoch_marker_ringbuf_size < 0 ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow.") + } + + if ( (cache_ptr->epoch_marker_active)[i] != TRUE ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?") + } + + /* remove the epoch marker from the LRU list */ + H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), \ + (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, \ + (FAIL)) + + /* mark the epoch marker as unused. 
*/ + (cache_ptr->epoch_marker_active)[i] = FALSE; + + HDassert( ((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i ); + HDassert( ((cache_ptr->epoch_markers)[i]).next == NULL ); + HDassert( ((cache_ptr->epoch_markers)[i]).prev == NULL ); + + /* decrement the number of active epoch markers */ + cache_ptr->epoch_markers_active -= 1; + + HDassert( cache_ptr->epoch_markers_active == \ + cache_ptr->epoch_marker_ringbuf_size ); + } done: + FUNC_LEAVE_NOAPI(ret_value) -} /* H5C_set_skip_flags() */ +} /* H5C__autoadjust__ageout__remove_excess_markers() */ -/*************************************************************************/ -/**************************** Private Functions: *************************/ -/*************************************************************************/ - /*------------------------------------------------------------------------- * * Function: H5C_flush_single_entry @@ -3689,27 +5345,40 @@ H5C_flush_single_entry(H5F_t * f, H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL) #if H5C_DO_SANITY_CHECKS - if ( entry_ptr->in_slist ) { + if ( entry_ptr != NULL ) { - if ( ( entry_ptr->addr != addr ) ) { + if ( entry_ptr->in_slist ) { - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ - "Hash table and skip list out of sync.") - } - } else if ( entry_ptr != NULL ) { + if ( entry_ptr->addr != addr ) { - if ( ( entry_ptr->in_slist ) || - ( entry_ptr->is_dirty ) || - ( entry_ptr->addr != addr ) ) { + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ + "entry in slist failed sanity checks.") + } + } else { - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ - "entry failed sanity checks.") + if ( ( entry_ptr->is_dirty ) || ( entry_ptr->addr != addr ) ) { + + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ + "entry failed sanity checks.") + } } } +#if 0 + /* this should be useful for debugging from time to time. + * lets leave it in for now. -- JRM 12/15/04 + */ + else { + HDfprintf(stdout, + "H5C_flush_single_entry(): non-existant entry. addr = %a\n", + addr); + HDfflush(stdout); + } +#endif #endif /* H5C_DO_SANITY_CHECKS */ if ( ( entry_ptr != NULL ) && ( entry_ptr->is_protected ) ) { + /* Attempt to flush a protected entry -- scream and die. */ HGOTO_ERROR(H5E_CACHE, H5E_PROTECT, FAIL, \ "Attempt to flush a protected entry.") @@ -3773,9 +5442,9 @@ H5C_flush_single_entry(H5F_t * f, } /* Always remove the entry from the hash table on a destroy. On a - * flush with destroy, it is cheaper to discard the skip list all at once - * rather than remove the entries one by one, so we only delete from - * the list if requested. + * flush with destroy, it is cheaper to discard the skip list all at + * once rather than remove the entries one by one, so we only delete + * from the list if requested. * * We must do deletions now as the callback routines will free the * entry if destroy is true. 
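The ordering constraint spelled out above, that an entry must be unlinked from every cache data structure before a destroying callback gets a chance to free it, is easy to state in isolation. The toy routine below illustrates it with a single hash bucket and a flag in place of the slist; the real code does the same work with the H5C index and slist macros.

#include <stddef.h>

typedef struct toy_node {
    struct toy_node *hash_next;   /* stand-in for the hash table link */
    int in_slist;
    void (*destroy_cb)(struct toy_node *node);
} toy_node;

static void toy_destroy_entry(toy_node **bucket_head, toy_node *node,
                              int del_from_slist)
{
    toy_node **pp = bucket_head;

    /* 1) unlink the entry from the index bucket */
    while ((*pp != NULL) && (*pp != node))
        pp = &((*pp)->hash_next);

    if (*pp == node)
        *pp = node->hash_next;

    /* 2) optionally drop it from the dirty list */
    if (del_from_slist && node->in_slist)
        node->in_slist = 0;

    /* 3) only now hand the entry to the callback, which may free it */
    node->destroy_cb(node);
}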
@@ -3784,7 +5453,8 @@ H5C_flush_single_entry(H5F_t * f, H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr) - if ( ( entry_ptr->in_slist ) && ( del_entry_from_slist_on_destroy ) ) { + if ( ( entry_ptr->in_slist ) && + ( del_entry_from_slist_on_destroy ) ) { H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr) } @@ -3805,8 +5475,10 @@ H5C_flush_single_entry(H5F_t * f, /* Clear the dirty flag only, if requested */ if ( clear_only ) { + /* Call the callback routine to clear all dirty flags for object */ if ( (entry_ptr->type->clear)(f, entry_ptr, destroy) < 0 ) { + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear entry") } } else { @@ -3815,16 +5487,20 @@ H5C_flush_single_entry(H5F_t * f, */ if ( *first_flush_ptr && entry_ptr->is_dirty ) { + status = (entry_ptr->type->flush)(f, primary_dxpl_id, destroy, entry_ptr->addr, entry_ptr); *first_flush_ptr = FALSE; + } else { + status = (entry_ptr->type->flush)(f, secondary_dxpl_id, destroy, entry_ptr->addr, entry_ptr); } if ( status < 0 ) { + HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \ "unable to flush entry") } @@ -3970,6 +5646,14 @@ done: * Minor modifications in support of the addition of a hash * table to facilitate lookups. * + * JRM -- 11/22/04 + * Added the first_flush_ptr parameter, which replaces the + * old first_flush local variable. This allows the function + * to coordinate on the first flush issue with other functions. + * + * JRM -- 12/13/04 + * Added code to skip over epoch markers if present. + * *------------------------------------------------------------------------- */ @@ -3979,9 +5663,9 @@ H5C_make_space_in_cache(H5F_t * f, hid_t secondary_dxpl_id, H5C_t * cache_ptr, size_t space_needed, - hbool_t write_permitted) + hbool_t write_permitted, + hbool_t * first_flush_ptr) { - hbool_t first_flush = TRUE; herr_t ret_value = SUCCEED; /* Return value */ herr_t result; int32_t entries_examined = 0; @@ -3993,6 +5677,8 @@ H5C_make_space_in_cache(H5F_t * f, HDassert( cache_ptr ); HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC ); + HDassert( first_flush_ptr != NULL ); + HDassert( ( *first_flush_ptr == TRUE ) || ( *first_flush_ptr == FALSE ) ); if ( write_permitted ) { @@ -4013,28 +5699,37 @@ H5C_make_space_in_cache(H5F_t * f, prev_ptr = entry_ptr->prev; - if ( entry_ptr->is_dirty ) { + if ( (entry_ptr->type)->id != H5C__EPOCH_MARKER_TYPE ) { - result = H5C_flush_single_entry(f, - primary_dxpl_id, - secondary_dxpl_id, - cache_ptr, - entry_ptr->type, - entry_ptr->addr, - (unsigned)0, - &first_flush, - FALSE); + if ( entry_ptr->is_dirty ) { + + result = H5C_flush_single_entry(f, + primary_dxpl_id, + secondary_dxpl_id, + cache_ptr, + entry_ptr->type, + entry_ptr->addr, + (unsigned)0, + first_flush_ptr, + FALSE); + } else { + + result = H5C_flush_single_entry(f, + primary_dxpl_id, + secondary_dxpl_id, + cache_ptr, + entry_ptr->type, + entry_ptr->addr, + H5F_FLUSH_INVALIDATE, + first_flush_ptr, + TRUE); + } } else { - result = H5C_flush_single_entry(f, - primary_dxpl_id, - secondary_dxpl_id, - cache_ptr, - entry_ptr->type, - entry_ptr->addr, - H5F_FLUSH_INVALIDATE, - &first_flush, - TRUE); + /* Skip epoch markers. Set result to SUCCEED to avoid + * triggering the error code below. 
+ */ + result = SUCCEED; } if ( result < 0 ) { @@ -4069,7 +5764,7 @@ H5C_make_space_in_cache(H5F_t * f, entry_ptr->type, entry_ptr->addr, (unsigned)0, - &first_flush, + first_flush_ptr, FALSE); if ( result < 0 ) { @@ -4112,7 +5807,7 @@ H5C_make_space_in_cache(H5F_t * f, entry_ptr->type, entry_ptr->addr, H5F_FLUSH_INVALIDATE, - &first_flush, + first_flush_ptr, TRUE); if ( result < 0 ) { diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h new file mode 100644 index 0000000..edd67e7 --- /dev/null +++ b/src/H5Cpkg.h @@ -0,0 +1,686 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by the Board of Trustees of the University of Illinois. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the files COPYING and Copyright.html. COPYING can be found at the root * + * of the source code distribution tree; Copyright.html can be found at the * + * root level of an installed copy of the electronic HDF5 document set and * + * is linked from the top-level documents page. It can also be found at * + * http://hdf.ncsa.uiuc.edu/HDF5/doc/Copyright.html. If you do not have * + * access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Programmer: John Mainzer -- 10/12/04 + * + * Purpose: This file contains declarations which are normally visible + * only within the H5C package (just H5C.c at present). + * + * Source files outside the H5C package should include + * H5Cprivate.h instead. + * + * The one exception to this rule is test/cache.c. The test + * code is easier to write if it can look at the cache's + * internal data structures. Indeed, this is the main + * reason why this file was created. + */ + +#ifndef H5C_PACKAGE +#error "Do not include this file outside the H5HL package!" +#endif + +#ifndef _H5Cpkg_H +#define _H5Cpkg_H + + +/* Get package's private header */ +#include "H5Cprivate.h" + + +/* Get needed headers */ +#include "H5SLprivate.h" /* Skip lists */ + + +#define H5C__HASH_TABLE_LEN (64 * 1024) /* must be a power of 2 */ + + +/**************************************************************************** + * + * structure H5C_t + * + * Catchall structure for all variables specific to an instance of the cache. + * + * While the individual fields of the structure are discussed below, the + * following overview may be helpful. + * + * Entries in the cache are stored in an instance of H5TB_TREE, indexed on + * the entry's disk address. While the H5TB_TREE is less efficient than + * hash table, it keeps the entries in address sorted order. As flushes + * in parallel mode are more efficient if they are issued in increasing + * address order, this is a significant benefit. Also the H5TB_TREE code + * was readily available, which reduced development time. + * + * While the cache was designed with multiple replacement policies in mind, + * at present only a modified form of LRU is supported. + * + * JRM - 4/26/04 + * + * Profiling has indicated that searches in the instance of H5TB_TREE are + * too expensive. To deal with this issue, I have augmented the cache + * with a hash table in which all entries will be stored. Given the + * advantages of flushing entries in increasing address order, the TBBT + * is retained, but only dirty entries are stored in it. 
At least for + * now, we will leave entries in the TBBT after they are flushed. + * + * Note that index_size and index_len now refer to the total size of + * and number of entries in the hash table. + * + * JRM - 7/19/04 + * + * magic: Unsigned 32 bit integer always set to H5C__H5C_T_MAGIC. This + * field is used to validate pointers to instances of H5C_t. + * + * max_type_id: Integer field containing the maximum type id number assigned + * to a type of entry in the cache. All type ids from 0 to + * max_type_id inclusive must be defined. The names of the + * types are stored in the type_name_table discussed below, and + * indexed by the ids. + * + * type_name_table_ptr: Pointer to an array of pointer to char of length + * max_type_id + 1. The strings pointed to by the entries + * in the array are the names of the entry types associated + * with the indexing type IDs. + * + * max_cache_size: Nominal maximum number of bytes that may be stored in the + * cache. This value should be viewed as a soft limit, as the + * cache can exceed this value under the following circumstances: + * + * a) All entries in the cache are protected, and the cache is + * asked to insert a new entry. In this case the new entry + * will be created. If this causes the cache to exceed + * max_cache_size, it will do so. The cache will attempt + * to reduce its size as entries are unprotected. + * + * b) When running in parallel mode, the cache may not be + * permitted to flush a dirty entry in response to a read. + * If there are no clean entries available to evict, the + * cache will exceed its maximum size. Again the cache + * will attempt to reduce its size to the max_cache_size + * limit on the next cache write. + * + * min_clean_size: Nominal minimum number of clean bytes in the cache. + * The cache attempts to maintain this number of bytes of + * clean data so as to avoid case b) above. Again, this is + * a soft limit. + * + * + * In addition to the call back functions required for each entry, the + * cache requires the following call back functions for this instance of + * the cache as a whole: + * + * check_write_permitted: In certain applications, the cache may not + * be allowed to write to disk at certain time. If specified, + * the check_write_permitted function is used to determine if + * a write is permissible at any given point in time. + * + * If no such function is specified (i.e. this field is NULL), + * the cache will presume that writes are always permissable. + * + * + * The cache requires an index to facilitate searching for entries. The + * following fields support that index. + * + * index_len: Number of entries currently in the hash table used to index + * the cache. + * + * index_size: Number of bytes of cache entries currently stored in the + * hash table used to index the cache. + * + * This value should not be mistaken for footprint of the + * cache in memory. The average cache entry is small, and + * the cache has a considerable overhead. Multiplying the + * index_size by two should yield a conservative estimate + * of the cache's memory footprint. + * + * index: Array of pointer to H5C_cache_entry_t of size + * H5C__HASH_TABLE_LEN. At present, this value is a power + * of two, not the usual prime number. + * + * I hope that the variable size of cache elements, the large + * hash table size, and the way in which HDF5 allocates space + * will combine to avoid problems with periodicity. 
If so, we + * can use a trivial hash function (a bit-and and a 3 bit left + * shift) with some small savings. + * + * If not, it will become evident in the statistics. Changing + * to the usual prime number length hash table will require + * changing the H5C__HASH_FCN macro and the deletion of the + * H5C__HASH_MASK #define. No other changes should be required. + * + * + * When we flush the cache, we need to write entries out in increasing + * address order. An instance of a skip list is used to store dirty entries in + * sorted order. Whether it is cheaper to sort the dirty entries as needed, + * or to maintain the list is an open question. At a guess, it depends + * on how frequently the cache is flushed. We will see how it goes. + * + * For now at least, I will not remove dirty entries from the list as they + * are flushed. + * + * slist_len: Number of entries currently in the skip list + * used to maintain a sorted list of dirty entries in the + * cache. + * + * slist_size: Number of bytes of cache entries currently stored in the + * skip list used to maintain a sorted list of + * dirty entries in the cache. + * + * slist_ptr: pointer to the instance of H5SL_t used maintain a sorted + * list of dirty entries in the cache. This sorted list has + * two uses: + * + * a) It allows us to flush dirty entries in increasing address + * order, which results in significant savings. + * + * b) It facilitates checking for adjacent dirty entries when + * attempting to evict entries from the cache. While we + * don't use this at present, I hope that this will allow + * some optimizations when I get to it. + * + * + * When a cache entry is protected, it must be removed from the LRU + * list(s) as it cannot be either flushed or evicted until it is unprotected. + * The following fields are used to implement the protected list (pl). + * + * pl_len: Number of entries currently residing on the protected list. + * + * pl_size: Number of bytes of cache entries currently residing on the + * protected list. + * + * pl_head_ptr: Pointer to the head of the doubly linked list of protected + * entries. Note that cache entries on this list are linked + * by their next and prev fields. + * + * This field is NULL if the list is empty. + * + * pl_tail_ptr: Pointer to the tail of the doubly linked list of protected + * entries. Note that cache entries on this list are linked + * by their next and prev fields. + * + * This field is NULL if the list is empty. + * + * + * The cache must have a replacement policy, and the fields supporting this + * policy must be accessible from this structure. + * + * While there has been interest in several replacement policies for + * this cache, the initial development schedule is tight. Thus I have + * elected to support only a modified LRU policy for the first cut. + * + * To further simplify matters, I have simply included the fields needed + * by the modified LRU in this structure. When and if we add support for + * other policies, it will probably be easiest to just add the necessary + * fields to this structure as well -- we only create one instance of this + * structure per file, so the overhead is not excessive. + * + * + * Fields supporting the modified LRU policy: + * + * See most any OS text for a discussion of the LRU replacement policy. + * + * When operating in parallel mode, we must ensure that a read does not + * cause a write. If it does, the process will hang, as the write will + * be collective and the other processes will not know to participate. 
+ * + * To deal with this issue, I have modified the usual LRU policy by adding + * clean and dirty LRU lists to the usual LRU list. + * + * The clean LRU list is simply the regular LRU list with all dirty cache + * entries removed. + * + * Similarly, the dirty LRU list is the regular LRU list with all the clean + * cache entries removed. + * + * When reading in parallel mode, we evict from the clean LRU list only. + * This implies that we must try to ensure that the clean LRU list is + * reasonably well stocked at all times. + * + * We attempt to do this by trying to flush enough entries on each write + * to keep the cLRU_list_size >= min_clean_size. + * + * Even if we start with a completely clean cache, a sequence of protects + * without unprotects can empty the clean LRU list. In this case, the + * cache must grow temporarily. At the next write, we will attempt to + * evict enough entries to reduce index_size to less than max_cache_size. + * While this will usually be possible, all bets are off if enough entries + * are protected. + * + * Discussions of the individual fields used by the modified LRU replacement + * policy follow: + * + * LRU_list_len: Number of cache entries currently on the LRU list. + * + * Observe that LRU_list_len + pl_len must always equal + * index_len. + * + * LRU_list_size: Number of bytes of cache entries currently residing on the + * LRU list. + * + * Observe that LRU_list_size + pl_size must always equal + * index_size. + * + * LRU_head_ptr: Pointer to the head of the doubly linked LRU list. Cache + * entries on this list are linked by their next and prev fields. + * + * This field is NULL if the list is empty. + * + * LRU_tail_ptr: Pointer to the tail of the doubly linked LRU list. Cache + * entries on this list are linked by their next and prev fields. + * + * This field is NULL if the list is empty. + * + * cLRU_list_len: Number of cache entries currently on the clean LRU list. + * + * Observe that cLRU_list_len + dLRU_list_len must always + * equal LRU_list_len. + * + * cLRU_list_size: Number of bytes of cache entries currently residing on + * the clean LRU list. + * + * Observe that cLRU_list_size + dLRU_list_size must always + * equal LRU_list_size. + * + * cLRU_head_ptr: Pointer to the head of the doubly linked clean LRU list. + * Cache entries on this list are linked by their aux_next and + * aux_prev fields. + * + * This field is NULL if the list is empty. + * + * cLRU_tail_ptr: Pointer to the tail of the doubly linked clean LRU list. + * Cache entries on this list are linked by their aux_next and + * aux_prev fields. + * + * This field is NULL if the list is empty. + * + * dLRU_list_len: Number of cache entries currently on the dirty LRU list. + * + * Observe that cLRU_list_len + dLRU_list_len must always + * equal LRU_list_len. + * + * dLRU_list_size: Number of bytes of cache entries currently residing on + * the dirty LRU list. + * + * Observe that cLRU_list_size + dLRU_list_size must always + * equal LRU_list_size. + * + * dLRU_head_ptr: Pointer to the head of the doubly linked dirty LRU list. + * Cache entries on this list are linked by their aux_next and + * aux_prev fields. + * + * This field is NULL if the list is empty. + * + * dLRU_tail_ptr: Pointer to the tail of the doubly linked dirty LRU list. + * Cache entries on this list are linked by their aux_next and + * aux_prev fields. + * + * This field is NULL if the list is empty.
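+ *
+ * As a quick summary of the above, the list fields are expected to obey the
+ * following invariants (a sketch only -- the sanity checking code in H5C.c
+ * need not take exactly this form):
+ *
+ *     HDassert( cache_ptr->LRU_list_len + cache_ptr->pl_len ==
+ *               cache_ptr->index_len );
+ *     HDassert( cache_ptr->LRU_list_size + cache_ptr->pl_size ==
+ *               cache_ptr->index_size );
+ *     HDassert( cache_ptr->cLRU_list_len + cache_ptr->dLRU_list_len ==
+ *               cache_ptr->LRU_list_len );
+ *     HDassert( cache_ptr->cLRU_list_size + cache_ptr->dLRU_list_size ==
+ *               cache_ptr->LRU_list_size );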
+ * + * + * Automatic cache size adjustment: + * + * While the default cache size is adequate for most cases, we can run into + * cases where the default is too small. Ideally, we will let the user + * adjust the cache size as required. However, this is not possible in all + * cases. Thus I have added automatic cache size adjustment code. + * + * The configuration for the automatic cache size adjustment is stored in + * the structure described below: + * + * size_increase_possible: Depending on the configuration data given + * in the resize_ctl field, it may or may not be possible + * to increase the size of the cache. Rather than test for + * all the ways this can happen, we simply set this flag when + * we receive a new configuration. + * + * size_decrease_possible: Depending on the configuration data given + * in the resize_ctl field, it may or may not be possible + * to decrease the size of the cache. Rather than test for + * all the ways this can happen, we simply set this flag when + * we receive a new configuration. + * + * cache_full: Boolean flag used to keep track of whether the cache is + * full, so we can refrain from increasing the size of a + * cache which hasn't used up the space allotted to it. + * + * The field is initialized to FALSE, and then set to TRUE + * whenever we attempt to make space in the cache. + * + * resize_enabled: This is another convenience flag which is set whenever + * a new set of values for resize_ctl are provided. Very + * simply, + * + * resize_enabled = size_increase_possible || + * size_decrease_possible; + * + * size_decreased: Boolean flag set to TRUE whenever the maximum cache + * size is decreased. The flag triggers a call to + * H5C_make_space_in_cache() on the next call to H5C_protect(). + * + * resize_ctl: Instance of H5C_auto_size_ctl_t containing configuration + * data for automatic cache resizing. + * + * epoch_markers_active: Integer field containing the number of epoch + * markers currently in use in the LRU list. This value + * must be in the range [0, H5C__MAX_EPOCH_MARKERS - 1]. + * + * epoch_marker_active: Array of boolean of length H5C__MAX_EPOCH_MARKERS. + * This array is used to track which epoch markers are currently + * in use. + * + * epoch_marker_ringbuf: Array of int of length H5C__MAX_EPOCH_MARKERS + 1. + * + * To manage the epoch marker cache entries, it is necessary + * to track their order in the LRU list. This is done with + * epoch_marker_ringbuf. When markers are inserted at the + * head of the LRU list, the index of the marker in the + * epoch_markers array is inserted at the tail of the ring + * buffer. When it becomes the epoch_markers_active'th marker + * in the LRU list, it will have worked its way to the head + * of the ring buffer as well. This allows us to remove it + * without scanning the LRU list if such is required. + * + * epoch_marker_ringbuf_first: Integer field containing the index of the + * first entry in the ring buffer. + * + * epoch_marker_ringbuf_last: Integer field containing the index of the + * last entry in the ring buffer. + * + * epoch_marker_ringbuf_size: Integer field containing the number of entries + * in the ring buffer. + * + * epoch_markers: Array of instances of H5C_cache_entry_t of length + * H5C__MAX_EPOCH_MARKERS. The entries are used as markers + * in the LRU list to identify cache entries that haven't + * been accessed for some (small) specified number of + * epochs.
These entries (if any) can then be evicted and + * the cache size reduced -- ideally without evicting any + * of the current working set. Needless to say, the epoch + * length and the number of epochs before an unused entry + * is evicted must be chosen so that all, or almost all, of + * the working set will be accessed before the limit. + * + * Epoch markers only appear in the LRU list, never in + * the index or slist. While they are of type + * H5C__EPOCH_MARKER_TYPE, and have associated class + * functions, these functions should never be called. + * + * The addr fields of these instances of H5C_cache_entry_t + * are set to the index of the instance in the epoch_markers + * array, the size is set to 0, and the type field points + * to the constant structure epoch_marker_class defined + * in H5C.c. The next and prev fields are used as usual + * to link the entry into the LRU list. + * + * All other fields are unused. + * + * + * Cache hit rate collection fields: + * + * We supply the current cache hit rate on request, so we must keep a + * simple cache hit rate computation regardless of whether statistics + * collection is enabled. The following fields support this capability. + * + * cache_hits: Number of cache hits since the last time the cache hit + * rate statistics were reset. Note that when automatic cache + * re-sizing is enabled, this field will be reset every automatic + * resize epoch. + * + * cache_accesses: Number of times the cache has been accessed since + * the last time the cache hit rate statistics were reset. Note + * that when automatic cache re-sizing is enabled, this field + * will be reset every automatic resize epoch. + * + * + * Statistics collection fields: + * + * When enabled, these fields are used to collect statistics as described + * below. The first set are collected only when H5C_COLLECT_CACHE_STATS + * is true. + * + * hits: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the number of times an entry with type id + * equal to the array index has been in cache when requested in + * the current epoch. + * + * misses: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the number of times an entry with type id + * equal to the array index has not been in cache when + * requested in the current epoch. + * + * insertions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the number of times an entry with type + * id equal to the array index has been inserted into the + * cache in the current epoch. + * + * clears: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the number of times an entry with type + * id equal to the array index has been cleared in the current + * epoch. + * + * flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the number of times an entry with type id + * equal to the array index has been written to disk in the + * current epoch. + * + * evictions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the number of times an entry with type id + * equal to the array index has been evicted from the cache in + * the current epoch. + * + * renames: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the number of times an entry with type + * id equal to the array index has been renamed in the current + * epoch.
+ * + * total_ht_insertions: Number of times entries have been inserted into the + * hash table in the current epoch. + * + * total_ht_deletions: Number of times entries have been deleted from the + * hash table in the current epoch. + * + * successful_ht_searches: int64 containing the total number of successful + * searches of the hash table in the current epoch. + * + * total_successful_ht_search_depth: int64 containing the total number of + * entries other than the targets examined in successful + * searches of the hash table in the current epoch. + * + * failed_ht_searches: int64 containing the total number of unsuccessful + * searches of the hash table in the current epoch. + * + * total_failed_ht_search_depth: int64 containing the total number of + * entries examined in unsuccessful searches of the hash + * table in the current epoch. + * + * max_index_len: Largest value attained by the index_len field in the + * current epoch. + * + * max_index_size: Largest value attained by the index_size field in the + * current epoch. + * + * max_slist_len: Largest value attained by the slist_len field in the + * current epoch. + * + * max_slist_size: Largest value attained by the slist_size field in the + * current epoch. + * + * max_pl_len: Largest value attained by the pl_len field in the + * current epoch. + * + * max_pl_size: Largest value attained by the pl_size field in the + * current epoch. + * + * The remaining stats are collected only when both H5C_COLLECT_CACHE_STATS + * and H5C_COLLECT_CACHE_ENTRY_STATS are true. + * + * max_accesses: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the maximum number of times any single + * entry with type id equal to the array index has been + * accessed in the current epoch. + * + * min_accesses: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the minimum number of times any single + * entry with type id equal to the array index has been + * accessed in the current epoch. + * + * max_clears: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the maximum number of times any single + * entry with type id equal to the array index has been cleared + * in the current epoch. + * + * max_flushes: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the maximum number of times any single + * entry with type id equal to the array index has been + * flushed in the current epoch. + * + * max_size: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the maximum size of any single entry + * with type id equal to the array index that has resided in + * the cache in the current epoch. + * + * + * Fields supporting testing: + * + * For test purposes, it is useful to turn off some asserts and sanity + * checks. The following flags support this. + * + * skip_file_checks: Boolean flag used to skip sanity checks on file + * parameters passed to the cache. In the test bed, there + * is no reason to have a file open, as the cache proper + * just passes these parameters through without using them. + * + * When this flag is set, all sanity checks on the file + * parameters are skipped. The field defaults to FALSE. + * + * skip_dxpl_id_checks: Boolean flag used to skip sanity checks on the + * dxpl_id parameters passed to the cache. These are not + * used directly by the cache, so skipping the checks + * simplifies the test bed. 
+ * + * When this flag is set, all sanity checks on the dxpl_id + * parameters are skipped. The field defaults to FALSE. + * + ****************************************************************************/ + +#define H5C__H5C_T_MAGIC 0x005CAC0E +#define H5C__MAX_NUM_TYPE_IDS 9 +#define H5C__MAX_EPOCH_MARKERS 10 + +struct H5C_t +{ + uint32_t magic; + + int32_t max_type_id; + const char * (* type_name_table_ptr); + + size_t max_cache_size; + size_t min_clean_size; + + H5C_write_permitted_func_t check_write_permitted; + + int32_t index_len; + size_t index_size; + H5C_cache_entry_t * (index[H5C__HASH_TABLE_LEN]); + + + int32_t slist_len; + size_t slist_size; + H5SL_t * slist_ptr; + + + int32_t pl_len; + size_t pl_size; + H5C_cache_entry_t * pl_head_ptr; + H5C_cache_entry_t * pl_tail_ptr; + + int32_t LRU_list_len; + size_t LRU_list_size; + H5C_cache_entry_t * LRU_head_ptr; + H5C_cache_entry_t * LRU_tail_ptr; + + int32_t cLRU_list_len; + size_t cLRU_list_size; + H5C_cache_entry_t * cLRU_head_ptr; + H5C_cache_entry_t * cLRU_tail_ptr; + + int32_t dLRU_list_len; + size_t dLRU_list_size; + H5C_cache_entry_t * dLRU_head_ptr; + H5C_cache_entry_t * dLRU_tail_ptr; + + hbool_t size_increase_possible; + hbool_t size_decrease_possible; + hbool_t resize_enabled; + hbool_t cache_full; + hbool_t size_decreased; + H5C_auto_size_ctl_t resize_ctl; + + int32_t epoch_markers_active; + hbool_t epoch_marker_active[H5C__MAX_EPOCH_MARKERS]; + int32_t epoch_marker_ringbuf[H5C__MAX_EPOCH_MARKERS+1]; + int32_t epoch_marker_ringbuf_first; + int32_t epoch_marker_ringbuf_last; + int32_t epoch_marker_ringbuf_size; + H5C_cache_entry_t epoch_markers[H5C__MAX_EPOCH_MARKERS]; + + int64_t cache_hits; + int64_t cache_accesses; + +#if H5C_COLLECT_CACHE_STATS + + /* stats fields */ + int64_t hits[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t misses[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t insertions[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t clears[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t flushes[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t evictions[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t renames[H5C__MAX_NUM_TYPE_IDS + 1]; + + int64_t total_ht_insertions; + int64_t total_ht_deletions; + int64_t successful_ht_searches; + int64_t total_successful_ht_search_depth; + int64_t failed_ht_searches; + int64_t total_failed_ht_search_depth; + + int32_t max_index_len; + size_t max_index_size; + + int32_t max_slist_len; + size_t max_slist_size; + + + int32_t max_pl_len; + size_t max_pl_size; + +#if H5C_COLLECT_CACHE_ENTRY_STATS + + int32_t max_accesses[H5C__MAX_NUM_TYPE_IDS + 1]; + int32_t min_accesses[H5C__MAX_NUM_TYPE_IDS + 1]; + int32_t max_clears[H5C__MAX_NUM_TYPE_IDS + 1]; + int32_t max_flushes[H5C__MAX_NUM_TYPE_IDS + 1]; + size_t max_size[H5C__MAX_NUM_TYPE_IDS + 1]; + +#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */ + +#endif /* H5C_COLLECT_CACHE_STATS */ + + hbool_t skip_file_checks; + hbool_t skip_dxpl_id_checks; + +}; + +#endif /* _H5Cpkg_H */ + diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h index 7ae6b58..20d67f2 100644 --- a/src/H5Cprivate.h +++ b/src/H5Cprivate.h @@ -85,6 +85,11 @@ #endif /* H5_HAVE_PARALLEL */ +/* Typedef for the main structure for the cache (defined in H5Cpkg.h) */ + +typedef struct H5C_t H5C_t; + + /* * Class methods pertaining to caching. Each type of cached object will * have a constant variable with permanent life-span that describes how @@ -144,6 +149,18 @@ typedef herr_t (*H5C_write_permitted_func_t)(H5F_t *f, hid_t dxpl_id, hbool_t * write_permitted_ptr); +/* Upper and lower limits on cache size. 
These limits are picked + * out of a hat -- you should be able to change them as necessary. + * + * However, if you need a very big cache, you should also increase the + * size of the hash table (H5C__HASH_TABLE_LEN in H5C.c). The current + * upper bound on cache size is rather large for the current hash table + * size. + */ + +#define H5C__MAX_MAX_CACHE_SIZE ((size_t)(128 * 1024 * 1024)) +#define H5C__MIN_MAX_CACHE_SIZE ((size_t)(1024)) + /* Default max cache size and min clean size are given here to make * them generally accessible. @@ -177,6 +194,10 @@ typedef herr_t (*H5C_write_permitted_func_t)(H5F_t *f, * The entries should never overlap, and when we do writebacks, * we will want to writeback adjacent entries where possible. * + * NB: At present, entries need not be contiguous on disk. Until + * we fix this, we can't do much with writing back adjacent + * entries. + * * type: Pointer to the instance of H5C_class_t containing pointers * to the methods for cache entries of the current type. This * field should be NULL when the instance of H5C_cache_entry_t @@ -343,9 +364,293 @@ typedef struct H5C_cache_entry_t } H5C_cache_entry_t; -/* Typedef for the main structure for the cache (defined in H5C.c) */ +/**************************************************************************** + * + * structure H5C_auto_size_ctl_t + * + * Instances of H5C_auto_size_ctl_t are used to get and set the control + * fields for automatic cache re-sizing. + * + * The fields of the structure are discussed individually below: + * + * version: Integer field containing the version number of this version + * of the H5C_auto_size_ctl_t structure. Any instance of + * H5C_auto_size_ctl_t passed to the cache must have a known + * version number, or an error will be flagged. + * + * rpt_fcn: Pointer to the function that is to be called to report + * activities each time the auto cache resize code is executed. If the + * field is NULL, no call is made. + * + * If the field is not NULL, it must contain the address of a function + * of type H5C_auto_resize_rpt_fcn. + * + * set_initial_size: Boolean flag indicating whether the initial size of + * the cache is to be set to the value given in + * the initial_size field. If set_initial_size is FALSE, the + * initial_size field is ignored. + * + * initial_size: If enabled, this field contains the size the cache is + * to be set to upon receipt of this structure. Needless to say, + * initial_size must lie in the closed interval [min_size, max_size]. + * + * min_clean_fraction: double in the range 0 to 1 indicating the fraction + * of the cache that is to be kept clean. This field is only used + * in parallel mode. Typical values are 0.1 to 0.5. + * + * max_size: Maximum size to which the cache can be adjusted. The + * supplied value must fall in the closed interval + * [MIN_MAX_CACHE_SIZE, MAX_MAX_CACHE_SIZE]. Also, max_size must + * be greater than or equal to min_size. + * + * min_size: Minimum size to which the cache can be adjusted. The + * supplied value must fall in the closed interval + * [MIN_MAX_CACHE_SIZE, MAX_MAX_CACHE_SIZE]. Also, min_size must + * be less than or equal to max_size. + * + * epoch_length: Number of accesses on the cache over which to collect + * hit rate stats before running the automatic cache resize code, + * if it is enabled. + * + * At the end of an epoch, we discard prior hit rate data and start + * collecting afresh. The epoch_length must lie in the closed + * interval [H5C__MIN_AR_EPOCH_LENGTH, H5C__MAX_AR_EPOCH_LENGTH].
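+ *
+ *     In outline, the end of epoch processing implied by the above looks
+ *     something like the following sketch. The names hits_this_epoch and
+ *     accesses_this_epoch are illustrative only -- the actual resize code
+ *     lives in H5C.c and may differ in detail:
+ *
+ *         if ( accesses_this_epoch >= epoch_length ) {
+ *
+ *             hit_rate = ((double)hits_this_epoch) /
+ *                        ((double)accesses_this_epoch);
+ *
+ *             ... consider a cache size increase if hit_rate has dropped
+ *                 below lower_hr_threshold, consider a size decrease or
+ *                 age out as directed by decr_mode, then discard the hit
+ *                 rate data and start the next epoch ...
+ *         }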
+ * + * + * Cache size increase control fields: + * + * incr_mode: Instance of the H5C_cache_incr_mode enumerated type whose + * value indicates how we determine whether the cache size should be + * increased. At present there are two possible values: + * + * H5C_incr__off: Don't attempt to increase the size of the cache + * automatically. + * + * When this increment mode is selected, the remaining fields + * in the cache size increase section are ignored. + * + * H5C_incr__threshold: Attempt to increase the size of the cache + * whenever the average hit rate over the last epoch drops + * below the value supplied in the lower_hr_threshold + * field. + * + * Note that this attempt will fail if the cache is already + * at its maximum size, or if the cache is not already using + * all available space. + * + * lower_hr_threshold: Lower hit rate threshold. If the increment mode + * (incr_mode) is H5C_incr__threshold and the hit rate drops below the + * value supplied in this field in an epoch, increment the cache size by + * size_increment. Note that cache size may not be incremented above + * max_size, and that the increment may be further restricted by the + * max_increment field if it is enabled. + * + * When enabled, this field must contain a value in the range [0.0, 1.0]. + * Depending on the incr_mode selected, it may also have to be less than + * upper_hr_threshold. + * + * increment: Double containing the multiplier used to derive the new + * cache size from the old if a cache size increment is triggered. + * The increment must be greater than 1.0, and should not exceed 2.0. + * + * The new cache size is obtained by multiplying the current max cache + * size by the increment, and then clamping to max_size and to stay + * within the max_increment as necessary. + * + * apply_max_increment: Boolean flag indicating whether the max_increment + * field should be used to limit the maximum cache size increment. + * + * max_increment: If enabled by the apply_max_increment field described + * above, this field contains the maximum number of bytes by which the + * cache size can be increased in a single re-size. + * + * + * Cache size decrease control fields: + * + * decr_mode: Instance of the H5C_cache_decr_mode enumerated type whose + * value indicates how we determine whether the cache size should be + * decreased. At present there are four possibilities. + * + * H5C_decr__off: Don't attempt to decrease the size of the cache + * automatically. + * + * When this decrement mode is selected, the remaining fields + * in the cache size decrease section are ignored. + * + * H5C_decr__threshold: Attempt to decrease the size of the cache + * whenever the average hit rate over the last epoch rises + * above the value supplied in the upper_hr_threshold + * field. + * + * H5C_decr__age_out: At the end of each epoch, search the cache for + * entries that have not been accessed for at least the number + * of epochs specified in the epochs_before_eviction field, and + * evict these entries. Conceptually, the maximum cache size + * is then decreased to match the new actual cache size. However, + * this reduction may be modified by the min_size, the + * max_decrement, and/or the empty_reserve. + * + * H5C_decr__age_out_with_threshold: Same as age_out, but we only + * attempt to reduce the cache size when the hit rate observed + * over the last epoch exceeds the value provided in the + * upper_hr_threshold field. + * + * upper_hr_threshold: Upper hit rate threshold.
The use of this field + * varies according to the current decr_mode: + * + * H5C_decr__off or H5C_decr__age_out: The value of this field is + * ignored. + * + * H5C_decr__threshold: If the hit rate exceeds this threshold in any + * epoch, attempt to decrement the cache size by size_decrement. + * + * Note that cache size may not be decremented below min_size. + * + * Note also that if the upper_hr_threshold is 1.0, the cache size + * will never be reduced. + * + * H5C_decr__age_out_with_threshold: If the hit rate exceeds this + * threshold in any epoch, attempt to reduce the cache size + * by evicting entries that have not been accessed for more + * than the specified number of epochs. + * + * decrement: This field is only used when the decr_mode is + * H5C_decr__threshold. + * + * The field is a double containing the multiplier used to derive the + * new cache size from the old if a cache size decrement is triggered. + * The decrement must be in the range 0.0 (in which case the cache will + * try to contract to its minimum size) to 1.0 (in which case the + * cache will never shrink). + * + * apply_max_decrement: Boolean flag used to determine whether decrements + * in cache size are to be limited by the max_decrement field. + * + * max_decrement: Maximum number of bytes by which the cache size can be + * decreased in a single re-size. Note that decrements may also be + * restricted by the min_size of the cache, and (in age out modes) by + * the empty_reserve field. + * + * epochs_before_eviction: Integer field used in H5C_decr__age_out and + * H5C_decr__age_out_with_threshold decrement modes. + * + * This field contains the number of epochs an entry must remain + * unaccessed before it is evicted in an attempt to reduce the + * cache size. If applicable, this field must lie in the range + * [1, H5C__MAX_EPOCH_MARKERS]. + * + * apply_empty_reserve: Boolean field controlling whether the empty_reserve + * field is to be used in computing the new cache size when the + * decr_mode is H5C_decr__age_out or H5C_decr__age_out_with_threshold. + * + * empty_reserve: To avoid a constant ratcheting down of cache size by small + * amounts in the H5C_decr__age_out and H5C_decr__age_out_with_threshold + * modes, this field allows one to require that any cache size + * reductions leave the specified fraction of unused space in the cache. + * + * The value of this field must be in the range [0.0, 1.0]. I would + * expect typical values to be in the range of 0.01 to 0.1.
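+ *
+ * By way of illustration, a caller might configure automatic resizing along
+ * the following lines. The values are examples only -- chosen to satisfy
+ * the constraints above, not as recommendations -- and the structure is
+ * handed to the cache via H5C_set_cache_auto_resize_config(), which is
+ * declared later in this header:
+ *
+ *     H5C_auto_size_ctl_t ctl;
+ *
+ *     ctl.version                = H5C__CURR_AUTO_SIZE_CTL_VER;
+ *     ctl.rpt_fcn                = NULL;
+ *     ctl.set_initial_size       = TRUE;
+ *     ctl.initial_size           = (2 * 1024 * 1024);
+ *     ctl.min_clean_fraction     = 0.25;
+ *     ctl.max_size               = (16 * 1024 * 1024);
+ *     ctl.min_size               = ( 1 * 1024 * 1024);
+ *     ctl.epoch_length           = 50000;
+ *     ctl.incr_mode              = H5C_incr__threshold;
+ *     ctl.lower_hr_threshold     = 0.9;
+ *     ctl.increment              = 2.0;
+ *     ctl.apply_max_increment    = TRUE;
+ *     ctl.max_increment          = (4 * 1024 * 1024);
+ *     ctl.decr_mode              = H5C_decr__age_out_with_threshold;
+ *     ctl.upper_hr_threshold     = 0.999;
+ *     ctl.decrement              = 0.9;
+ *     ctl.apply_max_decrement    = TRUE;
+ *     ctl.max_decrement          = (1 * 1024 * 1024);
+ *     ctl.epochs_before_eviction = 3;
+ *     ctl.apply_empty_reserve    = TRUE;
+ *     ctl.empty_reserve          = 0.1;
+ *
+ *     if ( H5C_set_cache_auto_resize_config(cache_ptr, &ctl) < 0 )
+ *         ... report the failure ...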
+ * + ****************************************************************************/ + +#define H5C__CURR_AUTO_SIZE_CTL_VER 1 +#define H5C__CURR_AUTO_RESIZE_RPT_FCN_VER 1 + +#define H5C__DEF_AR_UPPER_THRESHHOLD 0.9999 +#define H5C__DEF_AR_LOWER_THRESHHOLD 0.9 +#define H5C__DEF_AR_MAX_SIZE ((size_t)(16 * 1024 * 1024)) +#define H5C__DEF_AR_INIT_SIZE ((size_t)( 1 * 1024 * 1024)) +#define H5C__DEF_AR_MIN_SIZE ((size_t)( 1 * 1024 * 1024)) +#define H5C__DEF_AR_MIN_CLEAN_FRAC 0.5 +#define H5C__DEF_AR_INCREMENT 2.0 +#define H5C__DEF_AR_MAX_INCREMENT ((size_t)( 2 * 1024 * 1024)) +#define H5C__DEF_AR_DECREMENT 0.9 +#define H5C__DEF_AR_MAX_DECREMENT ((size_t)( 1 * 1024 * 1024)) +#define H5C__DEF_AR_EPCHS_B4_EVICT 3 +#define H5C__DEF_AR_EMPTY_RESERVE 0.05 +#define H5C__MIN_AR_EPOCH_LENGTH 100 +#define H5C__DEF_AR_EPOCH_LENGTH 50000 +#define H5C__MAX_AR_EPOCH_LENGTH 1000000 + +enum H5C_resize_status +{ + in_spec, + increase, + decrease, + at_max_size, + at_min_size, + increase_disabled, + decrease_disabled, + not_full +}; /* enum H5C_resize_conditions */ + +enum H5C_cache_incr_mode +{ + H5C_incr__off, + H5C_incr__threshold +}; + +enum H5C_cache_decr_mode +{ + H5C_decr__off, + H5C_decr__threshold, + H5C_decr__age_out, + H5C_decr__age_out_with_threshold +}; + +typedef void (*H5C_auto_resize_rpt_fcn)(H5C_t * cache_ptr, + int32_t version, + double hit_rate, + enum H5C_resize_status status, + size_t old_max_cache_size, + size_t new_max_cache_size, + size_t old_min_clean_size, + size_t new_min_clean_size); +typedef struct H5C_auto_size_ctl_t +{ + /* general configuration fields: */ + int32_t version; + H5C_auto_resize_rpt_fcn rpt_fcn; + + hbool_t set_initial_size; + size_t initial_size; + + double min_clean_fraction; + + size_t max_size; + size_t min_size; + + int64_t epoch_length; + + + /* size increase control fields: */ + enum H5C_cache_incr_mode incr_mode; + + double lower_hr_threshold; + + double increment; + + hbool_t apply_max_increment; + size_t max_increment; + + + /* size decrease control fields: */ + enum H5C_cache_decr_mode decr_mode; + + double upper_hr_threshold; + + double decrement; + + hbool_t apply_max_decrement; + size_t max_decrement; + + int32_t epochs_before_eviction; + + hbool_t apply_empty_reserve; + double empty_reserve; + +} H5C_auto_size_ctl_t; -typedef struct H5C_t H5C_t; /* * Library prototypes. 
@@ -356,6 +661,15 @@ H5_DLL H5C_t * H5C_create(size_t max_cache_size, const char * (* type_name_table_ptr), H5C_write_permitted_func_t check_write_permitted); +H5_DLL void H5C_def_auto_resize_rpt_fcn(H5C_t * cache_ptr, + int32_t version, + double hit_rate, + enum H5C_resize_status status, + size_t old_max_cache_size, + size_t new_max_cache_size, + size_t old_min_clean_size, + size_t new_min_clean_size); + H5_DLL herr_t H5C_dest(H5F_t * f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, @@ -369,6 +683,18 @@ H5_DLL herr_t H5C_flush_cache(H5F_t * f, H5C_t * cache_ptr, unsigned flags); +H5_DLL herr_t H5C_get_cache_auto_resize_config(H5C_t * cache_ptr, + H5C_auto_size_ctl_t *config_ptr); + +H5_DLL herr_t H5C_get_cache_size(H5C_t * cache_ptr, + size_t * max_size_ptr, + size_t * min_clean_size_ptr, + size_t * cur_size_ptr, + int32_t * cur_num_entries_ptr); + +H5_DLL herr_t H5C_get_cache_hit_rate(H5C_t * cache_ptr, + double * hit_rate_ptr); + H5_DLL herr_t H5C_insert_entry(H5F_t * f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, @@ -392,6 +718,21 @@ H5_DLL void * H5C_protect(H5F_t * f, const void * udata1, void * udata2); +H5_DLL herr_t H5C_reset_cache_hit_rate_stats(H5C_t * cache_ptr); + +H5_DLL herr_t H5C_set_cache_auto_resize_config(H5C_t * cache_ptr, + H5C_auto_size_ctl_t *config_ptr); + +H5_DLL herr_t H5C_set_skip_flags(H5C_t * cache_ptr, + hbool_t skip_file_checks, + hbool_t skip_dxpl_id_checks); + +H5_DLL herr_t H5C_stats(H5C_t * cache_ptr, + const char * cache_name, + hbool_t display_detailed_stats); + +H5_DLL void H5C_stats__reset(H5C_t * cache_ptr); + H5_DLL herr_t H5C_unprotect(H5F_t * f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, @@ -401,15 +742,5 @@ H5_DLL herr_t H5C_unprotect(H5F_t * f, void * thing, hbool_t deleted); -H5_DLL herr_t H5C_stats(H5C_t * cache_ptr, - const char * cache_name, - hbool_t display_detailed_stats); - -H5_DLL void H5C_stats__reset(H5C_t * cache_ptr); - -H5_DLL herr_t H5C_set_skip_flags(H5C_t * cache_ptr, - hbool_t skip_file_checks, - hbool_t skip_dxpl_id_checks); - #endif /* !_H5Cprivate_H */ diff --git a/src/Makefile.in b/src/Makefile.in index 685c916..972b2d4 100644 --- a/src/Makefile.in +++ b/src/Makefile.in @@ -67,15 +67,14 @@ PUB_HDR=H5public.h H5Apublic.h H5ACpublic.h H5Bpublic.h H5Cpublic.h \ ## Other header files (not to be installed)... 
PRIVATE_HDR=H5private.h H5Aprivate.h H5Apkg.h H5ACprivate.h H5Bprivate.h \ - H5Cprivate.h H5Dprivate.h H5Edefin.h H5Einit.h H5Eprivate.h H5Eterm.h \ - H5Fprivate.h H5FDprivate.h H5FLprivate.h H5FOprivate.h H5FPprivate.h \ - H5FSprivate.h H5Gprivate.h H5Gpkg.h H5HGprivate.h H5HGpkg.h \ - H5HLprivate.h H5HLpkg.h H5HPprivate.h H5Iprivate.h H5MFprivate.h \ - H5MMprivate.h H5Oprivate.h H5Opkg.h H5Pprivate.h H5Ppkg.h \ - H5Rprivate.h H5RCprivate.h H5RSprivate.h H5Sprivate.h H5SLprivate.h \ - H5STprivate.h \ - H5Tprivate.h H5TBprivate.h H5Tpkg.h H5TSprivate.h H5Vprivate.h \ - H5Zprivate.h H5Zpkg.h H5config.h + H5Cprivate.h H5Cpkg.h H5Dprivate.h H5Edefin.h H5Einit.h H5Eprivate.h \ + H5Eterm.h H5Fprivate.h H5FDprivate.h H5FLprivate.h H5FOprivate.h \ + H5FPprivate.h H5FSprivate.h H5Gprivate.h H5Gpkg.h H5HGprivate.h \ + H5HGpkg.h H5HLprivate.h H5HLpkg.h H5HPprivate.h H5Iprivate.h \ + H5MFprivate.h H5MMprivate.h H5Oprivate.h H5Opkg.h H5Pprivate.h \ + H5Ppkg.h H5Rprivate.h H5RCprivate.h H5RSprivate.h H5Sprivate.h \ + H5SLprivate.h H5STprivate.h H5Tprivate.h H5TBprivate.h H5Tpkg.h \ + H5TSprivate.h H5Vprivate.h H5Zprivate.h H5Zpkg.h H5config.h ## Error header generation ## diff --git a/test/cache.c b/test/cache.c index bdd27d1..9b01c2e 100644 --- a/test/cache.c +++ b/test/cache.c @@ -26,8 +26,10 @@ const char *FILENAME[] = { NULL }; +#define H5C_PACKAGE /*suppress error about including H5Cpkg */ + #include "H5TBprivate.h" -#include "H5Cprivate.h" +#include "H5Cpkg.h" /* with apologies for the abuse of terminology... */ @@ -151,7 +153,6 @@ typedef struct test_entry_t * updated as necessary. */ -#define H5C__HASH_TABLE_LEN (32 * 1024) /* must be a power of 2 */ #define H5C__HASH_MASK ((size_t)(H5C__HASH_TABLE_LEN - 1) << 3) #define H5C__HASH_FCN(x) (int)(((x) & H5C__HASH_MASK) >> 3) @@ -216,100 +217,6 @@ if ( ( (cache_ptr) == NULL ) || \ } \ } - -/* The following is a local copy of the H5C_t structure -- any changes in - * that structure must be reproduced here. The typedef is used to allow - * local access to the cache's private data. 
- */ - -#define H5C__H5C_T_MAGIC 0x005CAC0E -#define H5C__MAX_NUM_TYPE_IDS 9 - -typedef struct local_H5C_t -{ - uint32_t magic; - - int32_t max_type_id; - const char * (* type_name_table_ptr); - - size_t max_cache_size; - size_t min_clean_size; - - H5C_write_permitted_func_t check_write_permitted; - - int32_t index_len; - size_t index_size; - H5C_cache_entry_t * (index[H5C__HASH_TABLE_LEN]); - - - int32_t tree_len; - size_t tree_size; - H5TB_TREE * tree_ptr; - - int32_t pl_len; - size_t pl_size; - H5C_cache_entry_t * pl_head_ptr; - H5C_cache_entry_t * pl_tail_ptr; - - int32_t LRU_list_len; - size_t LRU_list_size; - H5C_cache_entry_t * LRU_head_ptr; - H5C_cache_entry_t * LRU_tail_ptr; - - int32_t cLRU_list_len; - size_t cLRU_list_size; - H5C_cache_entry_t * cLRU_head_ptr; - H5C_cache_entry_t * cLRU_tail_ptr; - - int32_t dLRU_list_len; - size_t dLRU_list_size; - H5C_cache_entry_t * dLRU_head_ptr; - H5C_cache_entry_t * dLRU_tail_ptr; - -#if H5C_COLLECT_CACHE_STATS - - /* stats fields */ - int64_t hits[H5C__MAX_NUM_TYPE_IDS]; - int64_t misses[H5C__MAX_NUM_TYPE_IDS]; - int64_t insertions[H5C__MAX_NUM_TYPE_IDS]; - int64_t clears[H5C__MAX_NUM_TYPE_IDS]; - int64_t flushes[H5C__MAX_NUM_TYPE_IDS]; - int64_t evictions[H5C__MAX_NUM_TYPE_IDS]; - int64_t renames[H5C__MAX_NUM_TYPE_IDS]; - - int64_t total_ht_insertions; - int64_t total_ht_deletions; - int64_t successful_ht_searches; - int64_t total_successful_ht_search_depth; - int64_t failed_ht_searches; - int64_t total_failed_ht_search_depth; - - int32_t max_index_len; - size_t max_index_size; - - int32_t max_tree_len; - size_t max_tree_size; - - int32_t max_pl_len; - size_t max_pl_size; - -#if H5C_COLLECT_CACHE_ENTRY_STATS - - int32_t max_accesses[H5C__MAX_NUM_TYPE_IDS]; - int32_t min_accesses[H5C__MAX_NUM_TYPE_IDS]; - int32_t max_clears[H5C__MAX_NUM_TYPE_IDS]; - int32_t max_flushes[H5C__MAX_NUM_TYPE_IDS]; - size_t max_size[H5C__MAX_NUM_TYPE_IDS]; - -#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */ - -#endif /* H5C_COLLECT_CACHE_STATS */ - - hbool_t skip_file_checks; - hbool_t skip_dxpl_id_checks; - -} local_H5C_t; - /* global variable declarations: */ @@ -623,6 +530,14 @@ static void row_major_scan_forward(H5C_t * cache_ptr, int dirty_destroys, int dirty_unprotects); +static void hl_row_major_scan_forward(H5C_t * cache_ptr, + hbool_t verbose, + hbool_t reset_stats, + hbool_t display_stats, + hbool_t display_detailed_stats, + hbool_t do_inserts, + hbool_t dirty_inserts); + static void row_major_scan_backward(H5C_t * cache_ptr, int32_t lag, hbool_t verbose, @@ -637,6 +552,14 @@ static void row_major_scan_backward(H5C_t * cache_ptr, int dirty_destroys, int dirty_unprotects); +static void hl_row_major_scan_backward(H5C_t * cache_ptr, + hbool_t verbose, + hbool_t reset_stats, + hbool_t display_stats, + hbool_t display_detailed_stats, + hbool_t do_inserts, + hbool_t dirty_inserts); + static void col_major_scan_forward(H5C_t * cache_ptr, int32_t lag, hbool_t verbose, @@ -647,6 +570,15 @@ static void col_major_scan_forward(H5C_t * cache_ptr, hbool_t dirty_inserts, int dirty_unprotects); +static void hl_col_major_scan_forward(H5C_t * cache_ptr, + hbool_t verbose, + hbool_t reset_stats, + hbool_t display_stats, + hbool_t display_detailed_stats, + hbool_t do_inserts, + hbool_t dirty_inserts, + int dirty_unprotects); + static void col_major_scan_backward(H5C_t * cache_ptr, int32_t lag, hbool_t verbose, @@ -657,10 +589,23 @@ static void col_major_scan_backward(H5C_t * cache_ptr, hbool_t dirty_inserts, int dirty_unprotects); +static void hl_col_major_scan_backward(H5C_t 
* cache_ptr, + hbool_t verbose, + hbool_t reset_stats, + hbool_t display_stats, + hbool_t display_detailed_stats, + hbool_t do_inserts, + hbool_t dirty_inserts, + int dirty_unprotects); + static void smoke_check_1(void); static void smoke_check_2(void); static void smoke_check_3(void); static void smoke_check_4(void); +static void smoke_check_5(void); +static void smoke_check_6(void); +static void smoke_check_7(void); +static void smoke_check_8(void); static void write_permitted_check(void); static void check_flush_protected_err(void); static void check_destroy_protected_err(void); @@ -668,6 +613,11 @@ static void check_duplicate_insert_err(void); static void check_rename_err(void); static void check_double_protect_err(void); static void check_double_unprotect_err(void); +static void check_auto_cache_resize(void); +static void check_auto_cache_resize_disable(void); +static void check_auto_cache_resize_epoch_markers(void); +static void check_auto_cache_resize_input_errs(void); +static void check_auto_cache_resize_aux_fcns(void); static void takedown_cache(H5C_t * cache_ptr, hbool_t dump_stats, @@ -1449,6 +1399,10 @@ monster_size(H5F_t * f, void * thing, size_t * size_ptr) * * Modifications: * + * JRM - 10/12/04 + * Removed references to local_H5C_t, as we now get direct + * access to the definition of H5C_t via H5Cpkg.h. + * *------------------------------------------------------------------------- */ @@ -1473,7 +1427,7 @@ entry_in_cache(H5C_t * cache_ptr, HDassert( entry_ptr->type == type ); HDassert( entry_ptr == entry_ptr->self ); - H5C__SEARCH_INDEX(((local_H5C_t *)cache_ptr), entry_ptr->addr, test_ptr) + H5C__SEARCH_INDEX(cache_ptr, entry_ptr->addr, test_ptr) if ( test_ptr != NULL ) { @@ -1885,6 +1839,22 @@ insert_entry(H5C_t * cache_ptr, pass = FALSE; failure_mssg = "error in H5C_insert()."; + +#if 0 + /* This is useful debugging code. Lets keep it around. */ + + HDfprintf(stdout, "result = %d\n", (int)result); + HDfprintf(stdout, "entry_ptr->header.is_protected = %d\n", + (int)(entry_ptr->header.is_protected)); + HDfprintf(stdout, "entry_ptr->header.type != &(types[type]) = %d\n", + (int)(entry_ptr->header.type != &(types[type]))); + HDfprintf(stdout, + "entry_ptr->size != entry_ptr->header.size = %d\n", + (int)(entry_ptr->size != entry_ptr->header.size)); + HDfprintf(stdout, + "entry_ptr->addr != entry_ptr->header.addr = %d\n", + (int)(entry_ptr->addr != entry_ptr->header.addr)); +#endif } HDassert( ((entry_ptr->header).type)->id == type ); @@ -2036,6 +2006,28 @@ protect_entry(H5C_t * cache_ptr, ( entry_ptr->size != entry_ptr->header.size ) || ( entry_ptr->addr != entry_ptr->header.addr ) ) { +#if 0 + /* I've written the following debugging code several times + * now. Lets keep it around so I don't have to write it + * again. 
+ * - JRM + */ + HDfprintf(stdout, "( cache_entry_ptr != (void *)entry_ptr ) = %d\n", + (int)( cache_entry_ptr != (void *)entry_ptr )); + HDfprintf(stdout, "cache_entry_ptr = 0x%lx, entry_ptr = 0x%lx\n", + (long)cache_entry_ptr, (long)entry_ptr); + HDfprintf(stdout, "entry_ptr->header.is_protected = %d\n", + (int)(entry_ptr->header.is_protected)); + HDfprintf(stdout, + "( entry_ptr->header.type != &(types[type]) ) = %d\n", + (int)( entry_ptr->header.type != &(types[type]) )); + HDfprintf(stdout, + "entry_ptr->size = %d, entry_ptr->header.size = %d\n", + (int)(entry_ptr->size), (int)(entry_ptr->header.size)); + HDfprintf(stdout, + "entry_ptr->addr = %d, entry_ptr->header.addr = %d\n", + (int)(entry_ptr->addr), (int)(entry_ptr->header.addr)); +#endif pass = FALSE; failure_mssg = "error in H5C_protect()."; @@ -2356,6 +2348,104 @@ row_major_scan_forward(H5C_t * cache_ptr, /*------------------------------------------------------------------------- + * Function: hl_row_major_scan_forward() + * + * Purpose: Do a high locality sequence of inserts, protects, and + * unprotects while scanning through the set of entries. + * If pass is false on entry, do nothing. + * + * Return: void + * + * Programmer: John Mainzer + * 10/21/04 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +static void +hl_row_major_scan_forward(H5C_t * cache_ptr, + hbool_t verbose, + hbool_t reset_stats, + hbool_t display_stats, + hbool_t display_detailed_stats, + hbool_t do_inserts, + hbool_t dirty_inserts) +{ + const char * fcn_name = "hl_row_major_scan_forward"; + int32_t type; + int32_t idx; + int32_t i; + int32_t lag = 100; + + if ( verbose ) + HDfprintf(stdout, "%s(): entering.\n", fcn_name); + + HDassert( lag > 5 ); + + type = 0; + + if ( ( pass ) && ( reset_stats ) ) { + + H5C_stats__reset(cache_ptr); + } + + while ( ( pass ) && ( type < NUMBER_OF_ENTRY_TYPES ) ) + { + idx = -lag; + + while ( ( pass ) && ( idx <= (max_indices[type] + lag) ) ) + { + if ( ( pass ) && ( do_inserts ) && ( (idx + lag) >= 0 ) && + ( (idx + lag) <= max_indices[type] ) && + ( ((idx + lag) % 2) == 0 ) && + ( ! entry_in_cache(cache_ptr, type, (idx + lag)) ) ) { + + if ( verbose ) + HDfprintf(stdout, "(i, %d, %d) ", type, (idx + lag)); + + insert_entry(cache_ptr, type, (idx + lag), dirty_inserts); + } + + i = idx; + + while ( ( pass ) && ( i >= idx - lag ) && ( i >= 0 ) ) + { + if ( ( pass ) && ( i >= 0 ) && ( i <= max_indices[type] ) ) { + + if ( verbose ) + HDfprintf(stdout, "(p, %d, %d) ", type, i); + + protect_entry(cache_ptr, type, i); + + if ( verbose ) + HDfprintf(stdout, "(u, %d, %d) ", type, i); + + unprotect_entry(cache_ptr, type, i, NO_CHANGE, FALSE); + } + i--; + } + + if ( verbose ) + HDfprintf(stdout, "\n"); + + idx++; + } + type++; + } + + if ( ( pass ) && ( display_stats ) ) { + + H5C_stats(cache_ptr, "test cache", display_detailed_stats); + } + + return; + +} /* hl_row_major_scan_forward() */ + + +/*------------------------------------------------------------------------- * Function: row_major_scan_backward() * * Purpose: Do a sequence of inserts, protects, unprotects, renames, @@ -2579,6 +2669,104 @@ row_major_scan_backward(H5C_t * cache_ptr, /*------------------------------------------------------------------------- + * Function: hl_row_major_scan_backward() + * + * Purpose: Do a high locality sequence of inserts, protects, and + * unprotects while scanning through the set of entries. + * If pass is false on entry, do nothing. 
+ * + * Return: void + * + * Programmer: John Mainzer + * 10/21/04 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +static void +hl_row_major_scan_backward(H5C_t * cache_ptr, + hbool_t verbose, + hbool_t reset_stats, + hbool_t display_stats, + hbool_t display_detailed_stats, + hbool_t do_inserts, + hbool_t dirty_inserts) +{ + const char * fcn_name = "hl_row_major_scan_backward"; + int32_t type; + int32_t idx; + int32_t i; + int32_t lag = 100; + + if ( verbose ) + HDfprintf(stdout, "%s(): entering.\n", fcn_name); + + HDassert( lag > 5 ); + + type = NUMBER_OF_ENTRY_TYPES - 1; + + if ( ( pass ) && ( reset_stats ) ) { + + H5C_stats__reset(cache_ptr); + } + + while ( ( pass ) && ( type >= 0 ) ) + { + idx = max_indices[type] + lag; + + while ( ( pass ) && ( idx >= -lag ) ) + { + if ( ( pass ) && ( do_inserts ) && ( (idx + lag) >= 0 ) && + ( (idx + lag) <= max_indices[type] ) && + ( ((idx + lag) % 2) == 0 ) && + ( ! entry_in_cache(cache_ptr, type, (idx + lag)) ) ) { + + if ( verbose ) + HDfprintf(stdout, "(i, %d, %d) ", type, (idx + lag)); + + insert_entry(cache_ptr, type, (idx + lag), dirty_inserts); + } + + i = idx; + + while ( ( pass ) && ( i >= idx - lag ) && ( i >= 0 ) ) + { + if ( ( pass ) && ( i >= 0 ) && ( i <= max_indices[type] ) ) { + + if ( verbose ) + HDfprintf(stdout, "(p, %d, %d) ", type, i); + + protect_entry(cache_ptr, type, i); + + if ( verbose ) + HDfprintf(stdout, "(u, %d, %d) ", type, i); + + unprotect_entry(cache_ptr, type, i, NO_CHANGE, FALSE); + } + i--; + } + + if ( verbose ) + HDfprintf(stdout, "\n"); + + idx--; + } + type--; + } + + if ( ( pass ) && ( display_stats ) ) { + + H5C_stats(cache_ptr, "test cache", display_detailed_stats); + } + + return; + +} /* hl_row_major_scan_backward() */ + + +/*------------------------------------------------------------------------- * Function: col_major_scan_forward() * * Purpose: Do a sequence of inserts, protects, and unprotects @@ -2679,16 +2867,16 @@ col_major_scan_forward(H5C_t * cache_ptr, /*------------------------------------------------------------------------- - * Function: col_major_scan_backward() + * Function: hl_col_major_scan_forward() * - * Purpose: Do a sequence of inserts, protects, and unprotects - * while scanning backwards through the set of - * entries. If pass is false on entry, do nothing. + * Purpose: Do a high locality sequence of inserts, protects, and + * unprotects while scanning through the set of entries. If + * pass is false on entry, do nothing. 
* * Return: void * * Programmer: John Mainzer - * 6/23/04 + * 19/25/04 * * Modifications: * @@ -2696,95 +2884,310 @@ col_major_scan_forward(H5C_t * cache_ptr, */ static void -col_major_scan_backward(H5C_t * cache_ptr, - int32_t lag, - hbool_t verbose, - hbool_t reset_stats, - hbool_t display_stats, - hbool_t display_detailed_stats, - hbool_t do_inserts, - hbool_t dirty_inserts, - int dirty_unprotects) +hl_col_major_scan_forward(H5C_t * cache_ptr, + hbool_t verbose, + hbool_t reset_stats, + hbool_t display_stats, + hbool_t display_detailed_stats, + hbool_t do_inserts, + hbool_t dirty_inserts, + int dirty_unprotects) { - const char * fcn_name = "col_major_scan_backward()"; - int mile_stone = 1; + const char * fcn_name = "hl_col_major_scan_forward()"; int32_t type; int32_t idx; + int32_t lag = 200; + int32_t i; if ( verbose ) HDfprintf(stdout, "%s: entering.\n", fcn_name); HDassert( lag > 5 ); + type = 0; + if ( ( pass ) && ( reset_stats ) ) { H5C_stats__reset(cache_ptr); } - idx = MAX_ENTRIES + lag; + idx = 0; - if ( verbose ) /* 1 */ - HDfprintf(stdout, "%s: point %d.\n", fcn_name, mile_stone++); + while ( ( pass ) && ( idx <= MAX_ENTRIES ) ) + { + i = idx; - while ( ( pass ) && ( (idx + lag) >= 0 ) ) - { - type = NUMBER_OF_ENTRY_TYPES - 1; + while ( ( pass ) && ( i >= 0 ) && ( i >= (idx - lag) ) ) { - while ( ( pass ) && ( type >= 0 ) ) - { - if ( ( pass ) && ( do_inserts) && ( (idx - lag) >= 0 ) && - ( (idx - lag) <= max_indices[type] ) && - ( ((idx - lag) % 3) == 0 ) && - ( ! entry_in_cache(cache_ptr, type, (idx - lag)) ) ) { + type = 0; - if ( verbose ) - HDfprintf(stdout, "(i, %d, %d) ", type, (idx - lag)); + while ( ( pass ) && ( type < NUMBER_OF_ENTRY_TYPES ) ) + { + if ( ( pass ) && ( do_inserts ) && ( i == idx ) && + ( i <= max_indices[type] ) && + ( (i % 3) == 0 ) && + ( ! entry_in_cache(cache_ptr, type, i) ) ) { - insert_entry(cache_ptr, type, (idx - lag), dirty_inserts); - } + if ( verbose ) + HDfprintf(stdout, "(i, %d, %d) ", type, i); - if ( ( pass ) && ( idx >= 0 ) && ( idx <= max_indices[type] ) ) { + insert_entry(cache_ptr, type, i, dirty_inserts); + } - if ( verbose ) - HDfprintf(stdout, "(p, %d, %d) ", type, idx); + if ( ( pass ) && ( i >= 0 ) && ( i <= max_indices[type] ) ) { - protect_entry(cache_ptr, type, idx); - } + if ( verbose ) + HDfprintf(stdout, "(p, %d, %d) ", type, i); - if ( ( pass ) && ( (idx + lag) >= 0 ) && - ( (idx + lag) <= max_indices[type] ) ) { + protect_entry(cache_ptr, type, i); + } + + if ( ( pass ) && ( i >= 0 ) && + ( i <= max_indices[type] ) ) { + + if ( verbose ) + HDfprintf(stdout, "(u, %d, %d) ", type, i); + unprotect_entry(cache_ptr, type, i, + dirty_unprotects, FALSE); + } + if ( verbose ) - HDfprintf(stdout, "(u, %d, %d) ", type, (idx + lag)); + HDfprintf(stdout, "\n"); - unprotect_entry(cache_ptr, type, idx + lag, - dirty_unprotects, FALSE); + type++; } - - if ( verbose ) - HDfprintf(stdout, "\n"); - type--; + i--; } - idx--; + idx++; } - if ( verbose ) /* 2 */ - HDfprintf(stdout, "%s: point %d.\n", fcn_name, mile_stone++); - if ( ( pass ) && ( display_stats ) ) { H5C_stats(cache_ptr, "test cache", display_detailed_stats); } - if ( verbose ) - HDfprintf(stdout, "%s: exiting.\n", fcn_name); - return; -} /* col_major_scan_backward() */ +} /* hl_col_major_scan_forward() */ + + +/*------------------------------------------------------------------------- + * Function: col_major_scan_backward() + * + * Purpose: Do a sequence of inserts, protects, and unprotects + * while scanning backwards through the set of + * entries. 
If pass is false on entry, do nothing. + * + * Return: void + * + * Programmer: John Mainzer + * 6/23/04 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +static void +col_major_scan_backward(H5C_t * cache_ptr, + int32_t lag, + hbool_t verbose, + hbool_t reset_stats, + hbool_t display_stats, + hbool_t display_detailed_stats, + hbool_t do_inserts, + hbool_t dirty_inserts, + int dirty_unprotects) +{ + const char * fcn_name = "col_major_scan_backward()"; + int mile_stone = 1; + int32_t type; + int32_t idx; + + if ( verbose ) + HDfprintf(stdout, "%s: entering.\n", fcn_name); + + HDassert( lag > 5 ); + + if ( ( pass ) && ( reset_stats ) ) { + + H5C_stats__reset(cache_ptr); + } + + idx = MAX_ENTRIES + lag; + + if ( verbose ) /* 1 */ + HDfprintf(stdout, "%s: point %d.\n", fcn_name, mile_stone++); + + + while ( ( pass ) && ( (idx + lag) >= 0 ) ) + { + type = NUMBER_OF_ENTRY_TYPES - 1; + + while ( ( pass ) && ( type >= 0 ) ) + { + if ( ( pass ) && ( do_inserts) && ( (idx - lag) >= 0 ) && + ( (idx - lag) <= max_indices[type] ) && + ( ((idx - lag) % 3) == 0 ) && + ( ! entry_in_cache(cache_ptr, type, (idx - lag)) ) ) { + + if ( verbose ) + HDfprintf(stdout, "(i, %d, %d) ", type, (idx - lag)); + + insert_entry(cache_ptr, type, (idx - lag), dirty_inserts); + } + + if ( ( pass ) && ( idx >= 0 ) && ( idx <= max_indices[type] ) ) { + + if ( verbose ) + HDfprintf(stdout, "(p, %d, %d) ", type, idx); + + protect_entry(cache_ptr, type, idx); + } + + if ( ( pass ) && ( (idx + lag) >= 0 ) && + ( (idx + lag) <= max_indices[type] ) ) { + + if ( verbose ) + HDfprintf(stdout, "(u, %d, %d) ", type, (idx + lag)); + + unprotect_entry(cache_ptr, type, idx + lag, + dirty_unprotects, FALSE); + } + + if ( verbose ) + HDfprintf(stdout, "\n"); + + type--; + } + + idx--; + } + + if ( verbose ) /* 2 */ + HDfprintf(stdout, "%s: point %d.\n", fcn_name, mile_stone++); + + if ( ( pass ) && ( display_stats ) ) { + + H5C_stats(cache_ptr, "test cache", display_detailed_stats); + } + + if ( verbose ) + HDfprintf(stdout, "%s: exiting.\n", fcn_name); + + return; + +} /* col_major_scan_backward() */ + + +/*------------------------------------------------------------------------- + * Function: hl_col_major_scan_backward() + * + * Purpose: Do a high locality sequence of inserts, protects, and + * unprotects while scanning backwards through the set of + * entries. If pass is false on entry, do nothing. + * + * Return: void + * + * Programmer: John Mainzer + * 10/25/04 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +static void +hl_col_major_scan_backward(H5C_t * cache_ptr, + hbool_t verbose, + hbool_t reset_stats, + hbool_t display_stats, + hbool_t display_detailed_stats, + hbool_t do_inserts, + hbool_t dirty_inserts, + int dirty_unprotects) +{ + const char * fcn_name = "hl_col_major_scan_backward()"; + int32_t type; + int32_t idx; + int32_t lag = 50; + int32_t i; + + if ( verbose ) + HDfprintf(stdout, "%s: entering.\n", fcn_name); + + type = 0; + + if ( ( pass ) && ( reset_stats ) ) { + + H5C_stats__reset(cache_ptr); + } + + idx = MAX_ENTRIES; + + while ( ( pass ) && ( idx >= 0 ) ) + { + + i = idx; + + while ( ( pass ) && ( i <= MAX_ENTRIES ) && ( i <= (idx + lag) ) ) { + + type = 0; + + while ( ( pass ) && ( type < NUMBER_OF_ENTRY_TYPES ) ) + { + if ( ( pass ) && ( do_inserts ) && ( i == idx ) && + ( i <= max_indices[type] ) && + ( ! 
entry_in_cache(cache_ptr, type, i) ) ) { + + if ( verbose ) + HDfprintf(stdout, "(i, %d, %d) ", type, i); + + insert_entry(cache_ptr, type, i, dirty_inserts); + } + + if ( ( pass ) && ( i >= 0 ) && ( i <= max_indices[type] ) ) { + + if ( verbose ) + HDfprintf(stdout, "(p, %d, %d) ", type, i); + + protect_entry(cache_ptr, type, i); + } + + if ( ( pass ) && ( i >= 0 ) && + ( i <= max_indices[type] ) ) { + + if ( verbose ) + HDfprintf(stdout, "(u, %d, %d) ", type, i); + + unprotect_entry(cache_ptr, type, i, + dirty_unprotects, FALSE); + } + + if ( verbose ) + HDfprintf(stdout, "\n"); + + type++; + } + + i++; + } + + idx--; + } + + if ( ( pass ) && ( display_stats ) ) { + + H5C_stats(cache_ptr, "test cache", display_detailed_stats); + } + + return; + +} /* hl_col_major_scan_backward() */ /**************************************************************************/ @@ -3488,18 +3891,17 @@ smoke_check_4(void) /*------------------------------------------------------------------------- - * Function: write_permitted_check() + * Function: smoke_check_5() * - * Purpose: A basic test of the write permitted function. In essence, - * we load the cache up with dirty entryies, set - * write_permitted to FALSE, and then protect a bunch of - * entries. If there are any writes while write_permitted is - * FALSE, the test will fail. + * Purpose: A basic functional test on a cache with automatic cache + * resizing enabled, with inserts in the mix, along with + * repeated protects and unprotects. All entries are marked + * as clean. * * Return: void * * Programmer: John Mainzer - * 6/24/04 + * 10/14/04 * * Modifications: * @@ -3507,23 +3909,61 @@ smoke_check_4(void) */ static void -write_permitted_check(void) +smoke_check_5(void) { - -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS - - const char * fcn_name = "write_permitted_check"; + const char * fcn_name = "smoke_check_5"; + herr_t result; hbool_t show_progress = FALSE; + hbool_t dirty_inserts = FALSE; + int dirty_unprotects = FALSE; hbool_t display_stats = FALSE; - int32_t lag = 10; int mile_stone = 1; H5C_t * cache_ptr = NULL; + H5C_auto_size_ctl_t auto_size_ctl = + { + /* int32_t version = */ H5C__CURR_AUTO_SIZE_CTL_VER, +#if 1 + /* H5C_auto_resize_report_fcn rpt_fcn = */ NULL, +#else + /* H5C_auto_resize_report_fcn rpt_fcn = */ H5C_def_auto_resize_rpt_fcn, +#endif + /* hbool_t set_initial_size = */ TRUE, + /* size_t initial_size = */ (2 * 1024 * 1024), -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ + /* double min_clean_fraction = */ 0.1, - TESTING("write permitted check -- 1/0 MB cache"); + /* size_t max_size = */ (32 * 1024 * 1025), + /* size_t min_size = */ (512 * 1024), -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS + /* int64_t epoch_length = */ 50000, + + + /* enum H5C_cache_incr_mode incr_mode = */ H5C_incr__threshold, + + /* double lower_hr_threshold = */ 0.75, + + /* double increment = */ 2.0, + + /* hbool_t apply_max_increment = */ TRUE, + /* size_t max_increment = */ (4 * 1024 * 1024), + + + /* enum H5C_cache_decr_mode decr_mode = */ H5C_decr__threshold, + + /* double upper_hr_threshold = */ 0.995, + + /* double decrement = */ 0.9, + + /* hbool_t apply_max_decrement = */ TRUE, + /* size_t max_decrement = */ (1 * 1024 * 1024), + + /* int32_t epochs_before_eviction = */ 3, + + /* hbool_t apply_empty_reserve = */ TRUE, + /* double empty_reserve = */ 0.5 + }; + + TESTING("smoke check #5 -- all clean, ins, prot, unprot, AR cache 1"); pass = TRUE; @@ -3537,66 +3977,55 @@ write_permitted_check(void) HDfprintf(stdout, "%s() - %0d -- pass = %d\n", 
fcn_name, mile_stone++, (int)pass); - cache_ptr = setup_cache((size_t)(1 * 1024 * 1024), - (size_t)(0)); + cache_ptr = setup_cache((size_t)(2 * 1024), + (size_t)(1 * 1024)); + + if ( pass ) { + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 1.\n"; + } + } if ( show_progress ) /* 3 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", fcn_name, mile_stone++, (int)pass); - row_major_scan_forward(/* cache_ptr */ cache_ptr, - /* lag */ lag, - /* verbose */ FALSE, - /* reset_stats */ TRUE, - /* display_stats */ display_stats, - /* display_detailed_stats */ TRUE, - /* do_inserts */ TRUE, - /* dirty_inserts */ TRUE, - /* do_renames */ TRUE, - /* rename_to_main_addr */ FALSE, - /* do_destroys */ TRUE, - /* dirty_destroys */ TRUE, - /* dirty_unprotects */ TRUE); + hl_row_major_scan_forward(/* cache_ptr */ cache_ptr, + /* verbose */ FALSE, + /* reset_stats */ TRUE, + /* display_stats */ display_stats, + /* display_detailed_stats */ FALSE, + /* do_inserts */ FALSE, + /* dirty_inserts */ dirty_inserts); if ( show_progress ) /* 4 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", fcn_name, mile_stone++, (int)pass); - write_permitted = FALSE; - - row_major_scan_backward(/* cache_ptr */ cache_ptr, - /* lag */ lag, - /* verbose */ FALSE, - /* reset_stats */ TRUE, - /* display_stats */ display_stats, - /* display_detailed_stats */ TRUE, - /* do_inserts */ FALSE, - /* dirty_inserts */ FALSE, - /* do_renames */ TRUE, - /* rename_to_main_addr */ TRUE, - /* do_destroys */ FALSE, - /* dirty_destroys */ FALSE, - /* dirty_unprotects */ NO_CHANGE); + hl_row_major_scan_backward(/* cache_ptr */ cache_ptr, + /* verbose */ FALSE, + /* reset_stats */ TRUE, + /* display_stats */ display_stats, + /* display_detailed_stats */ FALSE, + /* do_inserts */ FALSE, + /* dirty_inserts */ dirty_inserts); if ( show_progress ) /* 5 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", fcn_name, mile_stone++, (int)pass); - write_permitted = TRUE; - - row_major_scan_forward(/* cache_ptr */ cache_ptr, - /* lag */ lag, - /* verbose */ FALSE, - /* reset_stats */ TRUE, - /* display_stats */ display_stats, - /* display_detailed_stats */ TRUE, - /* do_inserts */ TRUE, - /* dirty_inserts */ TRUE, - /* do_renames */ TRUE, - /* rename_to_main_addr */ FALSE, - /* do_destroys */ FALSE, - /* dirty_destroys */ TRUE, - /* dirty_unprotects */ TRUE); + hl_row_major_scan_forward(/* cache_ptr */ cache_ptr, + /* verbose */ FALSE, + /* reset_stats */ TRUE, + /* display_stats */ display_stats, + /* display_detailed_stats */ FALSE, + /* do_inserts */ TRUE, + /* dirty_inserts */ dirty_inserts); if ( show_progress ) /* 6 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", @@ -3613,44 +4042,49 @@ write_permitted_check(void) HDfprintf(stdout, "%s() - %0d -- pass = %d\n", fcn_name, mile_stone++, (int)pass); - col_major_scan_forward(/* cache_ptr */ cache_ptr, - /* lag */ lag, - /* verbose */ FALSE, - /* reset_stats */ TRUE, - /* display_stats */ display_stats, - /* display_detailed_stats */ TRUE, - /* do_inserts */ TRUE, - /* dirty_inserts */ TRUE, - /* dirty_unprotects */ TRUE); + hl_col_major_scan_forward(/* cache_ptr */ cache_ptr, + /* verbose */ FALSE, + /* reset_stats */ TRUE, + /* display_stats */ display_stats, + /* display_detailed_stats */ FALSE, + /* do_inserts */ TRUE, + /* dirty_inserts */ dirty_inserts, + /* dirty_unprotects */ dirty_unprotects); if ( show_progress ) /* 8 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", fcn_name, 
mile_stone++, (int)pass); - write_permitted = FALSE; - - col_major_scan_backward(/* cache_ptr */ cache_ptr, - /* lag */ lag, - /* verbose */ FALSE, - /* reset_stats */ TRUE, - /* display_stats */ display_stats, - /* display_detailed_stats */ TRUE, - /* do_inserts */ FALSE, - /* dirty_inserts */ FALSE, - /* dirty_unprotects */ NO_CHANGE); + /* flush all entries in the cache: */ - write_permitted = TRUE; + flush_cache(/* cache_ptr */ cache_ptr, + /* destroy_entries */ FALSE, + /* dump_stats */ FALSE, + /* dump_detailed_stats */ FALSE); if ( show_progress ) /* 9 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", fcn_name, mile_stone++, (int)pass); - takedown_cache(cache_ptr, display_stats, TRUE); + hl_col_major_scan_backward(/* cache_ptr */ cache_ptr, + /* verbose */ FALSE, + /* reset_stats */ TRUE, + /* display_stats */ display_stats, + /* display_detailed_stats */ FALSE, + /* do_inserts */ TRUE, + /* dirty_inserts */ dirty_inserts, + /* dirty_unprotects */ dirty_unprotects); if ( show_progress ) /* 10 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", fcn_name, mile_stone++, (int)pass); + takedown_cache(cache_ptr, display_stats, TRUE); + + if ( show_progress ) /* 11 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + verify_clean(); verify_unprotected(); @@ -3660,27 +4094,21 @@ write_permitted_check(void) HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", fcn_name, failure_mssg); -#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - - SKIPPED(); - - HDfprintf(stdout, " Clean and dirty LRU lists disabled.\n"); - -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - -} /* write_permitted_check() */ +} /* smoke_check_5() */ /*------------------------------------------------------------------------- - * Function: check_flush_protected_err() + * Function: smoke_check_6() * - * Purpose: Verify that an attempt to flush the cache when it contains - * a protected entry will generate an error. + * Purpose: A basic functional test on a cache with automatic cache + * resizing enabled, with inserts in the mix, along with + * repeated protects and unprotects. About one half of all + * entries are marked as dirty. * * Return: void * * Programmer: John Mainzer - * 6/24/04 + * 10/25/04 * * Modifications: * @@ -3688,116 +4116,184 @@ write_permitted_check(void) */ static void -check_flush_protected_err(void) +smoke_check_6(void) { - const char * fcn_name = "check_flush_protected_err"; + const char * fcn_name = "smoke_check_6"; + herr_t result; + hbool_t show_progress = FALSE; + hbool_t dirty_inserts = TRUE; + int dirty_unprotects = FALSE; + hbool_t display_stats = FALSE; + int mile_stone = 1; H5C_t * cache_ptr = NULL; + H5C_auto_size_ctl_t auto_size_ctl = + { + /* int32_t version = */ H5C__CURR_AUTO_SIZE_CTL_VER, +#if 1 + /* H5C_auto_resize_report_fcn rpt_fcn = */ NULL, +#else + /* H5C_auto_resize_report_fcn rpt_fcn = */ H5C_def_auto_resize_rpt_fcn, +#endif + /* hbool_t set_initial_size = */ TRUE, + /* size_t initial_size = */ (2 * 1024 * 1024), - TESTING("flush cache with protected entry error"); + /* double min_clean_fraction = */ 0.1, - pass = TRUE; + /* size_t max_size = */ (32 * 1024 * 1025), + /* size_t min_size = */ (512 * 1024), - /* allocate a cache, protect an entry, and try to flush. This - * should fail. Unprotect the entry and flush again -- should - * succeed. 
- */ + /* int64_t epoch_length = */ 50000, - if ( pass ) { - reset_entries(); + /* enum H5C_cache_incr_mode incr_mode = */ H5C_incr__threshold, - cache_ptr = setup_cache((size_t)(2 * 1024), - (size_t)(1 * 1024)); + /* double lower_hr_threshold = */ 0.75, - protect_entry(cache_ptr, 0, 0); + /* double increment = */ 2.0, - if ( H5C_flush_cache(NULL, -1, -1, cache_ptr, 0) >= 0 ) { + /* hbool_t apply_max_increment = */ TRUE, + /* size_t max_increment = */ (4 * 1024 * 1024), - pass = FALSE; - failure_mssg = "flush succeeded on cache with protected entry.\n"; - } else { + /* enum H5C_cache_decr_mode decr_mode = */ H5C_decr__threshold, - unprotect_entry(cache_ptr, 0, 0, TRUE, FALSE); + /* double upper_hr_threshold = */ 0.995, - if ( H5C_flush_cache(NULL, -1, -1, cache_ptr, 0) < 0 ) { + /* double decrement = */ 0.9, - pass = FALSE; - failure_mssg = "flush failed after unprotect.\n"; + /* hbool_t apply_max_decrement = */ TRUE, + /* size_t max_decrement = */ (1 * 1024 * 1024), - } else { + /* int32_t epochs_before_eviction = */ 3, - takedown_cache(cache_ptr, FALSE, FALSE); - } + /* hbool_t apply_empty_reserve = */ TRUE, + /* double empty_reserve = */ 0.05 + }; + + TESTING("smoke check #6 -- ~1/2 dirty, ins, prot, unprot, AR cache 1"); + + pass = TRUE; + + if ( show_progress ) /* 1 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + reset_entries(); + + if ( show_progress ) /* 2 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + cache_ptr = setup_cache((size_t)(2 * 1024), + (size_t)(1 * 1024)); + + if ( pass ) { + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 1.\n"; } } - if ( pass ) { PASSED(); } else { H5_FAILED(); } + if ( show_progress ) /* 3 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); - if ( ! pass ) - HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + hl_row_major_scan_forward(/* cache_ptr */ cache_ptr, + /* verbose */ FALSE, + /* reset_stats */ TRUE, + /* display_stats */ display_stats, + /* display_detailed_stats */ FALSE, + /* do_inserts */ FALSE, + /* dirty_inserts */ dirty_inserts); -} /* check_flush_protected_err() */ + if ( show_progress ) /* 4 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); - -/*------------------------------------------------------------------------- - * Function: check_destroy_protected_err() - * - * Purpose: Verify that an attempt to destroy the cache when it contains - * a protected entry will generate an error. 
- * - * Return: void - * - * Programmer: John Mainzer - * 6/24/04 - * - * Modifications: - * - *------------------------------------------------------------------------- - */ + hl_row_major_scan_backward(/* cache_ptr */ cache_ptr, + /* verbose */ FALSE, + /* reset_stats */ TRUE, + /* display_stats */ display_stats, + /* display_detailed_stats */ FALSE, + /* do_inserts */ FALSE, + /* dirty_inserts */ dirty_inserts); -static void -check_destroy_protected_err(void) -{ - const char * fcn_name = "check_destroy_protected_err"; - H5C_t * cache_ptr = NULL; + if ( show_progress ) /* 5 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); - TESTING("destroy cache with protected entry error"); + hl_row_major_scan_forward(/* cache_ptr */ cache_ptr, + /* verbose */ FALSE, + /* reset_stats */ TRUE, + /* display_stats */ display_stats, + /* display_detailed_stats */ FALSE, + /* do_inserts */ TRUE, + /* dirty_inserts */ dirty_inserts); - pass = TRUE; + if ( show_progress ) /* 6 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); - /* allocate a cache, protect an entry, and try to flush. This - * should fail. Unprotect the entry and flush again -- should - * succeed. - */ + /* flush and destroy all entries in the cache: */ - if ( pass ) { + flush_cache(/* cache_ptr */ cache_ptr, + /* destroy_entries */ TRUE, + /* dump_stats */ FALSE, + /* dump_detailed_stats */ FALSE); - reset_entries(); + if ( show_progress ) /* 7 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); - cache_ptr = setup_cache((size_t)(2 * 1024), - (size_t)(1 * 1024)); + hl_col_major_scan_forward(/* cache_ptr */ cache_ptr, + /* verbose */ FALSE, + /* reset_stats */ TRUE, + /* display_stats */ display_stats, + /* display_detailed_stats */ FALSE, + /* do_inserts */ TRUE, + /* dirty_inserts */ dirty_inserts, + /* dirty_unprotects */ dirty_unprotects); - protect_entry(cache_ptr, 0, 0); + if ( show_progress ) /* 8 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); - if ( H5C_dest(NULL, -1, -1, cache_ptr) >= 0 ) { + /* flush all entries in the cache: */ - pass = FALSE; - failure_mssg = "destroy succeeded on cache with protected entry.\n"; + flush_cache(/* cache_ptr */ cache_ptr, + /* destroy_entries */ FALSE, + /* dump_stats */ FALSE, + /* dump_detailed_stats */ FALSE); - } else { + if ( show_progress ) /* 9 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); - unprotect_entry(cache_ptr, 0, 0, TRUE, FALSE); + hl_col_major_scan_backward(/* cache_ptr */ cache_ptr, + /* verbose */ FALSE, + /* reset_stats */ TRUE, + /* display_stats */ display_stats, + /* display_detailed_stats */ FALSE, + /* do_inserts */ TRUE, + /* dirty_inserts */ dirty_inserts, + /* dirty_unprotects */ dirty_unprotects); - if ( H5C_dest(NULL, -1, -1, cache_ptr) < 0 ) { + if ( show_progress ) /* 10 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); - pass = FALSE; - failure_mssg = "destroy failed after unprotect.\n"; + takedown_cache(cache_ptr, display_stats, TRUE); - } - } - } + if ( show_progress ) /* 11 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + verify_clean(); + verify_unprotected(); if ( pass ) { PASSED(); } else { H5_FAILED(); } @@ -3805,19 +4301,21 @@ check_destroy_protected_err(void) HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", fcn_name, failure_mssg); -} /* check_destroy_protected_err() */ +} 
/* smoke_check_6() */ /*------------------------------------------------------------------------- - * Function: check_duplicate_insert_err() + * Function: smoke_check_7() * - * Purpose: Verify that an attempt to insert and entry that is - * alread in the cache will generate an error. + * Purpose: A basic functional test on a cache with automatic cache + * resizing enabled, with inserts in the mix, along with + * repeated protects and unprotects. All entries are marked + * as clean. * * Return: void * * Programmer: John Mainzer - * 6/24/04 + * 12/2/04 * * Modifications: * @@ -3825,287 +4323,9962 @@ check_destroy_protected_err(void) */ static void -check_duplicate_insert_err(void) +smoke_check_7(void) { - const char * fcn_name = "check_duplicate_insert_err"; + const char * fcn_name = "smoke_check_7"; herr_t result; + hbool_t show_progress = FALSE; + hbool_t dirty_inserts = FALSE; + int dirty_unprotects = FALSE; + hbool_t display_stats = FALSE; + int mile_stone = 1; H5C_t * cache_ptr = NULL; - test_entry_t * base_addr; - test_entry_t * entry_ptr; + H5C_auto_size_ctl_t auto_size_ctl = + { + /* int32_t version = */ H5C__CURR_AUTO_SIZE_CTL_VER, +#if 1 + /* H5C_auto_resize_report_fcn rpt_fcn = */ NULL, +#else + /* H5C_auto_resize_report_fcn rpt_fcn = */ H5C_def_auto_resize_rpt_fcn, +#endif + /* hbool_t set_initial_size = */ TRUE, + /* size_t initial_size = */ (2 * 1024 * 1024), - TESTING("duplicate entry insertion error"); + /* double min_clean_fraction = */ 0.1, + + /* size_t max_size = */ (32 * 1024 * 1025), + /* size_t min_size = */ (512 * 1024), + + /* int64_t epoch_length = */ 100000, + + + /* enum H5C_cache_incr_mode incr_mode = */ H5C_incr__threshold, + + /* double lower_hr_threshold = */ 0.75, + + /* double increment = */ 2.0, + + /* hbool_t apply_max_increment = */ TRUE, + /* size_t max_increment = */ (8 * 1024 * 1024), + + + /* enum H5C_cache_decr_mode decr_mode = */ + H5C_decr__age_out_with_threshold, + + /* double upper_hr_threshold = */ 0.995, + + /* double decrement = */ 0.9, + + /* hbool_t apply_max_decrement = */ TRUE, + /* size_t max_decrement = */ (1 * 1024 * 1024), + + /* int32_t epochs_before_eviction = */ 3, + + /* hbool_t apply_empty_reserve = */ TRUE, + /* double empty_reserve = */ 0.1 + }; + + TESTING("smoke check #7 -- all clean, ins, prot, unprot, AR cache 2"); + + pass = TRUE; + + if ( show_progress ) /* 1 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + reset_entries(); + + if ( show_progress ) /* 2 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + cache_ptr = setup_cache((size_t)(2 * 1024), + (size_t)(1 * 1024)); + + if ( pass ) { + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 1.\n"; + } + } + + if ( show_progress ) /* 3 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + hl_row_major_scan_forward(/* cache_ptr */ cache_ptr, + /* verbose */ FALSE, + /* reset_stats */ TRUE, + /* display_stats */ display_stats, + /* display_detailed_stats */ FALSE, + /* do_inserts */ FALSE, + /* dirty_inserts */ dirty_inserts); + + if ( show_progress ) /* 4 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + hl_row_major_scan_backward(/* cache_ptr */ cache_ptr, + /* verbose */ FALSE, + /* reset_stats */ TRUE, + /* display_stats */ display_stats, + /* display_detailed_stats */ FALSE, + 
/* do_inserts */ FALSE, + /* dirty_inserts */ dirty_inserts); + + if ( show_progress ) /* 5 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + hl_row_major_scan_forward(/* cache_ptr */ cache_ptr, + /* verbose */ FALSE, + /* reset_stats */ TRUE, + /* display_stats */ display_stats, + /* display_detailed_stats */ FALSE, + /* do_inserts */ TRUE, + /* dirty_inserts */ dirty_inserts); + + if ( show_progress ) /* 6 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + /* flush and destroy all entries in the cache: */ + + flush_cache(/* cache_ptr */ cache_ptr, + /* destroy_entries */ TRUE, + /* dump_stats */ FALSE, + /* dump_detailed_stats */ FALSE); + + if ( show_progress ) /* 7 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + hl_col_major_scan_forward(/* cache_ptr */ cache_ptr, + /* verbose */ FALSE, + /* reset_stats */ TRUE, + /* display_stats */ display_stats, + /* display_detailed_stats */ FALSE, + /* do_inserts */ TRUE, + /* dirty_inserts */ dirty_inserts, + /* dirty_unprotects */ dirty_unprotects); + + if ( show_progress ) /* 8 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + /* flush all entries in the cache: */ + + flush_cache(/* cache_ptr */ cache_ptr, + /* destroy_entries */ FALSE, + /* dump_stats */ FALSE, + /* dump_detailed_stats */ FALSE); + + if ( show_progress ) /* 9 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + hl_col_major_scan_backward(/* cache_ptr */ cache_ptr, + /* verbose */ FALSE, + /* reset_stats */ TRUE, + /* display_stats */ display_stats, + /* display_detailed_stats */ FALSE, + /* do_inserts */ TRUE, + /* dirty_inserts */ dirty_inserts, + /* dirty_unprotects */ dirty_unprotects); + + if ( show_progress ) /* 10 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + takedown_cache(cache_ptr, display_stats, TRUE); + + if ( show_progress ) /* 11 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + verify_clean(); + verify_unprotected(); + + if ( pass ) { PASSED(); } else { H5_FAILED(); } + + if ( ! pass ) + HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", + fcn_name, failure_mssg); + +} /* smoke_check_7() */ + + +/*------------------------------------------------------------------------- + * Function: smoke_check_8() + * + * Purpose: A basic functional test on a cache with automatic cache + * resizing enabled, with inserts in the mix, along with + * repeated protects and unprotects. About one half of all + * entries are marked as dirty. 
+ * + * Return: void + * + * Programmer: John Mainzer + * 10/25/04 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +static void +smoke_check_8(void) +{ + const char * fcn_name = "smoke_check_8"; + herr_t result; + hbool_t show_progress = FALSE; + hbool_t dirty_inserts = TRUE; + int dirty_unprotects = FALSE; + hbool_t display_stats = FALSE; + int mile_stone = 1; + H5C_t * cache_ptr = NULL; + H5C_auto_size_ctl_t auto_size_ctl = + { + /* int32_t version = */ H5C__CURR_AUTO_SIZE_CTL_VER, +#if 1 + /* H5C_auto_resize_report_fcn rpt_fcn = */ NULL, +#else + /* H5C_auto_resize_report_fcn rpt_fcn = */ H5C_def_auto_resize_rpt_fcn, +#endif + /* hbool_t set_initial_size = */ TRUE, + /* size_t initial_size = */ (2 * 1024 * 1024), + + /* double min_clean_fraction = */ 0.1, + + /* size_t max_size = */ (32 * 1024 * 1025), + /* size_t min_size = */ (512 * 1024), + + /* int64_t epoch_length = */ 100000, + + + /* enum H5C_cache_incr_mode incr_mode = */ H5C_incr__threshold, + + /* double lower_hr_threshold = */ 0.75, + + /* double increment = */ 2.0, + + /* hbool_t apply_max_increment = */ TRUE, + /* size_t max_increment = */ (4 * 1024 * 1024), + + + /* enum H5C_cache_decr_mode decr_mode = */ + H5C_decr__age_out_with_threshold, + + /* double upper_hr_threshold = */ 0.995, + + /* double decrement = */ 0.9, + + /* hbool_t apply_max_decrement = */ TRUE, + /* size_t max_decrement = */ (1 * 1024 * 1024), + + /* int32_t epochs_before_eviction = */ 3, + + /* hbool_t apply_empty_reserve = */ TRUE, + /* double empty_reserve = */ 0.1 + }; + + TESTING("smoke check #8 -- ~1/2 dirty, ins, prot, unprot, AR cache 2"); + + pass = TRUE; + + if ( show_progress ) /* 1 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + reset_entries(); + + if ( show_progress ) /* 2 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + cache_ptr = setup_cache((size_t)(2 * 1024), + (size_t)(1 * 1024)); + + if ( pass ) { + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 1.\n"; + } + } + + if ( show_progress ) /* 3 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + hl_row_major_scan_forward(/* cache_ptr */ cache_ptr, + /* verbose */ FALSE, + /* reset_stats */ TRUE, + /* display_stats */ display_stats, + /* display_detailed_stats */ FALSE, + /* do_inserts */ FALSE, + /* dirty_inserts */ dirty_inserts); + + if ( show_progress ) /* 4 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + hl_row_major_scan_backward(/* cache_ptr */ cache_ptr, + /* verbose */ FALSE, + /* reset_stats */ TRUE, + /* display_stats */ display_stats, + /* display_detailed_stats */ FALSE, + /* do_inserts */ FALSE, + /* dirty_inserts */ dirty_inserts); + + if ( show_progress ) /* 5 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + hl_row_major_scan_forward(/* cache_ptr */ cache_ptr, + /* verbose */ FALSE, + /* reset_stats */ TRUE, + /* display_stats */ display_stats, + /* display_detailed_stats */ FALSE, + /* do_inserts */ TRUE, + /* dirty_inserts */ dirty_inserts); + + if ( show_progress ) /* 6 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + /* flush and destroy all entries in the cache: */ + + flush_cache(/* cache_ptr */ cache_ptr, + /* 
destroy_entries */ TRUE, + /* dump_stats */ FALSE, + /* dump_detailed_stats */ FALSE); + + if ( show_progress ) /* 7 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + hl_col_major_scan_forward(/* cache_ptr */ cache_ptr, + /* verbose */ FALSE, + /* reset_stats */ TRUE, + /* display_stats */ display_stats, + /* display_detailed_stats */ FALSE, + /* do_inserts */ TRUE, + /* dirty_inserts */ dirty_inserts, + /* dirty_unprotects */ dirty_unprotects); + + if ( show_progress ) /* 8 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + /* flush all entries in the cache: */ + + flush_cache(/* cache_ptr */ cache_ptr, + /* destroy_entries */ FALSE, + /* dump_stats */ FALSE, + /* dump_detailed_stats */ FALSE); + + if ( show_progress ) /* 9 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + hl_col_major_scan_backward(/* cache_ptr */ cache_ptr, + /* verbose */ FALSE, + /* reset_stats */ TRUE, + /* display_stats */ display_stats, + /* display_detailed_stats */ FALSE, + /* do_inserts */ TRUE, + /* dirty_inserts */ dirty_inserts, + /* dirty_unprotects */ dirty_unprotects); + + if ( show_progress ) /* 10 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + takedown_cache(cache_ptr, display_stats, TRUE); + + if ( show_progress ) /* 11 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + verify_clean(); + verify_unprotected(); + + if ( pass ) { PASSED(); } else { H5_FAILED(); } + + if ( ! pass ) + HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", + fcn_name, failure_mssg); + +} /* smoke_check_8() */ + + +/*------------------------------------------------------------------------- + * Function: write_permitted_check() + * + * Purpose: A basic test of the write permitted function. In essence, + * we load the cache up with dirty entryies, set + * write_permitted to FALSE, and then protect a bunch of + * entries. If there are any writes while write_permitted is + * FALSE, the test will fail. 
+ * + * Return: void + * + * Programmer: John Mainzer + * 6/24/04 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +static void +write_permitted_check(void) +{ + +#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS + + const char * fcn_name = "write_permitted_check"; + hbool_t show_progress = FALSE; + hbool_t display_stats = FALSE; + int32_t lag = 10; + int mile_stone = 1; + H5C_t * cache_ptr = NULL; + +#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ + + TESTING("write permitted check -- 1/0 MB cache"); + +#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS + + pass = TRUE; + + if ( show_progress ) /* 1 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + reset_entries(); + + if ( show_progress ) /* 2 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + cache_ptr = setup_cache((size_t)(1 * 1024 * 1024), + (size_t)(0)); + + if ( show_progress ) /* 3 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + row_major_scan_forward(/* cache_ptr */ cache_ptr, + /* lag */ lag, + /* verbose */ FALSE, + /* reset_stats */ TRUE, + /* display_stats */ display_stats, + /* display_detailed_stats */ TRUE, + /* do_inserts */ TRUE, + /* dirty_inserts */ TRUE, + /* do_renames */ TRUE, + /* rename_to_main_addr */ FALSE, + /* do_destroys */ TRUE, + /* dirty_destroys */ TRUE, + /* dirty_unprotects */ TRUE); + + if ( show_progress ) /* 4 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + write_permitted = FALSE; + + row_major_scan_backward(/* cache_ptr */ cache_ptr, + /* lag */ lag, + /* verbose */ FALSE, + /* reset_stats */ TRUE, + /* display_stats */ display_stats, + /* display_detailed_stats */ TRUE, + /* do_inserts */ FALSE, + /* dirty_inserts */ FALSE, + /* do_renames */ TRUE, + /* rename_to_main_addr */ TRUE, + /* do_destroys */ FALSE, + /* dirty_destroys */ FALSE, + /* dirty_unprotects */ NO_CHANGE); + + if ( show_progress ) /* 5 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + write_permitted = TRUE; + + row_major_scan_forward(/* cache_ptr */ cache_ptr, + /* lag */ lag, + /* verbose */ FALSE, + /* reset_stats */ TRUE, + /* display_stats */ display_stats, + /* display_detailed_stats */ TRUE, + /* do_inserts */ TRUE, + /* dirty_inserts */ TRUE, + /* do_renames */ TRUE, + /* rename_to_main_addr */ FALSE, + /* do_destroys */ FALSE, + /* dirty_destroys */ TRUE, + /* dirty_unprotects */ TRUE); + + if ( show_progress ) /* 6 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + /* flush and destroy all entries in the cache: */ + + flush_cache(/* cache_ptr */ cache_ptr, + /* destroy_entries */ TRUE, + /* dump_stats */ FALSE, + /* dump_detailed_stats */ FALSE); + + if ( show_progress ) /* 7 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + col_major_scan_forward(/* cache_ptr */ cache_ptr, + /* lag */ lag, + /* verbose */ FALSE, + /* reset_stats */ TRUE, + /* display_stats */ display_stats, + /* display_detailed_stats */ TRUE, + /* do_inserts */ TRUE, + /* dirty_inserts */ TRUE, + /* dirty_unprotects */ TRUE); + + if ( show_progress ) /* 8 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + write_permitted = FALSE; + + col_major_scan_backward(/* cache_ptr */ cache_ptr, + /* lag */ lag, + /* verbose */ FALSE, + /* reset_stats */ TRUE, + /* 
display_stats */ display_stats, + /* display_detailed_stats */ TRUE, + /* do_inserts */ FALSE, + /* dirty_inserts */ FALSE, + /* dirty_unprotects */ NO_CHANGE); + + write_permitted = TRUE; + + if ( show_progress ) /* 9 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + takedown_cache(cache_ptr, display_stats, TRUE); + + if ( show_progress ) /* 10 */ + HDfprintf(stdout, "%s() - %0d -- pass = %d\n", + fcn_name, mile_stone++, (int)pass); + + verify_clean(); + verify_unprotected(); + + if ( pass ) { PASSED(); } else { H5_FAILED(); } + + if ( ! pass ) + HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", + fcn_name, failure_mssg); + +#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ + + SKIPPED(); + + HDfprintf(stdout, " Clean and dirty LRU lists disabled.\n"); + +#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ + +} /* write_permitted_check() */ + + +/*------------------------------------------------------------------------- + * Function: check_flush_protected_err() + * + * Purpose: Verify that an attempt to flush the cache when it contains + * a protected entry will generate an error. + * + * Return: void + * + * Programmer: John Mainzer + * 6/24/04 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +static void +check_flush_protected_err(void) +{ + const char * fcn_name = "check_flush_protected_err"; + H5C_t * cache_ptr = NULL; + + TESTING("flush cache with protected entry error"); + + pass = TRUE; + + /* allocate a cache, protect an entry, and try to flush. This + * should fail. Unprotect the entry and flush again -- should + * succeed. + */ + + if ( pass ) { + + reset_entries(); + + cache_ptr = setup_cache((size_t)(2 * 1024), + (size_t)(1 * 1024)); + + protect_entry(cache_ptr, 0, 0); + + if ( H5C_flush_cache(NULL, -1, -1, cache_ptr, 0) >= 0 ) { + + pass = FALSE; + failure_mssg = "flush succeeded on cache with protected entry.\n"; + + } else { + + unprotect_entry(cache_ptr, 0, 0, TRUE, FALSE); + + if ( H5C_flush_cache(NULL, -1, -1, cache_ptr, 0) < 0 ) { + + pass = FALSE; + failure_mssg = "flush failed after unprotect.\n"; + + } else { + + takedown_cache(cache_ptr, FALSE, FALSE); + } + } + } + + if ( pass ) { PASSED(); } else { H5_FAILED(); } + + if ( ! pass ) + HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", + fcn_name, failure_mssg); + +} /* check_flush_protected_err() */ + + +/*------------------------------------------------------------------------- + * Function: check_destroy_protected_err() + * + * Purpose: Verify that an attempt to destroy the cache when it contains + * a protected entry will generate an error. + * + * Return: void + * + * Programmer: John Mainzer + * 6/24/04 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +static void +check_destroy_protected_err(void) +{ + const char * fcn_name = "check_destroy_protected_err"; + H5C_t * cache_ptr = NULL; + + TESTING("destroy cache with protected entry error"); + + pass = TRUE; + + /* allocate a cache, protect an entry, and try to flush. This + * should fail. Unprotect the entry and flush again -- should + * succeed. 
+ */ + + if ( pass ) { + + reset_entries(); + + cache_ptr = setup_cache((size_t)(2 * 1024), + (size_t)(1 * 1024)); + + protect_entry(cache_ptr, 0, 0); + + if ( H5C_dest(NULL, -1, -1, cache_ptr) >= 0 ) { + + pass = FALSE; + failure_mssg = "destroy succeeded on cache with protected entry.\n"; + + } else { + + unprotect_entry(cache_ptr, 0, 0, TRUE, FALSE); + + if ( H5C_dest(NULL, -1, -1, cache_ptr) < 0 ) { + + pass = FALSE; + failure_mssg = "destroy failed after unprotect.\n"; + + } + } + } + + if ( pass ) { PASSED(); } else { H5_FAILED(); } + + if ( ! pass ) + HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", + fcn_name, failure_mssg); + +} /* check_destroy_protected_err() */ + + +/*------------------------------------------------------------------------- + * Function: check_duplicate_insert_err() + * + * Purpose: Verify that an attempt to insert and entry that is + * alread in the cache will generate an error. + * + * Return: void + * + * Programmer: John Mainzer + * 6/24/04 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +static void +check_duplicate_insert_err(void) +{ + const char * fcn_name = "check_duplicate_insert_err"; + herr_t result; + H5C_t * cache_ptr = NULL; + test_entry_t * base_addr; + test_entry_t * entry_ptr; + + TESTING("duplicate entry insertion error"); + + pass = TRUE; + + /* allocate a cache, protect an entry, and then try to insert + * the entry again. This should fail. Unprotect the entry and + * destroy the cache -- should succeed. + */ + + if ( pass ) { + + reset_entries(); + + cache_ptr = setup_cache((size_t)(2 * 1024), + (size_t)(1 * 1024)); + + protect_entry(cache_ptr, 0, 0); + + if ( pass ) { + + base_addr = entries[0]; + entry_ptr = &(base_addr[0]); + + result = H5C_insert_entry(NULL, -1, -1, cache_ptr, + &(types[0]), entry_ptr->addr, + (void *)entry_ptr); + + if ( result >= 0 ) { + + pass = FALSE; + failure_mssg = "insert of duplicate entry succeeded.\n"; + + } else { + + unprotect_entry(cache_ptr, 0, 0, TRUE, FALSE); + + takedown_cache(cache_ptr, FALSE, FALSE); + } + } + } + + if ( pass ) { PASSED(); } else { H5_FAILED(); } + + if ( ! pass ) + HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", + fcn_name, failure_mssg); + +} /* check_duplicate_insert_err() */ + + +/*------------------------------------------------------------------------- + * Function: check_rename_err() + * + * Purpose: Verify that an attempt to rename an entry to the address + * of an existing entry will generate an error. + * + * Return: void + * + * Programmer: John Mainzer + * 6/24/04 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +static void +check_rename_err(void) +{ + const char * fcn_name = "check_rename_err()"; + herr_t result; + H5C_t * cache_ptr = NULL; + test_entry_t * entry_0_0_ptr; + test_entry_t * entry_0_1_ptr; + test_entry_t * entry_1_0_ptr; + + TESTING("rename to existing entry errors"); + + pass = TRUE; + + /* allocate a cache, and insert several entries. Try to rename + * entries to other entries resident in the cache. This should + * fail. Destroy the cache -- should succeed. 
+ */ + + if ( pass ) { + + reset_entries(); + + cache_ptr = setup_cache((size_t)(2 * 1024), + (size_t)(1 * 1024)); + + insert_entry(cache_ptr, 0, 0, TRUE); + insert_entry(cache_ptr, 0, 1, TRUE); + insert_entry(cache_ptr, 1, 0, TRUE); + + entry_0_0_ptr = &((entries[0])[0]); + entry_0_1_ptr = &((entries[0])[1]); + entry_1_0_ptr = &((entries[1])[0]); + } + + if ( pass ) { + + result = H5C_rename_entry(NULL, cache_ptr, &(types[0]), + entry_0_0_ptr->addr, entry_0_1_ptr->addr); + + if ( result >= 0 ) { + + pass = FALSE; + failure_mssg = "rename to addr of same type succeeded.\n"; + } + } + + if ( pass ) { + + result = H5C_rename_entry(NULL, cache_ptr, &(types[0]), + entry_0_0_ptr->addr, entry_1_0_ptr->addr); + + if ( result >= 0 ) { + + pass = FALSE; + failure_mssg = "rename to addr of different type succeeded.\n"; + } + } + + if ( pass ) { + + takedown_cache(cache_ptr, FALSE, FALSE); + } + + if ( pass ) { PASSED(); } else { H5_FAILED(); } + + if ( ! pass ) + HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", + fcn_name, failure_mssg); + +} /* check_rename_err() */ + + +/*------------------------------------------------------------------------- + * Function: check_double_protect_err() + * + * Purpose: Verify that an attempt to protect an entry that is already + * protected will generate an error. + * + * Return: void + * + * Programmer: John Mainzer + * 6/24/04 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +static void +check_double_protect_err(void) +{ + const char * fcn_name = "check_double_protect_err()"; + H5C_t * cache_ptr = NULL; + test_entry_t * entry_ptr; + H5C_cache_entry_t * cache_entry_ptr; + + TESTING("protect a protected entry error"); + + pass = TRUE; + + /* allocate a cache, protect an entry, and then try to protect + * the entry again. This should fail. Unprotect the entry and + * destroy the cache -- should succeed. + */ + + if ( pass ) { + + reset_entries(); + + cache_ptr = setup_cache((size_t)(2 * 1024), + (size_t)(1 * 1024)); + + protect_entry(cache_ptr, 0, 0); + + entry_ptr = &((entries[0])[0]); + } + + if ( pass ) { + + cache_entry_ptr = H5C_protect(NULL, -1, -1, cache_ptr, &(types[0]), + entry_ptr->addr, NULL, NULL); + + if ( cache_entry_ptr != NULL ) { + + pass = FALSE; + failure_mssg = "attempt to protect a protected entry succeeded.\n"; + } + } + + if ( pass ) { + + unprotect_entry(cache_ptr, 0, 0, FALSE, FALSE); + } + + if ( pass ) { + + takedown_cache(cache_ptr, FALSE, FALSE); + } + + if ( pass ) { PASSED(); } else { H5_FAILED(); } + + if ( ! pass ) + HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", + fcn_name, failure_mssg); + +} /* check_double_protect_err() */ + + +/*------------------------------------------------------------------------- + * Function: check_double_unprotect_err() + * + * Purpose: Verify that an attempt to unprotect an entry that is already + * unprotected will generate an error. + * + * Return: void + * + * Programmer: John Mainzer + * 6/24/04 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +static void +check_double_unprotect_err(void) +{ + const char * fcn_name = "check_double_unprotect_err()"; + herr_t result; + H5C_t * cache_ptr = NULL; + test_entry_t * entry_ptr; + + TESTING("unprotect an unprotected entry error"); + + pass = TRUE; + + /* allocate a cache, protect an entry, unprotect it, and then try to + * unprotect the entry again. This should fail. Destroy the cache + * -- should succeed. 
+ */ + + if ( pass ) { + + reset_entries(); + + cache_ptr = setup_cache((size_t)(2 * 1024), + (size_t)(1 * 1024)); + + protect_entry(cache_ptr, 0, 0); + + unprotect_entry(cache_ptr, 0, 0, FALSE, FALSE); + + entry_ptr = &((entries[0])[0]); + } + + if ( pass ) { + + result = H5C_unprotect(NULL, -1, -1, cache_ptr, &(types[0]), + entry_ptr->addr, (void *)entry_ptr, FALSE); + + if ( result > 0 ) { + + pass = FALSE; + failure_mssg = + "attempt to unprotect an unprotected entry succeeded 1.\n"; + } + } + + if ( pass ) { + + takedown_cache(cache_ptr, FALSE, FALSE); + } + + if ( pass ) { PASSED(); } else { H5_FAILED(); } + + if ( ! pass ) + HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", + fcn_name, failure_mssg); + +} /* check_double_unprotect_err() */ + + +/*------------------------------------------------------------------------- + * Function: check_auto_cache_resize() + * + * Purpose: Exercise the automatic cache resizing functionality. + * The objective is to operate the auto-resize code in + * all possible modes. Unfortunately, there are quite + * a few of them. + * + * Return: void + * + * Programmer: John Mainzer + * 10/29/04 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +hbool_t rpt_fcn_called = FALSE; +enum H5C_resize_status rpt_status; + +void test_rpt_fcn(UNUSED H5C_t * cache_ptr, + UNUSED int32_t version, + UNUSED double hit_rate, + UNUSED enum H5C_resize_status status, + UNUSED size_t old_max_cache_size, + UNUSED size_t new_max_cache_size, + UNUSED size_t old_min_clean_size, + UNUSED size_t new_min_clean_size); + +void test_rpt_fcn(UNUSED H5C_t * cache_ptr, + UNUSED int32_t version, + UNUSED double hit_rate, + UNUSED enum H5C_resize_status status, + UNUSED size_t old_max_cache_size, + UNUSED size_t new_max_cache_size, + UNUSED size_t old_min_clean_size, + UNUSED size_t new_min_clean_size) +{ + rpt_fcn_called = TRUE; + rpt_status = status; +} + +static void +check_auto_cache_resize(void) +{ + const char * fcn_name = "check_auto_cache_resize()"; + hbool_t show_progress = FALSE; + herr_t result; + int32_t i; + int32_t checkpoint = 0; + H5C_t * cache_ptr = NULL; + H5C_auto_size_ctl_t auto_size_ctl = + { + /* int32_t version = */ H5C__CURR_AUTO_SIZE_CTL_VER, + /* H5C_auto_resize_report_fcn rpt_fcn = */ test_rpt_fcn, + + /* hbool_t set_initial_size = */ TRUE, + /* size_t initial_size = */ (512 * 1024), + + /* double min_clean_fraction = */ 0.5, + + /* size_t max_size = */ (14 * 1024 * 1024), + /* size_t min_size = */ (512 * 1024), + + /* int64_t epoch_length = */ 1000, + + + /* enum H5C_cache_incr_mode incr_mode = */ H5C_incr__threshold, + + /* double lower_hr_threshold = */ 0.75, + + /* double increment = */ 2.0, + + /* hbool_t apply_max_increment = */ TRUE, + /* size_t max_increment = */ (4 * 1024 * 1024), + + + /* enum H5C_cache_decr_mode decr_mode = */ H5C_decr__threshold, + + /* double upper_hr_threshold = */ 0.995, + + /* double decrement = */ 0.1, + + /* hbool_t apply_max_decrement = */ TRUE, + /* size_t max_decrement = */ (1 * 1024 * 1024), + + /* int32_t epochs_before_eviction = */ 3, + + /* hbool_t apply_empty_reserve = */ TRUE, + /* double empty_reserve = */ 0.05 + }; + + TESTING("automatic cache resizing"); + + pass = TRUE; + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* allocate a cache, enable automatic cache resizing, and then force + * the cache through all its operational modes. Verify that all + * performs as expected. 
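+     *
+     * As a rough guide to the arithmetic exercised below (read off the
+     * auto_size_ctl values above rather than out of the resize code
+     * itself):  each epoch spans epoch_length (1000) accesses, and with
+     * an initial size of 512 KB and a min_clean_fraction of 0.5 we expect
+     *
+     *     max_cache_size = 512 * 1024      = 524288 bytes
+     *     min_clean_size = 524288 * 0.5    = 262144 bytes
+     *
+     * immediately after the configuration call -- which is exactly what
+     * the first check below verifies.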
+ */ + + if ( pass ) { + + reset_entries(); + + cache_ptr = setup_cache((size_t)(2 * 1024), + (size_t)(1 * 1024)); + } + + if ( pass ) { + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 1.\n"; + } + } + + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (512 * 1024) ) || + ( cache_ptr->min_clean_size != (256 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "bad cache size after initialization.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate with cache not full -- should result in not + * full status. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, PICO_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, PICO_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != not_full ) || + ( cache_ptr->max_cache_size != (512 * 1024) ) || + ( cache_ptr->min_clean_size != (256 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 1.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate with cache full -- should result in increase + * of cache size from .5 to 1 meg. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != increase ) || + ( cache_ptr->max_cache_size != (1 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (512 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 2.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate with cache not full -- should result in not + * full status. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, PICO_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, PICO_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != not_full ) || + ( cache_ptr->max_cache_size != (1 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (512 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 3.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate with cache full again -- should result in increase + * of cache size from 1 to 2 meg. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != increase ) || + ( cache_ptr->max_cache_size != (2 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 4.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate with cache full again -- should result in increase + * of cache size from 2 to 4 meg. 
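+     *
+     * For reference, the increase arithmetic these checks assume can be
+     * sketched as follows (MIN() is shorthand here, not a cache macro):
+     *
+     *     if ( hit_rate < lower_hr_threshold )      /* 0.75 */
+     *     {
+     *         new_max = old_max * increment;        /* 2.0 */
+     *         if ( apply_max_increment )
+     *             new_max = MIN(new_max, old_max + max_increment);
+     *         new_max = MIN(new_max, max_size);
+     *         new_min_clean = new_max * min_clean_fraction;
+     *     }
+     *
+     * which matches the 0.5 -> 1 -> 2 meg steps already verified above
+     * and the 2 -> 4 -> 8 -> 12 -> 14 meg steps checked below -- the
+     * 4 meg max_increment caps the 8 to 12 meg step, and max_size caps
+     * the final step at 14 meg.  As the earlier not_full checks show,
+     * the increase is also skipped while the cache is not yet full.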
+ */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != increase ) || + ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 5.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate with cache full again -- should result in increase + * of cache size from 4 to 8 meg. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != increase ) || + ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 6.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate with cache full again -- should result in increase + * of cache size from 8 to 12 meg. Note that max increase reduced the + * size of the increase. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != increase ) || + ( cache_ptr->max_cache_size != (12 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (6 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 7.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate with cache full again -- should result in increase + * of cache size from 12 to 14 meg. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != increase ) || + ( cache_ptr->max_cache_size != (14 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (7 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 8.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate with cache full and at maximum size -- should + * in no change in size and a result of at_max_size. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! 
rpt_fcn_called ) || + ( rpt_status != at_max_size ) || + ( cache_ptr->max_cache_size != (14 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (7 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 9.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force high hit rate with cache full and at maximum size -- should + * result in a decrease from 14 to 13 Meg -- note that max decrease + * reduced the size of the reduction + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (13 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (6 * 1024 * 1024 + 512 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 10.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* the current cache configuration is unconvenient for testing cache + * size reduction, so lets change it some something easier to work + * with. + */ + + if ( pass ) { + + auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + auto_size_ctl.rpt_fcn = test_rpt_fcn; + + auto_size_ctl.set_initial_size = TRUE; + auto_size_ctl.initial_size = 4 * 1000 * 1000 + 10; + + auto_size_ctl.min_clean_fraction = 0.1; + + auto_size_ctl.max_size = 8 * 1000 * 1000; + auto_size_ctl.min_size = 500 * 1000; + + auto_size_ctl.epoch_length = 1000; + + + auto_size_ctl.incr_mode = H5C_incr__threshold; + + auto_size_ctl.lower_hr_threshold = 0.75; + + auto_size_ctl.increment = 2.0; + + auto_size_ctl.apply_max_increment = TRUE; + auto_size_ctl.max_increment = (4 * 1000 * 1000); + + + auto_size_ctl.decr_mode = H5C_decr__threshold; + + auto_size_ctl.upper_hr_threshold = 0.995; + + auto_size_ctl.decrement = 0.5; + + auto_size_ctl.apply_max_decrement = TRUE; + auto_size_ctl.max_decrement = (1 * 1000 * 1000); + + auto_size_ctl.epochs_before_eviction = 3; + + auto_size_ctl.apply_empty_reserve = TRUE; + auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 2.\n"; + } + } + + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (4 * 1000 * 1000 + 10) ) || + ( cache_ptr->min_clean_size != (400 * 1000 + 1) ) ) { + + pass = FALSE; + failure_mssg = "bad cache size after set resize re-config 1.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force high hit rate -- should result in a decrease from ~4 to ~3 + * M -- note that max decrease reduces the size of the reduction + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! 
rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (3 * 1000 * 1000 + 10) ) || + ( cache_ptr->min_clean_size != (300 * 1000 + 1) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 11.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force high hit rate again -- should result in a decrease from ~3 + * to ~2 M -- again note that max decrease reduces the size of the + * reduction. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (2 * 1000 * 1000 + 10) ) || + ( cache_ptr->min_clean_size != (200 * 1000 + 1) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 12.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force high hit rate again -- should result in a decrease from ~2 + * to ~1 M -- again note that max decrease reduces the size of the + * reduction, but only by five bites. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (1 * 1000 * 1000 + 10) ) || + ( cache_ptr->min_clean_size != (100 * 1000 + 1) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 13.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force high hit rate again -- should result in a decrease from ~1 + * to ~0.5 M -- max decrease is no longer a factor. New size is five + * bytes above the minimum. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (500 * 1000 + 5) ) || + ( cache_ptr->min_clean_size != (50 * 1000) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 14.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force high hit rate again -- should result in a decrease of five + * bytes to the minimum cache size. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (500 * 1000) ) || + ( cache_ptr->min_clean_size != (50 * 1000) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 15.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force high hit rate again -- Already at minimum size so no change in + * cache size and result should be at_min_size. 
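The decrease sequence checked in results 11 through 16 (4,000,010 -> 3,000,010 -> 2,000,010 -> 1,000,010 -> 500,005 -> 500,000 bytes, with min_clean_size at one tenth of max_cache_size) matches a straightforward threshold-decrement rule: multiply the current size by decrement, let max_decrement bound the size of the reduction, and clamp at min_size. A minimal sketch under the configuration just installed above; the helper is illustrative, not H5C code:

    #include <stdio.h>

    /* Hypothetical helper showing the threshold decrement arithmetic
     * implied by expected results 11 - 16 above.
     */
    static size_t new_size_after_decrease(size_t old_size,
                                          double decrement,      /* 0.5       */
                                          size_t max_decrement,  /* 1,000,000 */
                                          size_t min_size)       /* 500,000   */
    {
        size_t new_size = (size_t)(((double)old_size) * decrement);

        if ( old_size - new_size > max_decrement )
            new_size = old_size - max_decrement;   /* apply_max_decrement */

        if ( new_size < min_size )
            new_size = min_size;                   /* hard lower bound */

        return new_size;
    }

    int main(void)
    {
        size_t size = 4 * 1000 * 1000 + 10;
        int    i;

        for ( i = 0; i < 5; i++ ) {

            size = new_size_after_decrease(size, 0.5, 1000 * 1000, 500 * 1000);

            /* expected: 3000010, 2000010, 1000010, 500005, 500000 */
            printf("%zu\n", size);
        }

        return 0;
    }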
+ */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != at_min_size ) || + ( cache_ptr->max_cache_size != (500 * 1000) ) || + ( cache_ptr->min_clean_size != (50 * 1000) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 16.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force in range hit rate -- should be no change in cache size, + * and result should be in_spec. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 900 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i + 1000); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i + 1000, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (500 * 1000) ) || + ( cache_ptr->min_clean_size != (50 * 1000) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 17.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate with cache full -- should + * increase cache size from .5 to 1 M. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != increase ) || + ( cache_ptr->max_cache_size != (1 * 1000 * 1000) ) || + ( cache_ptr->min_clean_size != (100 * 1000) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 18.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force high hit rate -- should result in a decrease to the + * minimum cache size. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (500 * 1000) ) || + ( cache_ptr->min_clean_size != (50 * 1000) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 19.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /****************************************************************** + * now do some tests with the maximum increase and decrease sizes + * disabled. 
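The in_spec case above is driven entirely by how the epoch hit rate is forced: roughly 900 accesses to an entry that is already resident plus 100 accesses to entries that are not gives a hit rate near 0.9, which lies between the lower (0.75) and upper (0.995) thresholds of the active configuration, so no resize is attempted. A sketch of that classification, assuming the hit rate is simply hits / accesses over the epoch; the enum and function below are illustrative only:

    #include <stdio.h>

    enum resize_verdict { verdict_increase, verdict_decrease, verdict_in_spec };

    /* Illustrative only: classify an epoch by its hit rate, as the
     * in_spec test above does implicitly (about 900 hits in 1000 accesses).
     */
    static enum resize_verdict classify_epoch(int hits, int accesses,
                                              double lower_hr_threshold,
                                              double upper_hr_threshold)
    {
        double hit_rate = (double)hits / (double)accesses;

        if ( hit_rate < lower_hr_threshold )
            return verdict_increase;   /* subject to max_size / at_max_size */
        else if ( hit_rate > upper_hr_threshold )
            return verdict_decrease;   /* subject to min_size / at_min_size */
        else
            return verdict_in_spec;    /* leave the cache size alone */
    }

    int main(void)
    {
        /* 900 hits out of 1000 accesses -> 0.9, inside [0.75, 0.995] */
        printf("%d\n",
               classify_epoch(900, 1000, 0.75, 0.995) == verdict_in_spec);
        return 0;
    }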
+ ******************************************************************/ + + if ( pass ) { + + auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + auto_size_ctl.rpt_fcn = test_rpt_fcn; + + auto_size_ctl.set_initial_size = TRUE; + auto_size_ctl.initial_size = 4 * 1024 * 1024; + + auto_size_ctl.min_clean_fraction = 0.5; + + auto_size_ctl.max_size = 16 * 1024 * 1024; + auto_size_ctl.min_size = 1 * 1024 * 1024; + + auto_size_ctl.epoch_length = 1000; + + + auto_size_ctl.incr_mode = H5C_incr__threshold; + + auto_size_ctl.lower_hr_threshold = 0.75; + + auto_size_ctl.increment = 4.0; + + auto_size_ctl.apply_max_increment = FALSE; + auto_size_ctl.max_increment = (4 * 1024 * 1024); + + + auto_size_ctl.decr_mode = H5C_decr__threshold; + + auto_size_ctl.upper_hr_threshold = 0.995; + + auto_size_ctl.decrement = 0.25; + + auto_size_ctl.apply_max_decrement = FALSE; + auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + auto_size_ctl.epochs_before_eviction = 3; + + auto_size_ctl.apply_empty_reserve = TRUE; + auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 3.\n"; + } + } + + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "bad cache size after set resize re-config 2.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force high hit rate -- should result in a decrease to the + * minimum cache size. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (1 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (512 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 20.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate with cache full -- should increase cache size + * from 1 to 4 Meg. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != increase ) || + ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 21.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate again with cache full -- should increase cache + * size from 4 to 16 Meg. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! 
rpt_fcn_called ) || + ( rpt_status != increase ) || + ( cache_ptr->max_cache_size != (16 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != ( 8 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 22.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force high hit rate -- should result in a decrease cache size from + * 16 to 4 Meg. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 23.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /****************************************************************** + * We have tested the threshold increment and decrement modes. + * must now test the ageout decrement mode. + * + * Reconfigure the cache for this testing. + ******************************************************************/ + + if ( pass ) { + + auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + auto_size_ctl.rpt_fcn = test_rpt_fcn; + + auto_size_ctl.set_initial_size = TRUE; + auto_size_ctl.initial_size = 8 * 1024 * 1024; + + auto_size_ctl.min_clean_fraction = 0.5; + + auto_size_ctl.max_size = 8 * 1024 * 1024; + auto_size_ctl.min_size = 512 * 1024; + + auto_size_ctl.epoch_length = 1000; + + + auto_size_ctl.incr_mode = H5C_incr__threshold; + + auto_size_ctl.lower_hr_threshold = 0.75; + + auto_size_ctl.increment = 2.0; + + auto_size_ctl.apply_max_increment = TRUE; + auto_size_ctl.max_increment = (4 * 1024 * 1024); + + + auto_size_ctl.decr_mode = H5C_decr__age_out; + + auto_size_ctl.upper_hr_threshold = 0.995; + + auto_size_ctl.decrement = 0.5; + + auto_size_ctl.apply_max_decrement = FALSE; + auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + auto_size_ctl.epochs_before_eviction = 3; + + auto_size_ctl.apply_empty_reserve = FALSE; + auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 4.\n"; + } + } + + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "bad cache size after set resize re-config 3.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + + /* fill the cache with 1024 byte entries -- nothing should happen + * for three epochs while the markers are inserted into the cache + * + * Note that hit rate will be zero, so the cache will attempt to + * increase its size. Since we are already at max size, it will + * not be able to. + */ + if ( pass ) { /* first epoch */ + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! 
rpt_fcn_called ) || + ( rpt_status != at_max_size ) || + ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 24.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { /* second epoch */ + + rpt_fcn_called = FALSE; + i = 1000; + while ( ( pass ) && ( i < 2000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != at_max_size ) || + ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 25.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { /* third epoch */ + + rpt_fcn_called = FALSE; + i = 2000; + while ( ( pass ) && ( i < 3000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != at_max_size ) || + ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 26.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* fourth epoch -- If the hit rate were above the lower threshold, + * we would see cache size reduction now. However, nothing will + * happen until we get the hit rate above the lower threshold. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 3000; + while ( ( pass ) && ( i < 4000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != at_max_size ) || + ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 27.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* fifth epoch -- force the hit rate to 100%. We should see cache size + * reduction now. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 3000; + while ( ( pass ) && ( i < 4000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (2001 * 1024) ) || + ( cache_ptr->min_clean_size != (int)(2001 * 512) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 28.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* sixth epoch -- force the hit rate to 100% again. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 3000; + while ( ( pass ) && ( i < 4000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! 
rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (1001 * 1024) ) || + ( cache_ptr->min_clean_size != (int)(1001 * 512) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 29.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* seventh epoch -- force the hit rate to 100% again. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 3000; + while ( ( pass ) && ( i < 4000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (1000 * 1024) ) || + ( cache_ptr->min_clean_size != (int)(1000 * 512) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 30.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* eigth epoch -- force the hit rate to 100% again -- should be steady + * state. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 3000; + while ( ( pass ) && ( i < 4000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (1000 * 1024) ) || + ( cache_ptr->min_clean_size != (int)(1000 * 512) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 31.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "*check point %d\n", checkpoint++); + + /* now just bang on one entry -- after three epochs, this should + * get all entries other than the one evicted, and the cache size + * should be decreased to the minimum. + */ + if ( pass ) { /* ninth epoch */ + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (1000 * 1024) ) || + ( cache_ptr->min_clean_size != (int)(1000 * 512) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 32.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { /* tenth epoch */ + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (1000 * 1024) ) || + ( cache_ptr->min_clean_size != (int)(1000 * 512) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 33.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { /* eleventh epoch -- cache size reduction */ + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! 
rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (512 * 1024) ) || + ( cache_ptr->min_clean_size != (256 * 1024) ) || + ( cache_ptr->index_len != 2 ) || + ( cache_ptr->index_size != + MONSTER_ENTRY_SIZE + MEDIUM_ENTRY_SIZE ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 34.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { /* twelth epoch -- at minimum size so no more ageouts */ + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != at_min_size ) || + ( cache_ptr->max_cache_size != (512 * 1024) ) || + ( cache_ptr->min_clean_size != (256 * 1024) ) || + ( cache_ptr->index_len != 2 ) || + ( cache_ptr->index_size != + MONSTER_ENTRY_SIZE + MEDIUM_ENTRY_SIZE ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 35.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + + /* repeat the above test, but with max_decrement enabled to see + * if that features works as it should. Note that this will change + * the structure of the test a bit. + */ + + if ( pass ) { + + auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + auto_size_ctl.rpt_fcn = test_rpt_fcn; + + auto_size_ctl.set_initial_size = TRUE; + auto_size_ctl.initial_size = 8 * 1024 * 1024; + + auto_size_ctl.min_clean_fraction = 0.5; + + auto_size_ctl.max_size = 8 * 1024 * 1024; + auto_size_ctl.min_size = 512 * 1024; + + auto_size_ctl.epoch_length = 1000; + + + auto_size_ctl.incr_mode = H5C_incr__threshold; + + auto_size_ctl.lower_hr_threshold = 0.75; + + auto_size_ctl.increment = 2.0; + + auto_size_ctl.apply_max_increment = TRUE; + auto_size_ctl.max_increment = (4 * 1024 * 1024); + + + auto_size_ctl.decr_mode = H5C_decr__age_out; + + auto_size_ctl.upper_hr_threshold = 0.995; + + auto_size_ctl.decrement = 0.5; + + auto_size_ctl.apply_max_decrement = TRUE; + auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + auto_size_ctl.epochs_before_eviction = 3; + + auto_size_ctl.apply_empty_reserve = FALSE; + auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 5.\n"; + } + } + + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "bad cache size after set resize re-config 4.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + + /* fill the cache with 1024 byte entries -- nothing should happen + * for three epochs while the markers are inserted into the cache + * + * Note that hit rate will be zero, so the cache will attempt to + * increase its size. Since we are already at max size, it will + * not be able to. + */ + if ( pass ) { /* first epoch */ + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! 
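The age-out behaviour exercised above boils down to this: once the epoch markers are in place, an entry becomes a candidate for eviction after it has gone epochs_before_eviction (3 here) complete epochs without being accessed, which is why three epochs of banging on a single entry leave index_len at 2 and let the cache drop to its minimum size. The toy model below illustrates only the ageing rule; none of the names in it are H5C's:

    #include <stdio.h>

    #define N_ENTRIES               8
    #define EPOCHS_BEFORE_EVICTION  3

    struct toy_entry { int in_cache; int age; };

    /* Age every resident entry at the end of an epoch and evict the
     * ones that have not been touched for EPOCHS_BEFORE_EVICTION epochs.
     */
    static void end_of_epoch(struct toy_entry *entries, int n)
    {
        int i;

        for ( i = 0; i < n; i++ ) {

            if ( entries[i].in_cache &&
                 ( ++entries[i].age >= EPOCHS_BEFORE_EVICTION ) ) {

                entries[i].in_cache = 0;        /* aged out of the cache */
            }
        }
    }

    int main(void)
    {
        struct toy_entry entries[N_ENTRIES];
        int epoch, i, resident;

        for ( i = 0; i < N_ENTRIES; i++ ) {
            entries[i].in_cache = 1;
            entries[i].age = 0;
        }

        for ( epoch = 1; epoch <= 4; epoch++ ) {

            entries[0].age = 0;         /* only entry 0 is accessed this epoch */

            end_of_epoch(entries, N_ENTRIES);

            resident = 0;
            for ( i = 0; i < N_ENTRIES; i++ )
                resident += entries[i].in_cache;

            /* expected: 8, 8, 1, 1 entries resident after epochs 1 .. 4 */
            printf("epoch %d: %d resident\n", epoch, resident);
        }

        return 0;
    }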
rpt_fcn_called ) || + ( rpt_status != at_max_size ) || + ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 36.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { /* second epoch */ + + rpt_fcn_called = FALSE; + i = 1000; + while ( ( pass ) && ( i < 2000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != at_max_size ) || + ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 37.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { /* third epoch */ + + rpt_fcn_called = FALSE; + i = 2000; + while ( ( pass ) && ( i < 3000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != at_max_size ) || + ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 38.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* fourth epoch -- If the hit rate were above the lower threshold, + * we would see cache size reduction now. However, nothing will + * happen until we get the hit rate above the lower threshold. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 3000; + while ( ( pass ) && ( i < 4000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != at_max_size ) || + ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 39.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* fifth epoch -- force the hit rate to 100%. We should see cache size + * reduction now. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 3000; + while ( ( pass ) && ( i < 4000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (7 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (7 * 512 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 40.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* sixth epoch -- force the hit rate to 100% again. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 2000; + while ( ( pass ) && ( i < 3000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! 
rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (6 * 512 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 41.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* seventh epoch -- keep hit rate at 100%, and keep 2K entries active. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 3000; + while ( ( pass ) && ( i < 4000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (5 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (5 * 512 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 42.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* eigth epoch -- still 100% hit rate + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 2000; + while ( ( pass ) && ( i < 3000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 512 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 43.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* ninth epoch --hit rate at 100%. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 3000; + while ( ( pass ) && ( i < 4000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (3 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (3 * 512 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 44.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* tenth epoch -- still 100% hit rate + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 2000; + while ( ( pass ) && ( i < 3000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (2 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 512 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 45.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* eleventh epoch -- hit rate at 100% -- starting to stableize + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 3000; + while ( ( pass ) && ( i < 4000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! 
rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (2000 * 1024) ) || + ( cache_ptr->min_clean_size != (int)(2000 * 512) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 46.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* twelth epoch -- force the hit rate to 100% again -- should be steady + * state. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 2000; + while ( ( pass ) && ( i < 3000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (2000 * 1024) ) || + ( cache_ptr->min_clean_size != (int)(2000 * 512) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 47.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* now just bang on one entry -- after three epochs, this should + * get all entries other than the one evicted, and the cache size + * should be decreased to the minimum. + */ + if ( pass ) { /* thirteenth epoch */ + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (2000 * 1024) ) || + ( cache_ptr->min_clean_size != (int)(2000 * 512) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 48.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { /* fourteenth epoch */ + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != + (1001 * 1024 + MONSTER_ENTRY_SIZE) ) || + ( cache_ptr->min_clean_size != + (1001 * 512 + MONSTER_ENTRY_SIZE / 2) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 49.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { /* fifteenth epoch -- cache size reduction */ + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (512 * 1024) ) || + ( cache_ptr->min_clean_size != (256 * 1024) ) || + ( cache_ptr->index_len != 2 ) || + ( cache_ptr->index_size != + MONSTER_ENTRY_SIZE + MEDIUM_ENTRY_SIZE ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 50.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { /* sixteenth epoch -- at minimum size so no more ageouts */ + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! 
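With apply_max_decrement enabled, the age-out reductions above proceed in 1 MB steps (8 -> 7 -> ... -> 2 MB) before settling at 2000 KB, roughly the space occupied by the two sets of 1000 one-kilobyte entries that stay active. A sketch of that arithmetic, assuming the new size is the space still in use, reduced by no more than max_decrement per epoch and clamped at min_size; the helper below is hypothetical:

    #include <stdio.h>

    /* Hypothetical helper showing the arithmetic behind the 1 MB
     * staircase above: shrink toward the space still in use, but by
     * no more than max_decrement per epoch, and never below min_size.
     */
    static size_t shrink_toward(size_t old_size, size_t target,
                                size_t max_decrement, size_t min_size)
    {
        size_t new_size = target;

        if ( new_size > old_size )
            new_size = old_size;                    /* never grow here */

        if ( old_size - new_size > max_decrement )
            new_size = old_size - max_decrement;    /* bound the reduction */

        if ( new_size < min_size )
            new_size = min_size;

        return new_size;
    }

    int main(void)
    {
        size_t size   = 8 * 1024 * 1024;    /* initial / max cache size    */
        size_t target = 2000 * 1024;        /* ~2000 resident 1 KB entries */
        int    i;

        for ( i = 0; i < 7; i++ ) {

            size = shrink_toward(size, target, 1 * 1024 * 1024, 512 * 1024);

            /* expected: 7168, 6144, 5120, 4096, 3072, 2048, 2000 (KB) */
            printf("%zu KB\n", size / 1024);
        }

        return 0;
    }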
rpt_fcn_called ) || + ( rpt_status != at_min_size ) || + ( cache_ptr->max_cache_size != (512 * 1024) ) || + ( cache_ptr->min_clean_size != (256 * 1024) ) || + ( cache_ptr->index_len != 2 ) || + ( cache_ptr->index_size != + MONSTER_ENTRY_SIZE + MEDIUM_ENTRY_SIZE ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 51.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + + /* repeat the test yet again, this time with empty reserve enabled. + * Again, some structural changes in the test are necessary. + */ + + if ( pass ) { + + auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + auto_size_ctl.rpt_fcn = test_rpt_fcn; + + auto_size_ctl.set_initial_size = TRUE; + auto_size_ctl.initial_size = 8 * 1024 * 1024; + + auto_size_ctl.min_clean_fraction = 0.5; + + auto_size_ctl.max_size = 8 * 1024 * 1024; + auto_size_ctl.min_size = 512 * 1024; + + auto_size_ctl.epoch_length = 1000; + + + auto_size_ctl.incr_mode = H5C_incr__threshold; + + auto_size_ctl.lower_hr_threshold = 0.75; + + auto_size_ctl.increment = 2.0; + + auto_size_ctl.apply_max_increment = TRUE; + auto_size_ctl.max_increment = (4 * 1024 * 1024); + + + auto_size_ctl.decr_mode = H5C_decr__age_out; + + auto_size_ctl.upper_hr_threshold = 0.995; + + auto_size_ctl.decrement = 0.5; + + auto_size_ctl.apply_max_decrement = FALSE; + auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + auto_size_ctl.epochs_before_eviction = 3; + + auto_size_ctl.apply_empty_reserve = TRUE; + auto_size_ctl.empty_reserve = 0.5; /* for ease of testing */ + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 6.\n"; + } + } + + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "bad cache size after set resize re-config 5.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + + /* fill the cache with 1024 byte entries -- nothing should happen + * for three epochs while the markers are inserted into the cache + * + * Note that hit rate will be zero, so the cache will attempt to + * increase its size. Since we are already at max size, it will + * not be able to. + */ + if ( pass ) { /* first epoch */ + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != at_max_size ) || + ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 52.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { /* second epoch */ + + rpt_fcn_called = FALSE; + i = 1000; + while ( ( pass ) && ( i < 2000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! 
rpt_fcn_called ) || + ( rpt_status != at_max_size ) || + ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 53.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { /* third epoch */ + + rpt_fcn_called = FALSE; + i = 2000; + while ( ( pass ) && ( i < 3000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != at_max_size ) || + ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 54.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* fourth epoch -- If the hit rate were above the lower threshold, + * we would see cache size reduction now. However, nothing will + * happen until we get the hit rate above the lower threshold. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 3000; + while ( ( pass ) && ( i < 4000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != at_max_size ) || + ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 55.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* fifth epoch -- force the hit rate to 100%. We should see cache size + * reduction now. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 3000; + while ( ( pass ) && ( i < 4000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (4002 * 1024) ) || + ( cache_ptr->min_clean_size != (int)(4002 * 512) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 56.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* sixth epoch -- force the hit rate to 100% again. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 3000; + while ( ( pass ) && ( i < 4000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (2002 * 1024) ) || + ( cache_ptr->min_clean_size != (int)(2002 * 512) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 57.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* seventh epoch -- force the hit rate to 100% again. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 3000; + while ( ( pass ) && ( i < 4000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! 
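With empty_reserve set to 0.5 for ease of testing, the cache is not reduced below index_size / (1 - empty_reserve), i.e. twice the space actually in use, which is where the 4002 KB, 2002 KB and 2000 KB figures in results 56 through 58 come from (about 2001 KB, 1001 KB and 1000 KB of entries survive the respective epochs). A minimal sketch of that bound; the helper is illustrative, not H5C code:

    #include <stdio.h>

    /* Illustrative arithmetic for the empty reserve: the cache will
     * not be shrunk below index_size / (1 - empty_reserve).
     */
    static size_t min_size_for_reserve(size_t index_size, double empty_reserve)
    {
        return (size_t)(((double)index_size) / (1.0 - empty_reserve));
    }

    int main(void)
    {
        /* about 2001 KB of entries survive the fifth epoch above, so the
         * smallest acceptable cache size is 4002 KB -- the value checked
         * in "Unexpected cache size change results 56".
         */
        printf("%zu KB\n", min_size_for_reserve(2001 * 1024, 0.5) / 1024);
        return 0;
    }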
rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (2000 * 1024) ) || + ( cache_ptr->min_clean_size != (int)(2000 * 512) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 58.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* eigth epoch -- force the hit rate to 100% again -- should be steady + * state. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 3000; + while ( ( pass ) && ( i < 4000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (2000 * 1024) ) || + ( cache_ptr->min_clean_size != (int)(2000 * 512) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 59.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* now just bang on one entry -- after three epochs, this should + * get all entries other than the one evicted, and the cache size + * should be decreased to the minimum. + */ + if ( pass ) { /* ninth epoch */ + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (2000 * 1024) ) || + ( cache_ptr->min_clean_size != (int)(2000 * 512) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 60.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { /* tenth epoch */ + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (2000 * 1024) ) || + ( cache_ptr->min_clean_size != (2000 * 512) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 61.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { /* eleventh epoch -- cache size reduction */ + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (512 * 1024) ) || + ( cache_ptr->min_clean_size != (256 * 1024) ) || + ( cache_ptr->index_len != 2 ) || + ( cache_ptr->index_size != + MONSTER_ENTRY_SIZE + MEDIUM_ENTRY_SIZE ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 62.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { /* twelth epoch -- at minimum size so no more ageouts */ + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! 
rpt_fcn_called ) || + ( rpt_status != at_min_size ) || + ( cache_ptr->max_cache_size != (512 * 1024) ) || + ( cache_ptr->min_clean_size != (256 * 1024) ) || + ( cache_ptr->index_len != 2 ) || + ( cache_ptr->index_size != + MONSTER_ENTRY_SIZE + MEDIUM_ENTRY_SIZE ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 63.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + + /* Repeat the test again, this time using the age out with threshold + * mode. To simplify the testing, set epochs to eviction to 1. + * + * Again, there are some minor structural changes in the test. + */ + + if ( pass ) { + + auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + auto_size_ctl.rpt_fcn = test_rpt_fcn; + + auto_size_ctl.set_initial_size = TRUE; + auto_size_ctl.initial_size = 8 * 1024 * 1024; + + auto_size_ctl.min_clean_fraction = 0.5; + + auto_size_ctl.max_size = 8 * 1024 * 1024; + auto_size_ctl.min_size = 512 * 1024; + + auto_size_ctl.epoch_length = 1000; + + + auto_size_ctl.incr_mode = H5C_incr__off; + + auto_size_ctl.lower_hr_threshold = 0.75; + + auto_size_ctl.increment = 2.0; + + auto_size_ctl.apply_max_increment = TRUE; + auto_size_ctl.max_increment = (4 * 1024 * 1024); + + + auto_size_ctl.decr_mode = H5C_decr__age_out_with_threshold; + + auto_size_ctl.upper_hr_threshold = 0.999; /* for ease of testing */ + + auto_size_ctl.decrement = 0.5; + + auto_size_ctl.apply_max_decrement = FALSE; + auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + auto_size_ctl.epochs_before_eviction = 1; /* for ease of testing */ + + auto_size_ctl.apply_empty_reserve = FALSE; + auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 7.\n"; + } + } + + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "bad cache size after set resize re-config 6.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + + /* fill the cache with 4K byte entries -- increment mode is off, + * so cache size reduction should kick in as soon as we get the + * hit rate above .999. + */ + if ( pass ) { /* first epoch -- hit rate 0 */ + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, LARGE_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, LARGE_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 64.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { /* second epoch -- hit rate 0 */ + + rpt_fcn_called = FALSE; + i = 1000; + while ( ( pass ) && ( i < 2000 ) ) + { + protect_entry(cache_ptr, LARGE_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, LARGE_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! 
rpt_fcn_called ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 65.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { /* third epoch -- hit rate 1.0 -- should see decrease */ + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, LARGE_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, LARGE_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (1001 * LARGE_ENTRY_SIZE) ) || + ( cache_ptr->min_clean_size != (1001 * LARGE_ENTRY_SIZE / 2) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 66.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* fourth epoch -- load up the cache again -- hit rate 0 */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (1001 * LARGE_ENTRY_SIZE) ) || + ( cache_ptr->min_clean_size != (1001 * LARGE_ENTRY_SIZE / 2) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 67.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* fifth epoch -- still loading up the cache -- hit rate 0 */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 1000; + while ( ( pass ) && ( i < 2000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (1001 * LARGE_ENTRY_SIZE) ) || + ( cache_ptr->min_clean_size != (1001 * LARGE_ENTRY_SIZE / 2) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 68.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* sixth epoch -- force hit rate to .998 -- should be no reduction */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 1002; + while ( ( pass ) && ( i < 2002 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (1001 * LARGE_ENTRY_SIZE) ) || + ( cache_ptr->min_clean_size != (1001 * LARGE_ENTRY_SIZE / 2) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 69.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* seventh epoch -- force hit rate to .999 -- should see reduction + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 1003; + while ( ( pass ) && ( i < 2003 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! 
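In age out with threshold mode, the age-out machinery runs only in epochs whose hit rate reaches upper_hr_threshold (0.999 here), which is why the .998 epoch above leaves the cache alone while the .999 epoch is expected to trigger a reduction. A sketch of that gate; the exact comparison H5C performs is not visible in this part of the diff, so the >= below is an assumption chosen to reproduce the expected results:

    #include <stdio.h>

    /* Illustrative gate, not H5C's code: decide whether the age out
     * logic runs for an epoch, given its hit rate.
     */
    static int ageout_runs_this_epoch(int hits, int accesses,
                                      double upper_hr_threshold)
    {
        double hit_rate = (double)hits / (double)accesses;

        /* assumption: ">=" reproduces the .998 / .999 behaviour above */
        return ( hit_rate >= upper_hr_threshold );
    }

    int main(void)
    {
        printf("%d\n", ageout_runs_this_epoch(998, 1000, 0.999));   /* 0 */
        printf("%d\n", ageout_runs_this_epoch(999, 1000, 0.999));   /* 1 */
        return 0;
    }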
rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (1000 * MEDIUM_ENTRY_SIZE) ) || + ( cache_ptr->min_clean_size != (1000 * MEDIUM_ENTRY_SIZE / 2) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 70.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + + /* We have now tested all the major ageout modes individually. + * Lets try them all together to look for unexpected interactions + * and/or bugs. + */ + + if ( pass ) { + + auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + auto_size_ctl.rpt_fcn = test_rpt_fcn; + + auto_size_ctl.set_initial_size = TRUE; + auto_size_ctl.initial_size = 8 * 1000 * 1024; + + auto_size_ctl.min_clean_fraction = 0.5; + + auto_size_ctl.max_size = 8 * 1000 * 1024; + auto_size_ctl.min_size = 512 * 1024; + + auto_size_ctl.epoch_length = 1000; + + + auto_size_ctl.incr_mode = H5C_incr__threshold; + + auto_size_ctl.lower_hr_threshold = 0.75; + + auto_size_ctl.increment = 2.0; + + auto_size_ctl.apply_max_increment = TRUE; + auto_size_ctl.max_increment = (4 * 1024 * 1024); + + + auto_size_ctl.decr_mode = H5C_decr__age_out_with_threshold; + + auto_size_ctl.upper_hr_threshold = 0.999; /* for ease of testing */ + + auto_size_ctl.decrement = 0.5; + + auto_size_ctl.apply_max_decrement = TRUE; + auto_size_ctl.max_decrement = (1 * 1000 * 1024); + + auto_size_ctl.epochs_before_eviction = 1; /* for ease of testing */ + + auto_size_ctl.apply_empty_reserve = TRUE; + auto_size_ctl.empty_reserve = 0.5; /* for ease of testing */ + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 8.\n"; + } + } + + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (8 * 1000 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1000 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "bad cache size after set resize re-config 7.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* fill the cache with 4K byte entries -- increment mode is threshold, + * so the decrease code will not be executed until the hit rate exceeds + * .75. + */ + if ( pass ) { /* first epoch -- hit rate 0 */ + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, LARGE_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, LARGE_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != at_max_size ) || + ( cache_ptr->max_cache_size != (8 * 1000 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1000 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 71.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { /* second epoch -- hit rate 0 */ + + rpt_fcn_called = FALSE; + i = 1000; + while ( ( pass ) && ( i < 2000 ) ) + { + protect_entry(cache_ptr, LARGE_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, LARGE_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != at_max_size ) || + ( cache_ptr->max_cache_size != (8 * 1000 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1000 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 72.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* third epoch -- force the hit rate to 1.0. 
Should be no change + * in the cache size due to the combination of the empty reserve + * and the max decrease. Max decrease will limit the evictions + * in any one epoch, and the empty reserve will not permit cache + * size reduction unless the specified empty reserve is maintained. + * + * In this epoch, all we should see is a reduction in the index size. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, LARGE_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, LARGE_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (8 * 1000 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1000 * 1024) ) || + ( cache_ptr->index_size != (7 * 1000 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 73.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* fourth epoch -- hit rate still 1.0. Index size should decrease, + * but otherwise no change expected. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, LARGE_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, LARGE_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (8 * 1000 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1000 * 1024) ) || + ( cache_ptr->index_size != (6 * 1000 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 74.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* fifth epoch -- hit rate still 1.0. Index size should decrease, + * but otherwise no change expected. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, LARGE_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, LARGE_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (8 * 1000 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1000 * 1024) ) || + ( cache_ptr->index_size != (5 * 1000 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 75.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* sixth epoch -- hit rate still 1.0. Index size should decrease, + * but otherwise no change expected. Note that the cache size is + * now just on the edge of meeting the clean reserve. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, LARGE_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, LARGE_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (8 * 1000 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1000 * 1024) ) || + ( cache_ptr->index_size != (4 * 1000 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 76.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* seventh epoch -- hit rate still 1.0. No change in index size expected. 
+ */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, LARGE_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, LARGE_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (8 * 1000 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1000 * 1024) ) || + ( cache_ptr->index_size != (4 * 1000 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 77.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* eighth epoch -- start loading 1 KB entries. Hit rate 0 so + * decrease code shouldn't be called. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != at_max_size ) || + ( cache_ptr->max_cache_size != (8 * 1000 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1000 * 1024) ) || + ( cache_ptr->index_size != (5 * 1000 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 78.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* ninth epoch -- access the 1 KB entries again, driving the hit rate + * to 1.0. Decrease code should be triggered, but the max decrease + * should prevent the empty reserve from being met in this epoch. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (8 * 1000 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1000 * 1024) ) || + ( cache_ptr->index_size != (4 * 1000 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 79.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* tenth epoch -- access the 1 KB entries yet again, forcing hit rate + * to 1.0. Decrease code should be triggered, and the empty reserve + * should finally be met. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (7 * 1000 * 1024) ) || + ( cache_ptr->min_clean_size != (7 * 1000 * 1024 / 2) ) || + ( cache_ptr->index_size != (3 * 1000 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 80.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* eleventh epoch -- access the 1 KB entries yet again, forcing hit rate + * to 1.0. Decrease code should be triggered, and the empty reserve + * should be met again. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! 
rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (6 * 1000 * 1024) ) || + ( cache_ptr->min_clean_size != (3 * 1000 * 1024) ) || + ( cache_ptr->index_size != (2 * 1000 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 81.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* twelth epoch -- hit rate 1.0 -- decrease as before. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (5 * 1000 * 1024) ) || + ( cache_ptr->min_clean_size != (5 * 1000 * 1024 / 2) ) || + ( cache_ptr->index_size != (1 * 1000 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 82.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* thirteenth epoch -- hit rate 1.0 -- decrease as before. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (4 * 1000 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1000 * 1024) ) || + ( cache_ptr->index_size != (1 * 1000 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 83.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* fourteenth epoch -- hit rate 1.0 -- decrease as before. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (3 * 1000 * 1024) ) || + ( cache_ptr->min_clean_size != (3 * 1000 * 1024 / 2) ) || + ( cache_ptr->index_size != (1 * 1000 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 84.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* fifteenth epoch -- hit rate 1.0 -- decrease as before. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! 
rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (2 * 1000 * 1024) ) || + ( cache_ptr->min_clean_size != (1 * 1000 * 1024) ) || + ( cache_ptr->index_size != (1 * 1000 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 85.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* sixteenth epoch -- hit rate 1.0 -- should be stable now + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (2 * 1000 * 1024) ) || + ( cache_ptr->min_clean_size != (1 * 1000 * 1024) ) || + ( cache_ptr->index_size != (1 * 1000 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 86.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { + + takedown_cache(cache_ptr, FALSE, FALSE); + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { PASSED(); } else { H5_FAILED(); } + + if ( ! pass ) + HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", + fcn_name, failure_mssg); + +} /* check_auto_cache_resize() */ + + +/*------------------------------------------------------------------------- + * Function: check_auto_cache_resize_disable() + * + * Purpose: Test the various ways in which the resize code can + * be disabled. Unfortunately, there are quite a few of them. + * + * Return: void + * + * Programmer: John Mainzer + * 12/16/04 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +static void +check_auto_cache_resize_disable(void) +{ + const char * fcn_name = "check_auto_cache_resize_disable()"; + hbool_t show_progress = FALSE; + herr_t result; + int32_t i; + int32_t checkpoint = 0; + H5C_t * cache_ptr = NULL; + H5C_auto_size_ctl_t auto_size_ctl = + { + /* int32_t version = */ H5C__CURR_AUTO_SIZE_CTL_VER, + /* H5C_auto_resize_report_fcn rpt_fcn = */ test_rpt_fcn, + + /* hbool_t set_initial_size = */ TRUE, + /* size_t initial_size = */ (512 * 1024), + + /* double min_clean_fraction = */ 0.5, + + /* size_t max_size = */ (14 * 1024 * 1024), + /* size_t min_size = */ (512 * 1024), + + /* int64_t epoch_length = */ 1000, + + + /* enum H5C_cache_incr_mode incr_mode = */ H5C_incr__threshold, + + /* double lower_hr_threshold = */ 0.75, + + /* double increment = */ 2.0, + + /* hbool_t apply_max_increment = */ TRUE, + /* size_t max_increment = */ (4 * 1024 * 1024), + + + /* enum H5C_cache_decr_mode decr_mode = */ H5C_decr__threshold, + + /* double upper_hr_threshold = */ 0.995, + + /* double decrement = */ 0.1, + + /* hbool_t apply_max_decrement = */ TRUE, + /* size_t max_decrement = */ (1 * 1024 * 1024), + + /* int32_t epochs_before_eviction = */ 3, + + /* hbool_t apply_empty_reserve = */ TRUE, + /* double empty_reserve = */ 0.05 + }; + + TESTING("automatic cache resize disable"); + + pass = TRUE; + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* allocate a cache, enable automatic cache resizing, and then force + * the cache through all its operational modes. Verify that all + * performs as expected. 
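+ * (After the initial configuration call, max_cache_size should equal
+ * the specified initial_size of 512 * 1024 bytes, and min_clean_size
+ * should equal min_clean_fraction * max_cache_size =
+ * 0.5 * (512 * 1024) = 256 * 1024 bytes.)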
+ */ + + if ( pass ) { + + reset_entries(); + + cache_ptr = setup_cache((size_t)(2 * 1024), + (size_t)(1 * 1024)); + } + + if ( pass ) { + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 1.\n"; + } + } + + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (512 * 1024) ) || + ( cache_ptr->min_clean_size != (256 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "bad cache size after initialization.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + + /****************************************************************** + * So far, we have forced the auto cache resize through all modes + * other than increase_disabled and decrease_disabled. Force these + * modes now. Note that there are several ways we can reach these + * modes. + ******************************************************************/ + + if ( pass ) { + + auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + auto_size_ctl.rpt_fcn = test_rpt_fcn; + + auto_size_ctl.set_initial_size = TRUE; + auto_size_ctl.initial_size = 4 * 1024 * 1024; + + auto_size_ctl.min_clean_fraction = 0.5; + + auto_size_ctl.max_size = 16 * 1024 * 1024; + auto_size_ctl.min_size = 1 * 1024 * 1024; + + auto_size_ctl.epoch_length = 1000; + + + auto_size_ctl.incr_mode = H5C_incr__threshold; + + auto_size_ctl.lower_hr_threshold = 0.75; + + auto_size_ctl.increment = 1.0; /* disable size increases */ + + auto_size_ctl.apply_max_increment = FALSE; + auto_size_ctl.max_increment = (4 * 1024 * 1024); + + + auto_size_ctl.decr_mode = H5C_decr__threshold; + + auto_size_ctl.upper_hr_threshold = 0.995; + + auto_size_ctl.decrement = 0.5; + + auto_size_ctl.apply_max_decrement = FALSE; + auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + auto_size_ctl.epochs_before_eviction = 3; + + auto_size_ctl.apply_empty_reserve = TRUE; + auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 2.\n"; + } + } + + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "bad cache size after set resize re-config 1.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate with cache full -- increase disabled so should + * be no change in cache size, and result should be increase_disabled. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( cache_ptr->size_increase_possible ) || + ( rpt_status != increase_disabled ) || + ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 1.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force high hit rate -- make sure that we haven't disabled decreases. + * should result in a decrease cache size from 4 to 2 Meg. 
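+ * (With decrement set to 0.5 and apply_max_decrement FALSE, a single
+ * high hit rate epoch should halve max_cache_size from 4 * 1024 * 1024
+ * to 2 * 1024 * 1024 bytes, with min_clean_size following to
+ * 1 * 1024 * 1024 bytes.)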
+ */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (2 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 2.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate again -- increase disabled so should + * be no change in cache size, and result should be increase_disabled. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( cache_ptr->size_increase_possible ) || + ( rpt_status != increase_disabled ) || + ( cache_ptr->max_cache_size != (2 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 3.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* Repeat the above tests, disabling increase through the lower + * threshold instead of the increment. + */ + + if ( pass ) { + + auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + auto_size_ctl.rpt_fcn = test_rpt_fcn; + + auto_size_ctl.set_initial_size = TRUE; + auto_size_ctl.initial_size = 4 * 1024 * 1024; + + auto_size_ctl.min_clean_fraction = 0.5; + + auto_size_ctl.max_size = 16 * 1024 * 1024; + auto_size_ctl.min_size = 1 * 1024 * 1024; + + auto_size_ctl.epoch_length = 1000; + + + auto_size_ctl.incr_mode = H5C_incr__threshold; + + auto_size_ctl.lower_hr_threshold = 0.0; /* disable size increases */ + + auto_size_ctl.increment = 2.0; + + auto_size_ctl.apply_max_increment = FALSE; + auto_size_ctl.max_increment = (4 * 1024 * 1024); + + + auto_size_ctl.decr_mode = H5C_decr__threshold; + + auto_size_ctl.upper_hr_threshold = 0.995; + + auto_size_ctl.decrement = 0.5; + + auto_size_ctl.apply_max_decrement = FALSE; + auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + auto_size_ctl.epochs_before_eviction = 3; + + auto_size_ctl.apply_empty_reserve = TRUE; + auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 3.\n"; + } + } + + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "bad cache size after set resize re-config 2.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate with cache full -- increase disabled so should + * be no change in cache size, and result should be in_spec. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! 
rpt_fcn_called ) || + ( cache_ptr->size_increase_possible ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 4.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force high hit rate -- make sure that we haven't disabled decreases. + * should result in a decrease cache size from 4 to 2 Meg. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (2 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 5.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate again -- increase disabled so should + * be no change in cache size, and result should be increase_disabled. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( cache_ptr->size_increase_possible ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (2 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 6.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* Repeat the above tests yet again, disabling increase through the + * incr_mode. 
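+ * (With incr_mode set to H5C_incr__off, the increase code is never
+ * invoked regardless of the hit rate, so size_increase_possible should
+ * be FALSE and the low hit rate epochs should simply report in_spec.)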
+ */ + + if ( pass ) { + + auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + auto_size_ctl.rpt_fcn = test_rpt_fcn; + + auto_size_ctl.set_initial_size = TRUE; + auto_size_ctl.initial_size = 4 * 1024 * 1024; + + auto_size_ctl.min_clean_fraction = 0.5; + + auto_size_ctl.max_size = 16 * 1024 * 1024; + auto_size_ctl.min_size = 1 * 1024 * 1024; + + auto_size_ctl.epoch_length = 1000; + + + auto_size_ctl.incr_mode = H5C_incr__off; + + auto_size_ctl.lower_hr_threshold = 0.75; + + auto_size_ctl.increment = 2.0; + + auto_size_ctl.apply_max_increment = FALSE; + auto_size_ctl.max_increment = (4 * 1024 * 1024); + + + auto_size_ctl.decr_mode = H5C_decr__threshold; + + auto_size_ctl.upper_hr_threshold = 0.995; + + auto_size_ctl.decrement = 0.5; + + auto_size_ctl.apply_max_decrement = FALSE; + auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + auto_size_ctl.epochs_before_eviction = 3; + + auto_size_ctl.apply_empty_reserve = TRUE; + auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 4.\n"; + } + } + + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "bad cache size after set resize re-config 3.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate with cache full -- increase disabled so should + * be no change in cache size, and result should be in_spec. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( cache_ptr->size_increase_possible ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 7.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force high hit rate -- make sure that we haven't disabled decreases. + * should result in a decrease cache size from 4 to 2 Meg. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (2 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 8.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate again -- increase disabled so should + * be no change in cache size, and result should be increase_disabled. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! 
rpt_fcn_called ) || + ( cache_ptr->size_increase_possible ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (2 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 9.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* Now, disable size decreases, and repeat the above tests. + */ + + if ( pass ) { + + auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + auto_size_ctl.rpt_fcn = test_rpt_fcn; + + auto_size_ctl.set_initial_size = TRUE; + auto_size_ctl.initial_size = 4 * 1024 * 1024; + + auto_size_ctl.min_clean_fraction = 0.5; + + auto_size_ctl.max_size = 16 * 1024 * 1024; + auto_size_ctl.min_size = 1 * 1024 * 1024; + + auto_size_ctl.epoch_length = 1000; + + + auto_size_ctl.incr_mode = H5C_incr__threshold; + + auto_size_ctl.lower_hr_threshold = 0.75; + + auto_size_ctl.increment = 2.0; + + auto_size_ctl.apply_max_increment = TRUE; + auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + auto_size_ctl.decr_mode = H5C_decr__threshold; + + auto_size_ctl.upper_hr_threshold = 0.995; + + auto_size_ctl.decrement = 1.0; /* disable size decreases */ + + auto_size_ctl.apply_max_decrement = TRUE; + auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + auto_size_ctl.epochs_before_eviction = 3; + + auto_size_ctl.apply_empty_reserve = TRUE; + auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 5.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "bad cache size after set resize re-config 4.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force high hit rate -- should be no change in cache size, + * and result should be decrease_disabled. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease_disabled ) || + ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 10.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate -- cache size should increase from 4 to 6 Meg. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != increase ) || + ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 11.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force high hit rate again -- should be no change in cache size, + * and result should be decrease_disabled. 
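+ * (A decrement of 1.0 would leave max_cache_size unchanged, so size
+ * reduction is effectively disabled and high hit rate epochs should
+ * report decrease_disabled.)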
+ */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease_disabled ) || + ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 12.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* Repeat the above tests, disabling decrease through the upper + * threshold instead of the decrement. + */ + + if ( pass ) { + + auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + auto_size_ctl.rpt_fcn = test_rpt_fcn; + + auto_size_ctl.set_initial_size = TRUE; + auto_size_ctl.initial_size = 4 * 1024 * 1024; + + auto_size_ctl.min_clean_fraction = 0.5; + + auto_size_ctl.max_size = 16 * 1024 * 1024; + auto_size_ctl.min_size = 1 * 1024 * 1024; + + auto_size_ctl.epoch_length = 1000; + + + auto_size_ctl.incr_mode = H5C_incr__threshold; + + auto_size_ctl.lower_hr_threshold = 0.75; + + auto_size_ctl.increment = 2.0; + + auto_size_ctl.apply_max_increment = TRUE; + auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + auto_size_ctl.decr_mode = H5C_decr__threshold; + + auto_size_ctl.upper_hr_threshold = 1.0; /* disable size decreases */ + + auto_size_ctl.decrement = 0.5; + + auto_size_ctl.apply_max_decrement = TRUE; + auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + auto_size_ctl.epochs_before_eviction = 3; + + auto_size_ctl.apply_empty_reserve = TRUE; + auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 6.\n"; + } + } + + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "bad cache size after set resize re-config 5.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force high hit rate -- should be no change in cache size, + * and result should be in_spec. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( cache_ptr->size_decrease_possible ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 13.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate -- cache size should increase from 4 to 6 Meg. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! 
rpt_fcn_called ) || + ( rpt_status != increase ) || + ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 14.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force high hit rate again -- should be no change in cache size, + * and result should be in_spec. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( cache_ptr->size_decrease_possible ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 15.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* Repeat the above tests, disabling decrease through the decr_mode. + */ + + if ( pass ) { + + auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + auto_size_ctl.rpt_fcn = test_rpt_fcn; + + auto_size_ctl.set_initial_size = TRUE; + auto_size_ctl.initial_size = 4 * 1024 * 1024; + + auto_size_ctl.min_clean_fraction = 0.5; + + auto_size_ctl.max_size = 16 * 1024 * 1024; + auto_size_ctl.min_size = 1 * 1024 * 1024; + + auto_size_ctl.epoch_length = 1000; + + + auto_size_ctl.incr_mode = H5C_incr__threshold; + + auto_size_ctl.lower_hr_threshold = 0.75; + + auto_size_ctl.increment = 2.0; + + auto_size_ctl.apply_max_increment = TRUE; + auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + auto_size_ctl.decr_mode = H5C_decr__off; + + auto_size_ctl.upper_hr_threshold = 0.995; + + auto_size_ctl.decrement = 0.5; + + auto_size_ctl.apply_max_decrement = TRUE; + auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + auto_size_ctl.epochs_before_eviction = 3; + + auto_size_ctl.apply_empty_reserve = TRUE; + auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 7.\n"; + } + } + + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "bad cache size after set resize re-config 6.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force high hit rate -- should be no change in cache size, + * and result should be in_spec. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( cache_ptr->size_decrease_possible ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 16.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate -- cache size should increase from 4 to 6 Meg. 
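+ * (The increment of 2.0 would double max_cache_size from 4 to 8 Meg,
+ * but max_increment caps the growth at 2 * 1024 * 1024 bytes, so the
+ * expected new size is 6 * 1024 * 1024 bytes with a min_clean_size of
+ * 3 * 1024 * 1024 bytes.)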
+ */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != increase ) || + ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 17.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force high hit rate again -- should be no change in cache size, + * and result should be in_spec. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( cache_ptr->size_decrease_possible ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 18.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* Now do tests disabling size decrement in age out mode. + * + * Start by disabling size decrement by setting max_decrement to zero. + */ + + if ( pass ) { + + auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + auto_size_ctl.rpt_fcn = test_rpt_fcn; + + auto_size_ctl.set_initial_size = TRUE; + auto_size_ctl.initial_size = 4 * 1024 * 1024; + + auto_size_ctl.min_clean_fraction = 0.5; + + auto_size_ctl.max_size = 16 * 1024 * 1024; + auto_size_ctl.min_size = 1 * 1024 * 1024; + + auto_size_ctl.epoch_length = 1000; + + + auto_size_ctl.incr_mode = H5C_incr__threshold; + + auto_size_ctl.lower_hr_threshold = 0.75; + + auto_size_ctl.increment = 2.0; + + auto_size_ctl.apply_max_increment = TRUE; + auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + auto_size_ctl.decr_mode = H5C_decr__age_out; + + auto_size_ctl.upper_hr_threshold = 0.995; + + auto_size_ctl.decrement = 0.5; + + auto_size_ctl.apply_max_decrement = TRUE; + auto_size_ctl.max_decrement = 0; /* disable decrement */ + + auto_size_ctl.epochs_before_eviction = 1; + + auto_size_ctl.apply_empty_reserve = TRUE; + auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 8.\n"; + } + } + + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "bad cache size after set resize re-config 7.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* flush the cache and destroy all entries so we start from a known point */ + flush_cache(cache_ptr, TRUE, FALSE, FALSE); + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* load up the cache with small entries. Note that it will take an + * epoch for the ageout code to initialize itself if it is enabled. 
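+ * (Since max_decrement is zero in the above configuration,
+ * size_decrease_possible should be FALSE. The small entries do not
+ * fill the 4 Meg cache, so these low hit rate epochs should report
+ * not_full rather than increase.)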
+ */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, SMALL_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, SMALL_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( cache_ptr->size_decrease_possible ) || + ( rpt_status != not_full ) || + ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 19.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* Load up some more small entries. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 1000; + while ( ( pass ) && ( i < 2000 ) ) + { + protect_entry(cache_ptr, SMALL_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, SMALL_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( cache_ptr->size_decrease_possible ) || + ( rpt_status != not_full ) || + ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 20.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* Now force a high hit rate so that the size increase code is + * is satisfied. We would see a decrease here if decrease were + * possible. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, SMALL_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, SMALL_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( cache_ptr->size_decrease_possible ) || + ( rpt_status != decrease_disabled ) || + ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 21.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate -- cache size should increase from 4 to 6 Meg. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != increase ) || + ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 22.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* just bang on a single entry. This will see to it that there are + * many entries that could be aged out were decreases enabled. + * Should be no change in cache size, and result should be + * decrease_disabled. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! 
rpt_fcn_called ) || + ( cache_ptr->size_decrease_possible ) || + ( rpt_status != decrease_disabled ) || + ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 23.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* Now disable size decrement in age out mode via the empty reserve. + */ + + if ( pass ) { + + auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + auto_size_ctl.rpt_fcn = test_rpt_fcn; + + auto_size_ctl.set_initial_size = TRUE; + auto_size_ctl.initial_size = 4 * 1024 * 1024; + + auto_size_ctl.min_clean_fraction = 0.5; + + auto_size_ctl.max_size = 16 * 1024 * 1024; + auto_size_ctl.min_size = 1 * 1024 * 1024; + + auto_size_ctl.epoch_length = 1000; + + + auto_size_ctl.incr_mode = H5C_incr__threshold; + + auto_size_ctl.lower_hr_threshold = 0.75; + + auto_size_ctl.increment = 2.0; + + auto_size_ctl.apply_max_increment = TRUE; + auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + auto_size_ctl.decr_mode = H5C_decr__age_out; + + auto_size_ctl.upper_hr_threshold = 0.995; + + auto_size_ctl.decrement = 0.5; + + auto_size_ctl.apply_max_decrement = TRUE; + auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + auto_size_ctl.epochs_before_eviction = 1; + + auto_size_ctl.apply_empty_reserve = TRUE; + auto_size_ctl.empty_reserve = 1.0; /* disable decrement */ + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 9.\n"; + } + } + + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "bad cache size after set resize re-config 8.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* flush the cache and destroy all entries so we start from a known point */ + flush_cache(cache_ptr, TRUE, FALSE, FALSE); + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* load up the cache with small entries. Note that it will take an + * epoch for the ageout code to initialize itself if it is enabled. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, SMALL_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, SMALL_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( cache_ptr->size_decrease_possible ) || + ( rpt_status != not_full ) || + ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 24.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* Load up some more small entries. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 1000; + while ( ( pass ) && ( i < 2000 ) ) + { + protect_entry(cache_ptr, SMALL_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, SMALL_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! 
rpt_fcn_called ) || + ( cache_ptr->size_decrease_possible ) || + ( rpt_status != not_full ) || + ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 25.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* Now force a high hit rate so that the size increase code is + * is satisfied. We would see a decrease here if decrease were + * possible. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, SMALL_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, SMALL_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( cache_ptr->size_decrease_possible ) || + ( rpt_status != decrease_disabled ) || + ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 26.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate -- cache size should increase from 4 to 6 Meg. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != increase ) || + ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 27.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* just bang on a single entry. This will see to it that there are + * many entries that could be aged out were decreases enabled. + * Should be no change in cache size, and result should be + * decrease_disabled. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( cache_ptr->size_decrease_possible ) || + ( rpt_status != decrease_disabled ) || + ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 28.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* Now work with age out with threshold. One can argue that we should + * repeat the above age out tests with age out with threshold, but the + * same code is executed in both cases so I don't see the point. If + * that ever changes, this test should be updated. + * + * There is only one way of disabling decrements that is peculiar + * to age out with threshold, which is to set the upper threshold + * to 1.0. Test this now. 
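+ * (The hit rate can never exceed 1.0, so an upper_hr_threshold of 1.0
+ * can never be crossed, and the ageout code should never be triggered
+ * even though decr_mode is H5C_decr__age_out_with_threshold.)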
+ */ + + if ( pass ) { + + auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + auto_size_ctl.rpt_fcn = test_rpt_fcn; + + auto_size_ctl.set_initial_size = TRUE; + auto_size_ctl.initial_size = 4 * 1024 * 1024; + + auto_size_ctl.min_clean_fraction = 0.5; + + auto_size_ctl.max_size = 16 * 1024 * 1024; + auto_size_ctl.min_size = 1 * 1024 * 1024; + + auto_size_ctl.epoch_length = 1000; + + + auto_size_ctl.incr_mode = H5C_incr__threshold; + + auto_size_ctl.lower_hr_threshold = 0.75; + + auto_size_ctl.increment = 2.0; + + auto_size_ctl.apply_max_increment = TRUE; + auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + auto_size_ctl.decr_mode = H5C_decr__age_out_with_threshold; + + auto_size_ctl.upper_hr_threshold = 1.0; + + auto_size_ctl.decrement = 0.5; + + auto_size_ctl.apply_max_decrement = TRUE; + auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + auto_size_ctl.epochs_before_eviction = 1; + + auto_size_ctl.apply_empty_reserve = TRUE; + auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 10.\n"; + } + } + + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "bad cache size after set resize re-config 9.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* flush the cache and destroy all entries so we start from a known point */ + flush_cache(cache_ptr, TRUE, FALSE, FALSE); + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* load up the cache with small entries. Note that it will take an + * epoch for the ageout code to initialize itself if it is enabled. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, SMALL_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, SMALL_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( cache_ptr->size_decrease_possible ) || + ( rpt_status != not_full ) || + ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 29.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* Load up some more small entries. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 1000; + while ( ( pass ) && ( i < 2000 ) ) + { + protect_entry(cache_ptr, SMALL_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, SMALL_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( cache_ptr->size_decrease_possible ) || + ( rpt_status != not_full ) || + ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 30.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* Now force a high hit rate so that the size increase code is + * is satisfied. We would see a decrease here if decrease were + * possible, but the upper threshold cannot be met, so no decrease. + * + * rpt_status should be decrease_disabled. 
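+ * (The index_len and index_size checks below -- 2000 entries totalling
+ * 2000 * SMALL_ENTRY_SIZE bytes -- confirm that none of the small
+ * entries were actually evicted.)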
+ */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, SMALL_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, SMALL_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( cache_ptr->size_decrease_possible ) || + ( rpt_status != decrease_disabled ) || + ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) || + ( cache_ptr->index_len != 2000 ) || + ( cache_ptr->index_size != 2000 * SMALL_ENTRY_SIZE ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 31.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate -- cache size should increase from 4 to 6 Meg. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != increase ) || + ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 32.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* just bang on a single entry. This keeps the hit rate high, and sees + * to it that there are many entries that could be aged out were + * decreases enabled. + * + * Should be no change in cache size, and result should be + * decrease_disabled. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 999); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 999, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( cache_ptr->size_decrease_possible ) || + ( rpt_status != decrease_disabled ) || + ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 33.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + + /********************************************************************* + * Finally, use the auto cache resize code to set the size of the + * cache and keep it there. Again, due to the complexity of the + * interface, there are lots of ways of doing this. We have to + * check them all. 
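+ * (Typical ways of doing so, exercised below, include pairing a
+ * disabled increase (a lower_hr_threshold of 0.0 or an increment of
+ * 1.0) with a disabled decrease (an upper_hr_threshold of 1.0 or a
+ * decrement of 1.0), and setting max_size equal to min_size. In every
+ * case, neither a low nor a high hit rate should change the cache size.)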
+ *********************************************************************/ + + if ( pass ) { + + auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + auto_size_ctl.rpt_fcn = test_rpt_fcn; + + auto_size_ctl.set_initial_size = TRUE; + auto_size_ctl.initial_size = 2 * 1024 * 1024; + + auto_size_ctl.min_clean_fraction = 0.5; + + auto_size_ctl.max_size = 16 * 1024 * 1024; + auto_size_ctl.min_size = 1 * 1024 * 1024; + + auto_size_ctl.epoch_length = 1000; + + + auto_size_ctl.incr_mode = H5C_incr__threshold; + + auto_size_ctl.lower_hr_threshold = 0.0; /* disable size increases */ + + auto_size_ctl.increment = 2.0; + + auto_size_ctl.apply_max_increment = TRUE; + auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + auto_size_ctl.decr_mode = H5C_decr__threshold; + + auto_size_ctl.upper_hr_threshold = 1.0; /* disable size decreases */ + + auto_size_ctl.decrement = 0.5; + + auto_size_ctl.apply_max_decrement = TRUE; + auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + auto_size_ctl.epochs_before_eviction = 3; + + auto_size_ctl.apply_empty_reserve = TRUE; + auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 11.\n"; + } + } + + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (2 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "bad cache size after set resize re-config 10.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate -- should be no response as the auto-resize + * code should be disabled. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( rpt_fcn_called ) || + ( cache_ptr->resize_enabled ) || + ( cache_ptr->size_increase_possible ) || + ( cache_ptr->size_decrease_possible ) || + ( cache_ptr->max_cache_size != (2 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 34.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force high hit rate -- should be no response as the auto-resize + * code should be disabled. 
+ */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( rpt_fcn_called ) || + ( cache_ptr->resize_enabled ) || + ( cache_ptr->size_increase_possible ) || + ( cache_ptr->size_decrease_possible ) || + ( cache_ptr->max_cache_size != (2 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 35.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { + + auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + auto_size_ctl.rpt_fcn = test_rpt_fcn; + + auto_size_ctl.set_initial_size = TRUE; + auto_size_ctl.initial_size = 4 * 1024 * 1024; + + auto_size_ctl.min_clean_fraction = 0.25; + + auto_size_ctl.max_size = 16 * 1024 * 1024; + auto_size_ctl.min_size = 1 * 1024 * 1024; + + auto_size_ctl.epoch_length = 1000; + + + auto_size_ctl.incr_mode = H5C_incr__threshold; + + auto_size_ctl.lower_hr_threshold = 0.75; + + auto_size_ctl.increment = 1.0; /* disable size increment */ + + auto_size_ctl.apply_max_increment = TRUE; + auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + auto_size_ctl.decr_mode = H5C_decr__threshold; + + auto_size_ctl.upper_hr_threshold = 0.995; + + auto_size_ctl.decrement = 1.0; /* disable size decrement */ + + auto_size_ctl.apply_max_decrement = TRUE; + auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + auto_size_ctl.epochs_before_eviction = 3; + + auto_size_ctl.apply_empty_reserve = TRUE; + auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 12.\n"; + } + } + + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "bad cache size after set resize re-config 11.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate -- should be no response as the auto-resize + * code should be disabled. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( rpt_fcn_called ) || + ( cache_ptr->resize_enabled ) || + ( cache_ptr->size_increase_possible ) || + ( cache_ptr->size_decrease_possible ) || + ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 36.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force high hit rate -- should be no response as the auto-resize + * code should be disabled. 
+ */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( rpt_fcn_called ) || + ( cache_ptr->resize_enabled ) || + ( cache_ptr->size_increase_possible ) || + ( cache_ptr->size_decrease_possible ) || + ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 37.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { + + auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + auto_size_ctl.rpt_fcn = test_rpt_fcn; + + auto_size_ctl.set_initial_size = FALSE; + auto_size_ctl.initial_size = 2 * 1024 * 1024; + + auto_size_ctl.min_clean_fraction = 0.5; + + auto_size_ctl.max_size = 6 * 1024 * 1024; /* no resize */ + auto_size_ctl.min_size = 6 * 1024 * 1024; /* no resize */ + + auto_size_ctl.epoch_length = 1000; + + + auto_size_ctl.incr_mode = H5C_incr__threshold; + + auto_size_ctl.lower_hr_threshold = 0.75; + + auto_size_ctl.increment = 2.0; + + auto_size_ctl.apply_max_increment = TRUE; + auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + auto_size_ctl.decr_mode = H5C_decr__threshold; + + auto_size_ctl.upper_hr_threshold = 0.995; + + auto_size_ctl.decrement = 0.5; + + auto_size_ctl.apply_max_decrement = TRUE; + auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + auto_size_ctl.epochs_before_eviction = 3; + + auto_size_ctl.apply_empty_reserve = TRUE; + auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 13.\n"; + } + } + + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "bad cache size after set resize re-config 12.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate -- should be no response as the auto-resize + * code should be disabled. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( rpt_fcn_called ) || + ( cache_ptr->resize_enabled ) || + ( cache_ptr->size_increase_possible ) || + ( cache_ptr->size_decrease_possible ) || + ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 38.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force high hit rate -- should be no response as the auto-resize + * code should be disabled. 
+ */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( rpt_fcn_called ) || + ( cache_ptr->resize_enabled ) || + ( cache_ptr->size_increase_possible ) || + ( cache_ptr->size_decrease_possible ) || + ( cache_ptr->max_cache_size != (6 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (3 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 39.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { + + auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + auto_size_ctl.rpt_fcn = test_rpt_fcn; + + auto_size_ctl.set_initial_size = TRUE; + auto_size_ctl.initial_size = 4 * 1024 * 1024; + + auto_size_ctl.min_clean_fraction = 0.25; + + auto_size_ctl.max_size = 16 * 1024 * 1024; + auto_size_ctl.min_size = 1 * 1024 * 1024; + + auto_size_ctl.epoch_length = 1000; + + + auto_size_ctl.incr_mode = H5C_incr__threshold; + + auto_size_ctl.lower_hr_threshold = 0.75; + + auto_size_ctl.increment = 1.0; /* disable size increment */ + + auto_size_ctl.apply_max_increment = TRUE; + auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + auto_size_ctl.decr_mode = H5C_decr__threshold; + + auto_size_ctl.upper_hr_threshold = 1.0; /* disable size decrement */ + + auto_size_ctl.decrement = 0.5; + + auto_size_ctl.apply_max_decrement = TRUE; + auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + auto_size_ctl.epochs_before_eviction = 3; + + auto_size_ctl.apply_empty_reserve = TRUE; + auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 14.\n"; + } + } + + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "bad cache size after set resize re-config 13.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate -- should be no response as the auto-resize + * code should be disabled. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( rpt_fcn_called ) || + ( cache_ptr->resize_enabled ) || + ( cache_ptr->size_increase_possible ) || + ( cache_ptr->size_decrease_possible ) || + ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 40.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force high hit rate -- should be no response as the auto-resize + * code should be disabled. 
+ */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( rpt_fcn_called ) || + ( cache_ptr->resize_enabled ) || + ( cache_ptr->size_increase_possible ) || + ( cache_ptr->size_decrease_possible ) || + ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (1 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 41.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { + + auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + auto_size_ctl.rpt_fcn = test_rpt_fcn; + + auto_size_ctl.set_initial_size = TRUE; + auto_size_ctl.initial_size = 4 * 1024 * 1024; + + auto_size_ctl.min_clean_fraction = 0.5; + + auto_size_ctl.max_size = 16 * 1024 * 1024; + auto_size_ctl.min_size = 1 * 1024 * 1024; + + auto_size_ctl.epoch_length = 1000; + + + auto_size_ctl.incr_mode = H5C_incr__threshold; + + auto_size_ctl.lower_hr_threshold = 0.0; /* disable size increment */ + + auto_size_ctl.increment = 2.0; + + auto_size_ctl.apply_max_increment = TRUE; + auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + auto_size_ctl.decr_mode = H5C_decr__threshold; + + auto_size_ctl.upper_hr_threshold = 0.995; + + auto_size_ctl.decrement = 1.0; /* disable size decrement */ + + auto_size_ctl.apply_max_decrement = TRUE; + auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + auto_size_ctl.epochs_before_eviction = 3; + + auto_size_ctl.apply_empty_reserve = TRUE; + auto_size_ctl.empty_reserve = 0.05; + + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 15.\n"; + } + } + + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "bad cache size after set resize re-config 14.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate -- should be no response as the auto-resize + * code should be disabled. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( rpt_fcn_called ) || + ( cache_ptr->resize_enabled ) || + ( cache_ptr->size_increase_possible ) || + ( cache_ptr->size_decrease_possible ) || + ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 42.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force high hit rate -- should be no response as the auto-resize + * code should be disabled. 
+ */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( rpt_fcn_called ) || + ( cache_ptr->resize_enabled ) || + ( cache_ptr->size_increase_possible ) || + ( cache_ptr->size_decrease_possible ) || + ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 43.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { + + auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + auto_size_ctl.rpt_fcn = test_rpt_fcn; + + auto_size_ctl.set_initial_size = TRUE; + auto_size_ctl.initial_size = 4 * 1024 * 1024; + + auto_size_ctl.min_clean_fraction = 0.5; + + auto_size_ctl.max_size = 16 * 1024 * 1024; + auto_size_ctl.min_size = 1 * 1024 * 1024; + + auto_size_ctl.epoch_length = 1000; + + + auto_size_ctl.incr_mode = H5C_incr__off; + + auto_size_ctl.lower_hr_threshold = 0.75; + + auto_size_ctl.increment = 2.0; + + auto_size_ctl.apply_max_increment = TRUE; + auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + auto_size_ctl.decr_mode = H5C_decr__off; + + auto_size_ctl.upper_hr_threshold = 0.995; + + auto_size_ctl.decrement = 0.5; + + auto_size_ctl.apply_max_decrement = TRUE; + auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + auto_size_ctl.epochs_before_eviction = 3; + + auto_size_ctl.apply_empty_reserve = TRUE; + auto_size_ctl.empty_reserve = 0.05; + + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 16.\n"; + } + } + + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "bad cache size after set resize re-config 15.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force low hit rate -- should be no response as the auto-resize + * code should be disabled. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( rpt_fcn_called ) || + ( cache_ptr->resize_enabled ) || + ( cache_ptr->size_increase_possible ) || + ( cache_ptr->size_decrease_possible ) || + ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 44.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* force high hit rate -- should be no response as the auto-resize + * code should be disabled. 
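[Editorial note -- not part of the patch.] The re-configurations exercised above (set-config failure messages 11 through 16) each switch automatic resizing off by a different route, and the low/high hit rate sweeps that follow each one confirm that the report function is never called and the cache size never moves. As a reading aid, the sketch below collects the distinct "off switches" involved; the field names are those of H5C_auto_size_ctl_t as used in this test, the numeric values are illustrative only, and any one of these settings is sufficient on its own.

    /* editorial sketch -- ways the test above disables auto-resizing */
    H5C_auto_size_ctl_t ctl;        /* remaining fields set as in the test */

    /* thresholds that can never fire */
    ctl.lower_hr_threshold = 0.0;   /* hit rate cannot drop below 0.0    */
    ctl.upper_hr_threshold = 1.0;   /* hit rate cannot exceed 1.0        */

    /* scale factors that leave the size unchanged */
    ctl.increment = 1.0;            /* grow by a factor of 1 -- a no-op  */
    ctl.decrement = 1.0;            /* shrink by a factor of 1 -- a no-op */

    /* no room to move */
    ctl.max_size = 6 * 1024 * 1024;
    ctl.min_size = 6 * 1024 * 1024; /* max_size == min_size              */

    /* explicit off modes */
    ctl.incr_mode = H5C_incr__off;
    ctl.decr_mode = H5C_decr__off;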
+ */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); + + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( rpt_fcn_called ) || + ( cache_ptr->resize_enabled ) || + ( cache_ptr->size_increase_possible ) || + ( cache_ptr->size_decrease_possible ) || + ( cache_ptr->max_cache_size != (4 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (2 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 45.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { + + takedown_cache(cache_ptr, FALSE, FALSE); + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { PASSED(); } else { H5_FAILED(); } + + if ( ! pass ) + HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", + fcn_name, failure_mssg); + +} /* check_auto_cache_resize_disable() */ + + +/*------------------------------------------------------------------------- + * Function: check_auto_cache_resize_epoch_markers() + * + * Purpose: Verify that the auto-resize code manages epoch markers + * correctly. + * + * Return: void + * + * Programmer: John Mainzer + * 12/16/04 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +static void +check_auto_cache_resize_epoch_markers(void) +{ + const char * fcn_name = "check_auto_cache_resize_epoch_markers()"; + hbool_t show_progress = FALSE; + herr_t result; + int32_t i; + int32_t j; + int32_t checkpoint = 0; + H5C_t * cache_ptr = NULL; + H5C_auto_size_ctl_t auto_size_ctl = + { + /* int32_t version = */ H5C__CURR_AUTO_SIZE_CTL_VER, + /* H5C_auto_resize_report_fcn rpt_fcn = */ test_rpt_fcn, + + /* hbool_t set_initial_size = */ TRUE, + /* size_t initial_size = */ (512 * 1024), + + /* double min_clean_fraction = */ 0.5, + + /* size_t max_size = */ (14 * 1024 * 1024), + /* size_t min_size = */ (512 * 1024), + + /* int64_t epoch_length = */ 1000, + + + /* enum H5C_cache_incr_mode incr_mode = */ H5C_incr__threshold, + + /* double lower_hr_threshold = */ 0.75, + + /* double increment = */ 2.0, + + /* hbool_t apply_max_increment = */ TRUE, + /* size_t max_increment = */ (4 * 1024 * 1024), + + + /* enum H5C_cache_decr_mode decr_mode = */ H5C_decr__threshold, + + /* double upper_hr_threshold = */ 0.995, + + /* double decrement = */ 0.1, + + /* hbool_t apply_max_decrement = */ TRUE, + /* size_t max_decrement = */ (1 * 1024 * 1024), + + /* int32_t epochs_before_eviction = */ 3, + + /* hbool_t apply_empty_reserve = */ TRUE, + /* double empty_reserve = */ 0.05 + }; + + TESTING("automatic cache resize epoch marker management"); + + pass = TRUE; + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { + + reset_entries(); + + cache_ptr = setup_cache((size_t)(2 * 1024), + (size_t)(1 * 1024)); + } + + if ( pass ) { + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 1.\n"; + } + } + + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (512 * 1024) ) || + ( cache_ptr->min_clean_size != (256 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "bad cache size after initialization.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + + /* Now make sure that we are managing the epoch 
markers correctly. + */ + + if ( pass ) { + + auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + auto_size_ctl.rpt_fcn = test_rpt_fcn; + + auto_size_ctl.set_initial_size = TRUE; + auto_size_ctl.initial_size = 8 * 1024 * 1024; + + auto_size_ctl.min_clean_fraction = 0.5; + + auto_size_ctl.max_size = 8 * 1024 * 1024; + auto_size_ctl.min_size = 512 * 1024; + + auto_size_ctl.epoch_length = 1000; + + + auto_size_ctl.incr_mode = H5C_incr__off; + + auto_size_ctl.lower_hr_threshold = 0.75; + + auto_size_ctl.increment = 2.0; + + auto_size_ctl.apply_max_increment = TRUE; + auto_size_ctl.max_increment = (4 * 1024 * 1024); + + + auto_size_ctl.decr_mode = H5C_decr__age_out; + + auto_size_ctl.upper_hr_threshold = 0.995; + + auto_size_ctl.decrement = 0.5; + + auto_size_ctl.apply_max_decrement = FALSE; + auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + auto_size_ctl.epochs_before_eviction = 10; + + auto_size_ctl.apply_empty_reserve = FALSE; + auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 2.\n"; + } + } + + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "bad cache size after set resize re-config 1.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* Since we just created the cache, there should be no epoch markers + * active. Verify that this is true. + */ + + if ( pass ) { + + if ( cache_ptr->epoch_markers_active != 0 ) { + + pass = FALSE; + failure_mssg = "Unexpected # of epoch markers 1.\n"; + } + } + + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, MEDIUM_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != in_spec ) || + ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) || + ( cache_ptr->index_size != (1 * 1000 * MEDIUM_ENTRY_SIZE) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 0.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + + if ( pass ) { + + j = 2; + while ( ( pass ) && ( j <= 10 ) ) + { + + rpt_fcn_called = FALSE; + i = (j - 2) * 1000; + while ( ( pass ) && ( i < (j - 1) * 1000 ) ) + { + protect_entry(cache_ptr, SMALL_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, SMALL_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != in_spec ) || + ( cache_ptr->epoch_markers_active != j ) ) { + + pass = FALSE; + failure_mssg = "Unexpected # of epoch markers 2.\n"; + } + + j++; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* we now have a full complement of epoch markers -- see if + * we get the expected reduction. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 9000; + while ( ( pass ) && ( i < 10000 ) ) + { + protect_entry(cache_ptr, SMALL_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, SMALL_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! 
rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != + (10 * 1000 * SMALL_ENTRY_SIZE + MEDIUM_ENTRY_SIZE) ) || + ( cache_ptr->min_clean_size != + ((10 * 1000 * SMALL_ENTRY_SIZE + MEDIUM_ENTRY_SIZE) / 2) ) || + ( cache_ptr->index_size != + (10 * 1000 * SMALL_ENTRY_SIZE + MEDIUM_ENTRY_SIZE) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 1.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* now reduce the epochs before eviction, and see if the cache + * deletes the extra markers + */ + if ( pass ) { + + auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + auto_size_ctl.rpt_fcn = test_rpt_fcn; + + auto_size_ctl.set_initial_size = TRUE; + auto_size_ctl.initial_size = 8 * 1024 * 1024; + + auto_size_ctl.min_clean_fraction = 0.5; + + auto_size_ctl.max_size = 8 * 1024 * 1024; + auto_size_ctl.min_size = 512 * 1024; + + auto_size_ctl.epoch_length = 1000; + + + auto_size_ctl.incr_mode = H5C_incr__off; + + auto_size_ctl.lower_hr_threshold = 0.75; + + auto_size_ctl.increment = 2.0; + + auto_size_ctl.apply_max_increment = TRUE; + auto_size_ctl.max_increment = (4 * 1024 * 1024); + + + auto_size_ctl.decr_mode = H5C_decr__age_out; + + auto_size_ctl.upper_hr_threshold = 0.995; + + auto_size_ctl.decrement = 0.5; + + auto_size_ctl.apply_max_decrement = FALSE; + auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + auto_size_ctl.epochs_before_eviction = 1; + + auto_size_ctl.apply_empty_reserve = FALSE; + auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 3.\n"; + } + } + + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "bad cache size after set resize re-config 2.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* There should be exactly one active epoch marker at present. + */ + if ( pass ) { + + if ( cache_ptr->epoch_markers_active != 1 ) { + + pass = FALSE; + failure_mssg = "Unexpected # of epoch markers 3.\n"; + } + } + + /* Now do an epochs worth of accesses, and verify that everything + * not accessed in this epoch gets evicted, and the cache size + * is reduced. + */ + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 9000; + while ( ( pass ) && ( i < 10000 ) ) + { + protect_entry(cache_ptr, SMALL_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, SMALL_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != decrease ) || + ( cache_ptr->max_cache_size != (512 * 1024) ) || + ( cache_ptr->min_clean_size != (256 * 1024) ) || + ( cache_ptr->index_size != (1 * 1000 * SMALL_ENTRY_SIZE) ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 2.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* There should be exactly one active epoch marker at present... + */ + if ( pass ) { + + if ( cache_ptr->epoch_markers_active != 1 ) { + + pass = FALSE; + failure_mssg = "Unexpected # of epoch markers 4.\n"; + } + } + + /* shift the decrement mode to threshold, and verify that we remove + * all epoch markers. 
+ */ + if ( pass ) { + + auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + auto_size_ctl.rpt_fcn = test_rpt_fcn; + + auto_size_ctl.set_initial_size = TRUE; + auto_size_ctl.initial_size = 8 * 1024 * 1024; + + auto_size_ctl.min_clean_fraction = 0.5; + + auto_size_ctl.max_size = 8 * 1024 * 1024; + auto_size_ctl.min_size = 512 * 1024; + + auto_size_ctl.epoch_length = 1000; + + + auto_size_ctl.incr_mode = H5C_incr__off; + + auto_size_ctl.lower_hr_threshold = 0.75; + + auto_size_ctl.increment = 2.0; + + auto_size_ctl.apply_max_increment = TRUE; + auto_size_ctl.max_increment = (4 * 1024 * 1024); + + + auto_size_ctl.decr_mode = H5C_decr__threshold; + + auto_size_ctl.upper_hr_threshold = 0.995; + + auto_size_ctl.decrement = 0.5; + + auto_size_ctl.apply_max_decrement = FALSE; + auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + auto_size_ctl.epochs_before_eviction = 1; + + auto_size_ctl.apply_empty_reserve = FALSE; + auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 4.\n"; + } + } + + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1024 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "bad cache size after set resize re-config 3.\n"; + } + } + + /* ... and now there should be none. + */ + if ( pass ) { + + if ( cache_ptr->epoch_markers_active != 0 ) { + + pass = FALSE; + failure_mssg = "Unexpected # of epoch markers 5.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* shift the decrement mode to age out with threshold. Set epochs + * before eviction to 10 again. + */ + if ( pass ) { + + auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + auto_size_ctl.rpt_fcn = test_rpt_fcn; + + auto_size_ctl.set_initial_size = TRUE; + auto_size_ctl.initial_size = 8 * 1024 * 1024; + + auto_size_ctl.min_clean_fraction = 0.5; + + auto_size_ctl.max_size = 8 * 1024 * 1024; + auto_size_ctl.min_size = 512 * 1024; + + auto_size_ctl.epoch_length = 1000; + + + auto_size_ctl.incr_mode = H5C_incr__off; + + auto_size_ctl.lower_hr_threshold = 0.75; + + auto_size_ctl.increment = 2.0; + + auto_size_ctl.apply_max_increment = TRUE; + auto_size_ctl.max_increment = (4 * 1024 * 1024); + + + auto_size_ctl.decr_mode = H5C_decr__age_out_with_threshold; + + auto_size_ctl.upper_hr_threshold = 0.995; + + auto_size_ctl.decrement = 0.5; + + auto_size_ctl.apply_max_decrement = FALSE; + auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + auto_size_ctl.epochs_before_eviction = 10; + + auto_size_ctl.apply_empty_reserve = FALSE; + auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 5.\n"; + } + } + + /* Verify that there are no active epoch markers. + */ + if ( pass ) { + + if ( cache_ptr->epoch_markers_active != 0 ) { + + pass = FALSE; + failure_mssg = "Unexpected # of epoch markers 6.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* build up a full set of epoch markers. 
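[Editorial note -- not part of the patch.] The epoch marker checks above and below all follow the same lifecycle, which the test observes through cache_ptr->epoch_markers_active. The sketch below restates the expected counts at each stage using only fields and behaviors the test itself exercises; the assertions are illustrative and do not appear in the patch.

    /* editorial sketch -- expected epoch marker counts */

    /* freshly created cache, or decr_mode not an age out mode */
    HDassert( cache_ptr->epoch_markers_active == 0 );

    /* decr_mode == H5C_decr__age_out or H5C_decr__age_out_with_threshold:
     * one marker becomes active per epoch until epochs_before_eviction
     * markers (here 10) are in use.
     */
    HDassert( cache_ptr->epoch_markers_active <=
              auto_size_ctl.epochs_before_eviction );

    /* re-configure with a smaller epochs_before_eviction (10 -> 1):
     * the surplus markers are deleted as part of the re-configuration.
     */
    HDassert( cache_ptr->epoch_markers_active == 1 );

    /* re-configure with decr_mode == H5C_decr__threshold or
     * H5C_decr__off: all markers are removed.
     */
    HDassert( cache_ptr->epoch_markers_active == 0 );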
*/ + if ( pass ) { + + j = 1; + while ( ( pass ) && ( j <= 10 ) ) + { + + rpt_fcn_called = FALSE; + i = (j - 1) * 1000; + while ( ( pass ) && ( i < j * 1000 ) ) + { + protect_entry(cache_ptr, SMALL_ENTRY_TYPE, i); + + if ( pass ) { + unprotect_entry(cache_ptr, SMALL_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + + if ( ( ! rpt_fcn_called ) || + ( rpt_status != in_spec ) || + ( cache_ptr->epoch_markers_active != j ) ) { + + pass = FALSE; + failure_mssg = "Unexpected # of epoch markers 7.\n"; + } + + j++; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* Verify that there are now 10 active epoch markers. + */ + if ( pass ) { + + if ( cache_ptr->epoch_markers_active != 10 ) { + + pass = FALSE; + failure_mssg = "Unexpected # of epoch markers 8.\n"; + } + } + + /* shift the decrement mode to off. This should cause all epoch + * markers to be removed. + */ + if ( pass ) { + + auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + auto_size_ctl.rpt_fcn = test_rpt_fcn; + + auto_size_ctl.set_initial_size = TRUE; + auto_size_ctl.initial_size = 8 * 1024 * 1024; + + auto_size_ctl.min_clean_fraction = 0.5; + + auto_size_ctl.max_size = 8 * 1024 * 1024; + auto_size_ctl.min_size = 512 * 1024; + + auto_size_ctl.epoch_length = 1000; + + + auto_size_ctl.incr_mode = H5C_incr__off; + + auto_size_ctl.lower_hr_threshold = 0.75; + + auto_size_ctl.increment = 2.0; + + auto_size_ctl.apply_max_increment = TRUE; + auto_size_ctl.max_increment = (4 * 1024 * 1024); + + + auto_size_ctl.decr_mode = H5C_decr__off; + + auto_size_ctl.upper_hr_threshold = 0.995; + + auto_size_ctl.decrement = 0.5; + + auto_size_ctl.apply_max_decrement = FALSE; + auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + auto_size_ctl.epochs_before_eviction = 10; + + auto_size_ctl.apply_empty_reserve = FALSE; + auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, &auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 6.\n"; + } + } + + /* Verify that there are now no active epoch markers. + */ + if ( pass ) { + + if ( cache_ptr->epoch_markers_active != 0 ) { + + pass = FALSE; + failure_mssg = "Unexpected # of epoch markers 9.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + /* verify that we still have the expected number of entries in the cache, + * and that the cache is of the expected size. + */ + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (8 * 1024 * 1024) ) || + ( cache_ptr->min_clean_size != (4 * 1024 * 1024) )|| + ( cache_ptr->index_size != (10 * 1000 * SMALL_ENTRY_SIZE) ) || + ( cache_ptr->index_len != 10000 ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache size change results 3.\n"; + } + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { + + takedown_cache(cache_ptr, FALSE, FALSE); + } + + if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++); + + if ( pass ) { PASSED(); } else { H5_FAILED(); } + + if ( ! pass ) + HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", + fcn_name, failure_mssg); + +} /* check_auto_cache_resize_epoch_markers() */ + + +/*------------------------------------------------------------------------- + * Function: check_auto_cache_resize_input_errs() + * + * Purpose: Verify that H5C_set_cache_auto_resize_config() detects + * and rejects invalid input. 
+ * + * Return: void + * + * Programmer: John Mainzer + * 10/29/04 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +#define RESIZE_CONFIGS_ARE_EQUAL(a, b, compare_init) \ +( ( (a).version == (b).version ) && \ + ( (a).rpt_fcn == (b).rpt_fcn ) && \ + ( ( ! compare_init ) || \ + ( (a).set_initial_size == (b).set_initial_size ) ) && \ + ( ( ! compare_init ) || \ + ( (a).initial_size == (b).initial_size ) ) && \ + ( (a).min_clean_fraction == (b).min_clean_fraction ) && \ + ( (a).max_size == (b).max_size ) && \ + ( (a).min_size == (b).min_size ) && \ + ( (a).epoch_length == (b).epoch_length ) && \ + ( (a).incr_mode == (b).incr_mode ) && \ + ( (a).lower_hr_threshold == (b).lower_hr_threshold ) && \ + ( (a).increment == (b).increment ) && \ + ( (a).apply_max_increment == (b).apply_max_increment ) && \ + ( (a).max_increment == (b).max_increment ) && \ + ( (a).decr_mode == (b).decr_mode ) && \ + ( (a).upper_hr_threshold == (b).upper_hr_threshold ) && \ + ( (a).decrement == (b).decrement ) && \ + ( (a).apply_max_decrement == (b).apply_max_decrement ) && \ + ( (a).max_decrement == (b).max_decrement ) && \ + ( (a).epochs_before_eviction == (b).epochs_before_eviction ) && \ + ( (a).apply_empty_reserve == (b).apply_empty_reserve ) && \ + ( (a).empty_reserve == (b).empty_reserve ) ) + +static void +check_auto_cache_resize_input_errs(void) +{ + const char * fcn_name = "check_auto_cache_resize_input_errs()"; + herr_t result; + H5C_t * cache_ptr = NULL; + H5C_auto_size_ctl_t ref_auto_size_ctl = + { + /* int32_t version = */ H5C__CURR_AUTO_SIZE_CTL_VER, + /* H5C_auto_resize_report_fcn rpt_fcn = */ test_rpt_fcn, + + /* hbool_t set_initial_size = */ TRUE, + /* size_t initial_size = */ (512 * 1024), + + /* double min_clean_fraction = */ 0.5, + + /* size_t max_size = */ (16 * 1024 * 1024), + /* size_t min_size = */ (512 * 1024), + + /* int64_t epoch_length = */ 1000, + + + /* enum H5C_cache_incr_mode incr_mode = */ H5C_incr__threshold, + + /* double lower_hr_threshold = */ 0.75, + + /* double increment = */ 2.0, + + /* hbool_t apply_max_increment = */ TRUE, + /* size_t max_increment = */ (4 * 1024 * 1024), + + + /* enum H5C_cache_decr_mode decr_mode = */ H5C_decr__threshold, + + /* double upper_hr_threshold = */ 0.995, + + /* double decrement = */ 0.1, + + /* hbool_t apply_max_decrement = */ TRUE, + /* size_t max_decrement = */ (1 * 1024 * 1024), + + /* int32_t epochs_before_eviction = */ 3, + + /* hbool_t apply_empty_reserve = */ TRUE, + /* double empty_reserve = */ 0.05 + }; + + H5C_auto_size_ctl_t invalid_auto_size_ctl; + H5C_auto_size_ctl_t test_auto_size_ctl; + + TESTING("automatic cache resize input errors"); + + pass = TRUE; + + /* allocate a cache, and set a reference automatic cache control + * configuration. Then feed H5C_set_cache_auto_resize_config() + * invalid input, and verify that the correct error is returned, + * and that the configuration is not modified. 
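[Editorial note -- not part of the patch.] Every invalid-input case that follows repeats the same two-step check: submit a deliberately bad configuration and expect FAIL, then read the configuration back and verify via RESIZE_CONFIGS_ARE_EQUAL that the rejected call left it untouched. Purely as an illustration of that pattern -- the helper name is hypothetical and does not exist in the patch -- it could be factored roughly as follows, reusing the file's pass / failure_mssg convention:

    /* hypothetical helper -- editorial sketch only, not part of the patch */
    static void
    expect_config_rejected(H5C_t * cache_ptr,
                           H5C_auto_size_ctl_t * bad_ctl_ptr,
                           H5C_auto_size_ctl_t * ref_ctl_ptr,
                           const char * err_mssg)
    {
        H5C_auto_size_ctl_t readback;

        if ( pass ) {

            /* the bad configuration must be rejected ... */
            if ( H5C_set_cache_auto_resize_config(cache_ptr, bad_ctl_ptr)
                 != FAIL ) {

                pass = FALSE;
                failure_mssg = err_mssg;
            }
        }

        if ( pass ) {

            /* ... and must not have modified the active configuration */
            if ( H5C_get_cache_auto_resize_config(cache_ptr, &readback)
                 != SUCCEED ) {

                pass = FALSE;
                failure_mssg = "H5C_get_cache_auto_resize_config failed.";

            } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(readback, *ref_ctl_ptr,
                                                   FALSE) ) {

                pass = FALSE;
                failure_mssg = "config changed by rejected call.";
            }
        }

    } /* expect_config_rejected() */

The unfactored form actually used in the patch has the advantage that each hand-written failure message pinpoints the exact offending case.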
+ */ + + if ( pass ) { + + reset_entries(); + + cache_ptr = setup_cache((size_t)(2 * 1024), + (size_t)(1 * 1024)); + } + + if ( pass ) { + + result = H5C_set_cache_auto_resize_config(cache_ptr, + &ref_auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config failed 1.\n"; + } + } + + if ( pass ) { + + if ( ( cache_ptr->max_cache_size != (512 * 1024) ) || + ( cache_ptr->min_clean_size != (256 * 1024) ) ) { + + pass = FALSE; + failure_mssg = "bad cache size after initialization.\n"; + } + } + + if ( pass ) { + + result = H5C_get_cache_auto_resize_config(cache_ptr, + &test_auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_get_cache_auto_resize_config failed 1."; + + } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \ + ref_auto_size_ctl, FALSE) ) { + + pass = FALSE; + failure_mssg = "Unexpected auto resize config 1."; + } + } + + if ( pass ) { + + invalid_auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + invalid_auto_size_ctl.rpt_fcn = NULL; + + invalid_auto_size_ctl.set_initial_size = TRUE; + invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024; + + invalid_auto_size_ctl.min_clean_fraction = 0.5; + + invalid_auto_size_ctl.max_size = 16 * 1024 * 1024; + invalid_auto_size_ctl.min_size = 1 * 1024 * 1024; + + invalid_auto_size_ctl.epoch_length = 5000; + + + invalid_auto_size_ctl.incr_mode = H5C_incr__threshold; + + invalid_auto_size_ctl.lower_hr_threshold = 0.7; + + invalid_auto_size_ctl.increment = 2.0; + + invalid_auto_size_ctl.apply_max_increment = TRUE; + invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + invalid_auto_size_ctl.decr_mode = H5C_decr__threshold; + + invalid_auto_size_ctl.upper_hr_threshold = 0.999; + + invalid_auto_size_ctl.decrement = 0.5; + + invalid_auto_size_ctl.apply_max_decrement = TRUE; + invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + invalid_auto_size_ctl.epochs_before_eviction = 3; + + invalid_auto_size_ctl.apply_empty_reserve = TRUE; + invalid_auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(NULL, + &invalid_auto_size_ctl); + + if ( result != FAIL ) { + + pass = FALSE; + failure_mssg = + "H5C_set_cache_auto_resize_config accepted NULL cache_ptr.\n"; + } + } + + if ( pass ) { + + result = H5C_get_cache_auto_resize_config(cache_ptr, + &test_auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_get_cache_auto_resize_config failed 2."; + + } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \ + ref_auto_size_ctl, FALSE) ) { + + pass = FALSE; + failure_mssg = "Unexpected auto resize config 2."; + } + } + + + /* check bad version rejection. 
*/ + + if ( pass ) { + + invalid_auto_size_ctl.version = -1; /* INVALID */ + invalid_auto_size_ctl.rpt_fcn = NULL; + + invalid_auto_size_ctl.set_initial_size = TRUE; + invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024; + + invalid_auto_size_ctl.min_clean_fraction = 0.5; + + invalid_auto_size_ctl.max_size = 16 * 1024 * 1024; + invalid_auto_size_ctl.min_size = 1 * 1024 * 1024; + + invalid_auto_size_ctl.epoch_length = 5000; + + + invalid_auto_size_ctl.incr_mode = H5C_incr__threshold; + + invalid_auto_size_ctl.lower_hr_threshold = 0.7; + + invalid_auto_size_ctl.increment = 2.0; + + invalid_auto_size_ctl.apply_max_increment = TRUE; + invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + invalid_auto_size_ctl.decr_mode = H5C_decr__threshold; + + invalid_auto_size_ctl.upper_hr_threshold = 0.999; + + invalid_auto_size_ctl.decrement = 0.5; + + invalid_auto_size_ctl.apply_max_decrement = TRUE; + invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + invalid_auto_size_ctl.epochs_before_eviction = 3; + + invalid_auto_size_ctl.apply_empty_reserve = TRUE; + invalid_auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, + &invalid_auto_size_ctl); + + if ( result != FAIL ) { + + pass = FALSE; + failure_mssg = + "H5C_set_cache_auto_resize_config accepted bad version.\n"; + } + } + + if ( pass ) { + + result = H5C_get_cache_auto_resize_config(cache_ptr, + &test_auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_get_cache_auto_resize_config failed 3."; + + } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \ + ref_auto_size_ctl, FALSE) ) { + + pass = FALSE; + failure_mssg = "Unexpected auto resize config 3."; + } + } + + + /* check bad initial size rejection */ + + if ( pass ) { + + invalid_auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + invalid_auto_size_ctl.rpt_fcn = NULL; + + invalid_auto_size_ctl.set_initial_size = TRUE; + invalid_auto_size_ctl.initial_size = 16 * 1024 * 1024 + 1; + /* INVALID */ + + invalid_auto_size_ctl.min_clean_fraction = 0.5; + + invalid_auto_size_ctl.max_size = 16 * 1024 * 1024; + invalid_auto_size_ctl.min_size = 1 * 1024 * 1024; + + invalid_auto_size_ctl.epoch_length = 5000; + + + invalid_auto_size_ctl.incr_mode = H5C_incr__threshold; + + invalid_auto_size_ctl.lower_hr_threshold = 0.75; + + invalid_auto_size_ctl.increment = 2.0; + + invalid_auto_size_ctl.apply_max_increment = TRUE; + invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + invalid_auto_size_ctl.decr_mode = H5C_decr__threshold; + + invalid_auto_size_ctl.upper_hr_threshold = 0.999; + + invalid_auto_size_ctl.decrement = 0.5; + + invalid_auto_size_ctl.apply_max_decrement = TRUE; + invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + invalid_auto_size_ctl.epochs_before_eviction = 3; + + invalid_auto_size_ctl.apply_empty_reserve = TRUE; + invalid_auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, + &invalid_auto_size_ctl); + + if ( result != FAIL ) { + + pass = FALSE; + failure_mssg = + "H5C_set_cache_auto_resize_config accepted bad init size 1.\n"; + } + } + + if ( pass ) { + + result = H5C_get_cache_auto_resize_config(cache_ptr, + &test_auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_get_cache_auto_resize_config failed 4."; + + } else if ( ! 
RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \ + ref_auto_size_ctl, FALSE) ) { + + pass = FALSE; + failure_mssg = "Unexpected auto resize config 4."; + } + } + + if ( pass ) { + + invalid_auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + invalid_auto_size_ctl.rpt_fcn = NULL; + + invalid_auto_size_ctl.set_initial_size = TRUE; + invalid_auto_size_ctl.initial_size = 1 * 1024 * 1024 - 1; + /* INVALID */ + + invalid_auto_size_ctl.min_clean_fraction = 0.5; + + invalid_auto_size_ctl.max_size = 16 * 1024 * 1024; + invalid_auto_size_ctl.min_size = 1 * 1024 * 1024; + + invalid_auto_size_ctl.epoch_length = 5000; + + + invalid_auto_size_ctl.incr_mode = H5C_incr__threshold; + + invalid_auto_size_ctl.lower_hr_threshold = 0.75; + + invalid_auto_size_ctl.increment = 2.0; + + invalid_auto_size_ctl.apply_max_increment = TRUE; + invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + invalid_auto_size_ctl.decr_mode = H5C_decr__threshold; + + invalid_auto_size_ctl.upper_hr_threshold = 0.999; + + invalid_auto_size_ctl.decrement = 0.5; + + invalid_auto_size_ctl.apply_max_decrement = TRUE; + invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + invalid_auto_size_ctl.epochs_before_eviction = 3; + + invalid_auto_size_ctl.apply_empty_reserve = TRUE; + invalid_auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, + &invalid_auto_size_ctl); + + if ( result != FAIL ) { + + pass = FALSE; + failure_mssg = + "H5C_set_cache_auto_resize_config accepted bad init size 2.\n"; + } + } + + if ( pass ) { + + result = H5C_get_cache_auto_resize_config(cache_ptr, + &test_auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_get_cache_auto_resize_config failed 5."; + + } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \ + ref_auto_size_ctl, FALSE) ) { + + pass = FALSE; + failure_mssg = "Unexpected auto resize config 5."; + } + } + + + /* test for invalid min clean fraction rejection. 
*/ + + if ( pass ) { + + invalid_auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + invalid_auto_size_ctl.rpt_fcn = NULL; + + invalid_auto_size_ctl.set_initial_size = TRUE; + invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024; + + invalid_auto_size_ctl.min_clean_fraction = 1.00001; /* INVALID */ + + invalid_auto_size_ctl.max_size = 16 * 1024 * 1024; + invalid_auto_size_ctl.min_size = 1 * 1024 * 1024; + + invalid_auto_size_ctl.epoch_length = 5000; + + + invalid_auto_size_ctl.incr_mode = H5C_incr__threshold; + + invalid_auto_size_ctl.lower_hr_threshold = 0.75; + + invalid_auto_size_ctl.increment = 2.0; + + invalid_auto_size_ctl.apply_max_increment = TRUE; + invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + invalid_auto_size_ctl.decr_mode = H5C_decr__threshold; + + invalid_auto_size_ctl.upper_hr_threshold = 0.999; + + invalid_auto_size_ctl.decrement = 0.5; + + invalid_auto_size_ctl.apply_max_decrement = TRUE; + invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + invalid_auto_size_ctl.epochs_before_eviction = 3; + + invalid_auto_size_ctl.apply_empty_reserve = TRUE; + invalid_auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, + &invalid_auto_size_ctl); + + if ( result != FAIL ) { + + pass = FALSE; + failure_mssg = + "H5C_set_cache_auto_resize_config accepted bad min clean frac 1.\n"; + } + } + + if ( pass ) { + + result = H5C_get_cache_auto_resize_config(cache_ptr, + &test_auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_get_cache_auto_resize_config failed 6."; + + } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \ + ref_auto_size_ctl, FALSE) ) { + + pass = FALSE; + failure_mssg = "Unexpected auto resize config 6."; + } + } + + if ( pass ) { + + invalid_auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + invalid_auto_size_ctl.rpt_fcn = NULL; + + invalid_auto_size_ctl.set_initial_size = TRUE; + invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024; + + invalid_auto_size_ctl.min_clean_fraction = -0.00001; /* INVALID */ + + invalid_auto_size_ctl.max_size = 16 * 1024 * 1024; + invalid_auto_size_ctl.min_size = 1 * 1024 * 1024; + + invalid_auto_size_ctl.epoch_length = 5000; + + + invalid_auto_size_ctl.incr_mode = H5C_incr__threshold; + + invalid_auto_size_ctl.lower_hr_threshold = 0.75; + + invalid_auto_size_ctl.increment = 2.0; + + invalid_auto_size_ctl.apply_max_increment = TRUE; + invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + invalid_auto_size_ctl.decr_mode = H5C_decr__threshold; + + invalid_auto_size_ctl.upper_hr_threshold = 0.999; + + invalid_auto_size_ctl.decrement = 0.5; + + invalid_auto_size_ctl.apply_max_decrement = TRUE; + invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + invalid_auto_size_ctl.epochs_before_eviction = 3; + + invalid_auto_size_ctl.apply_empty_reserve = TRUE; + invalid_auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, + &invalid_auto_size_ctl); + + if ( result != FAIL ) { + + pass = FALSE; + failure_mssg = + "H5C_set_cache_auto_resize_config accepted bad min clean frac 2.\n"; + } + } + + if ( pass ) { + + result = H5C_get_cache_auto_resize_config(cache_ptr, + &test_auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_get_cache_auto_resize_config failed 7."; + + } else if ( ! 
RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \ + ref_auto_size_ctl, FALSE) ) { + + pass = FALSE; + failure_mssg = "Unexpected auto resize config 7."; + } + } + + + /* test for invalid max_size and/or min_size rejection */ + + if ( pass ) { + + invalid_auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + invalid_auto_size_ctl.rpt_fcn = NULL; + + invalid_auto_size_ctl.set_initial_size = TRUE; + invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024; + + invalid_auto_size_ctl.min_clean_fraction = 0.5; + + invalid_auto_size_ctl.max_size = H5C__MAX_MAX_CACHE_SIZE + 1; + /* INVALID */ + invalid_auto_size_ctl.min_size = 1 * 1024 * 1024; + + invalid_auto_size_ctl.epoch_length = 5000; + + + invalid_auto_size_ctl.incr_mode = H5C_incr__threshold; + + invalid_auto_size_ctl.lower_hr_threshold = 0.75; + + invalid_auto_size_ctl.increment = 2.0; + + invalid_auto_size_ctl.apply_max_increment = TRUE; + invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + invalid_auto_size_ctl.decr_mode = H5C_decr__threshold; + + invalid_auto_size_ctl.upper_hr_threshold = 0.999; + + invalid_auto_size_ctl.decrement = 0.5; + + invalid_auto_size_ctl.apply_max_decrement = TRUE; + invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + invalid_auto_size_ctl.epochs_before_eviction = 3; + + invalid_auto_size_ctl.apply_empty_reserve = TRUE; + invalid_auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, + &invalid_auto_size_ctl); + + if ( result != FAIL ) { + + pass = FALSE; + failure_mssg = + "H5C_set_cache_auto_resize_config accepted bad max_size.\n"; + } + } + + if ( pass ) { + + result = H5C_get_cache_auto_resize_config(cache_ptr, + &test_auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_get_cache_auto_resize_config failed 8."; + + } else if ( ! 
RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \ + ref_auto_size_ctl, FALSE) ) { + + pass = FALSE; + failure_mssg = "Unexpected auto resize config 8."; + } + } + + if ( pass ) { + + invalid_auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + invalid_auto_size_ctl.rpt_fcn = NULL; + + invalid_auto_size_ctl.set_initial_size = TRUE; + invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024; + + invalid_auto_size_ctl.min_clean_fraction = 0.5; + + invalid_auto_size_ctl.max_size = 1 * 1024 * 1024;/* INVALID */ + invalid_auto_size_ctl.min_size = 1 * 1024 * 1024 + 1;/*PAIR */ + + invalid_auto_size_ctl.epoch_length = 5000; + + + invalid_auto_size_ctl.incr_mode = H5C_incr__threshold; + + invalid_auto_size_ctl.lower_hr_threshold = 0.75; + + invalid_auto_size_ctl.increment = 2.0; + + invalid_auto_size_ctl.apply_max_increment = TRUE; + invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + invalid_auto_size_ctl.decr_mode = H5C_decr__threshold; + + invalid_auto_size_ctl.upper_hr_threshold = 0.999; + + invalid_auto_size_ctl.decrement = 0.5; + + invalid_auto_size_ctl.apply_max_decrement = TRUE; + invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + invalid_auto_size_ctl.epochs_before_eviction = 3; + + invalid_auto_size_ctl.apply_empty_reserve = TRUE; + invalid_auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, + &invalid_auto_size_ctl); + + if ( result != FAIL ) { + + pass = FALSE; + failure_mssg = + "H5C_set_cache_auto_resize_config accepted bad size pair.\n"; + } + } + + if ( pass ) { + + result = H5C_get_cache_auto_resize_config(cache_ptr, + &test_auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_get_cache_auto_resize_config failed 9."; + + } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \ + ref_auto_size_ctl, FALSE) ) { + + pass = FALSE; + failure_mssg = "Unexpected auto resize config 9."; + } + } + + if ( pass ) { + + invalid_auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + invalid_auto_size_ctl.rpt_fcn = NULL; + + invalid_auto_size_ctl.set_initial_size = TRUE; + invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024; + + invalid_auto_size_ctl.min_clean_fraction = 0.5; + + invalid_auto_size_ctl.max_size = 16 * 1024 * 1024; + invalid_auto_size_ctl.min_size = H5C__MIN_MAX_CACHE_SIZE - 1; + /* INVALID */ + invalid_auto_size_ctl.epoch_length = 5000; + + + invalid_auto_size_ctl.incr_mode = H5C_incr__threshold; + + invalid_auto_size_ctl.lower_hr_threshold = 0.75; + + invalid_auto_size_ctl.increment = 2.0; + + invalid_auto_size_ctl.apply_max_increment = TRUE; + invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + invalid_auto_size_ctl.decr_mode = H5C_decr__threshold; + + invalid_auto_size_ctl.upper_hr_threshold = 0.999; + + invalid_auto_size_ctl.decrement = 0.5; + + invalid_auto_size_ctl.apply_max_decrement = TRUE; + invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + invalid_auto_size_ctl.epochs_before_eviction = 3; + + invalid_auto_size_ctl.apply_empty_reserve = TRUE; + invalid_auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, + &invalid_auto_size_ctl); + + if ( result != FAIL ) { + + pass = FALSE; + failure_mssg = + "H5C_set_cache_auto_resize_config accepted bad min_size.\n"; + } + } + + if ( pass ) { + + result = H5C_get_cache_auto_resize_config(cache_ptr, + &test_auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_get_cache_auto_resize_config failed 10."; + + } else if ( ! 
RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \ + ref_auto_size_ctl, FALSE) ) { + + pass = FALSE; + failure_mssg = "Unexpected auto resize config 10."; + } + } + + + /* test for invalid epoch_length rejection */ + + if ( pass ) { + + invalid_auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + invalid_auto_size_ctl.rpt_fcn = NULL; + + invalid_auto_size_ctl.set_initial_size = TRUE; + invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024; + + invalid_auto_size_ctl.min_clean_fraction = 0.1; + + invalid_auto_size_ctl.max_size = 16 * 1024 * 1024; + invalid_auto_size_ctl.min_size = 1 * 1024 * 1024; + + invalid_auto_size_ctl.epoch_length = H5C__MAX_AR_EPOCH_LENGTH + 1; + /* INVALID */ + + invalid_auto_size_ctl.incr_mode = H5C_incr__threshold; + + invalid_auto_size_ctl.lower_hr_threshold = 0.75; + + invalid_auto_size_ctl.increment = 2.0; + + invalid_auto_size_ctl.apply_max_increment = TRUE; + invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + invalid_auto_size_ctl.decr_mode = H5C_decr__threshold; + + invalid_auto_size_ctl.upper_hr_threshold = 0.999; + + invalid_auto_size_ctl.decrement = 0.9; + + invalid_auto_size_ctl.apply_max_decrement = TRUE; + invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + invalid_auto_size_ctl.epochs_before_eviction = 3; + + invalid_auto_size_ctl.apply_empty_reserve = TRUE; + invalid_auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, + &invalid_auto_size_ctl); + + if ( result != FAIL ) { + + pass = FALSE; + failure_mssg = + "H5C_set_cache_auto_resize_config accepted bad epoch len 1.\n"; + } + } + + if ( pass ) { + + result = H5C_get_cache_auto_resize_config(cache_ptr, + &test_auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_get_cache_auto_resize_config failed 11."; + + } else if ( ! 
RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \ + ref_auto_size_ctl, FALSE) ) { + + pass = FALSE; + failure_mssg = "Unexpected auto resize config 11."; + } + } + + if ( pass ) { + + invalid_auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + invalid_auto_size_ctl.rpt_fcn = NULL; + + invalid_auto_size_ctl.set_initial_size = TRUE; + invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024; + + invalid_auto_size_ctl.min_clean_fraction = 0.1; + + invalid_auto_size_ctl.max_size = 16 * 1024 * 1024; + invalid_auto_size_ctl.min_size = 1 * 1024 * 1024; + + invalid_auto_size_ctl.epoch_length = H5C__MIN_AR_EPOCH_LENGTH - 1; + /* INVALID */ + + invalid_auto_size_ctl.incr_mode = H5C_incr__threshold; + + invalid_auto_size_ctl.lower_hr_threshold = 0.75; + + invalid_auto_size_ctl.increment = 2.0; + + invalid_auto_size_ctl.apply_max_increment = TRUE; + invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + invalid_auto_size_ctl.decr_mode = H5C_decr__threshold; + + invalid_auto_size_ctl.upper_hr_threshold = 0.999; + + invalid_auto_size_ctl.decrement = 0.9; + + invalid_auto_size_ctl.apply_max_decrement = TRUE; + invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + invalid_auto_size_ctl.epochs_before_eviction = 3; + + invalid_auto_size_ctl.apply_empty_reserve = TRUE; + invalid_auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, + &invalid_auto_size_ctl); + + if ( result != FAIL ) { + + pass = FALSE; + failure_mssg = + "H5C_set_cache_auto_resize_config accepted bad epoch len 2.\n"; + } + } + + if ( pass ) { + + result = H5C_get_cache_auto_resize_config(cache_ptr, + &test_auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_get_cache_auto_resize_config failed 12."; + + } else if ( ! 
RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \ + ref_auto_size_ctl, FALSE) ) { + + pass = FALSE; + failure_mssg = "Unexpected auto resize config 12."; + } + } + + + /* test for bad incr_mode rejection */ + + if ( pass ) { + + invalid_auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + invalid_auto_size_ctl.rpt_fcn = NULL; + + invalid_auto_size_ctl.set_initial_size = TRUE; + invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024; + + invalid_auto_size_ctl.min_clean_fraction = 0.1; + + invalid_auto_size_ctl.max_size = 16 * 1024 * 1024; + invalid_auto_size_ctl.min_size = 1 * 1024 * 1024; + + invalid_auto_size_ctl.epoch_length = 5000; + + + invalid_auto_size_ctl.incr_mode = + (enum H5C_cache_incr_mode) -1; /* INVALID */ + + invalid_auto_size_ctl.lower_hr_threshold = 0.75; + + invalid_auto_size_ctl.increment = 2.0; + + invalid_auto_size_ctl.apply_max_increment = TRUE; + invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + invalid_auto_size_ctl.decr_mode = H5C_decr__threshold; + + invalid_auto_size_ctl.upper_hr_threshold = 0.999; + + invalid_auto_size_ctl.decrement = 0.9; + + invalid_auto_size_ctl.apply_max_decrement = TRUE; + invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + invalid_auto_size_ctl.epochs_before_eviction = 3; + + invalid_auto_size_ctl.apply_empty_reserve = TRUE; + invalid_auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, + &invalid_auto_size_ctl); + + if ( result != FAIL ) { + + pass = FALSE; + failure_mssg = + "H5C_set_cache_auto_resize_config accepted bad incr_mode 1.\n"; + } + } + + if ( pass ) { + + result = H5C_get_cache_auto_resize_config(cache_ptr, + &test_auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_get_cache_auto_resize_config failed 13."; + + } else if ( ! 
RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \ + ref_auto_size_ctl, FALSE) ) { + + pass = FALSE; + failure_mssg = "Unexpected auto resize config 13."; + } + } + + if ( pass ) { + + invalid_auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + invalid_auto_size_ctl.rpt_fcn = NULL; + + invalid_auto_size_ctl.set_initial_size = TRUE; + invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024; + + invalid_auto_size_ctl.min_clean_fraction = 0.1; + + invalid_auto_size_ctl.max_size = 16 * 1024 * 1024; + invalid_auto_size_ctl.min_size = 1 * 1024 * 1024; + + invalid_auto_size_ctl.epoch_length = 5000; + + + invalid_auto_size_ctl.incr_mode = + (enum H5C_cache_incr_mode) 2; /* INVALID */ + + invalid_auto_size_ctl.lower_hr_threshold = 0.75; + + invalid_auto_size_ctl.increment = 2.0; + + invalid_auto_size_ctl.apply_max_increment = TRUE; + invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + invalid_auto_size_ctl.decr_mode = H5C_decr__threshold; + + invalid_auto_size_ctl.upper_hr_threshold = 0.999; + + invalid_auto_size_ctl.decrement = 0.9; + + invalid_auto_size_ctl.apply_max_decrement = TRUE; + invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + invalid_auto_size_ctl.epochs_before_eviction = 3; + + invalid_auto_size_ctl.apply_empty_reserve = TRUE; + invalid_auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, + &invalid_auto_size_ctl); + + if ( result != FAIL ) { + + pass = FALSE; + failure_mssg = + "H5C_set_cache_auto_resize_config accepted bad incr_mode 2.\n"; + } + } + + if ( pass ) { + + result = H5C_get_cache_auto_resize_config(cache_ptr, + &test_auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_get_cache_auto_resize_config failed 14."; + + } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \ + ref_auto_size_ctl, FALSE) ) { + + pass = FALSE; + failure_mssg = "Unexpected auto resize config 14."; + } + } + + + /* check for bad upper and/or lower threshold rejection */ + + if ( pass ) { + + invalid_auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + invalid_auto_size_ctl.rpt_fcn = NULL; + + invalid_auto_size_ctl.set_initial_size = TRUE; + invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024; + + invalid_auto_size_ctl.min_clean_fraction = 0.5; + + invalid_auto_size_ctl.max_size = 16 * 1024 * 1024; + invalid_auto_size_ctl.min_size = 1 * 1024 * 1024; + + invalid_auto_size_ctl.epoch_length = 5000; + + + invalid_auto_size_ctl.incr_mode = H5C_incr__threshold; + + invalid_auto_size_ctl.lower_hr_threshold = 0.7; + + invalid_auto_size_ctl.increment = 2.0; + + invalid_auto_size_ctl.apply_max_increment = TRUE; + invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + invalid_auto_size_ctl.decr_mode = H5C_decr__threshold; + + invalid_auto_size_ctl.upper_hr_threshold = 1.01; /* INVALID */ + + invalid_auto_size_ctl.decrement = 0.5; + + invalid_auto_size_ctl.apply_max_decrement = TRUE; + invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + invalid_auto_size_ctl.epochs_before_eviction = 3; + + invalid_auto_size_ctl.apply_empty_reserve = TRUE; + invalid_auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, + &invalid_auto_size_ctl); + + if ( result != FAIL ) { + + pass = FALSE; + failure_mssg = + "H5C_set_cache_auto_resize_config accepted bad upper threshold.\n"; + } + } + + if ( pass ) { + + result = H5C_get_cache_auto_resize_config(cache_ptr, + &test_auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = 
"H5C_get_cache_auto_resize_config failed 15."; + + } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \ + ref_auto_size_ctl, FALSE) ) { + + pass = FALSE; + failure_mssg = "Unexpected auto resize config 15."; + } + } + + if ( pass ) { + + invalid_auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + invalid_auto_size_ctl.rpt_fcn = NULL; + + invalid_auto_size_ctl.set_initial_size = TRUE; + invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024; + + invalid_auto_size_ctl.min_clean_fraction = 0.5; + + invalid_auto_size_ctl.max_size = 16 * 1024 * 1024; + invalid_auto_size_ctl.min_size = 1 * 1024 * 1024; + + invalid_auto_size_ctl.epoch_length = 5000; + + + invalid_auto_size_ctl.incr_mode = H5C_incr__threshold; + + invalid_auto_size_ctl.lower_hr_threshold = 0.8; /* INVALID */ + + invalid_auto_size_ctl.increment = 2.0; + + invalid_auto_size_ctl.apply_max_increment = TRUE; + invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + invalid_auto_size_ctl.decr_mode = H5C_decr__threshold; + + invalid_auto_size_ctl.upper_hr_threshold = 0.7; /* INVALID */ + + invalid_auto_size_ctl.decrement = 0.5; + + invalid_auto_size_ctl.apply_max_decrement = TRUE; + invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + invalid_auto_size_ctl.epochs_before_eviction = 3; + + invalid_auto_size_ctl.apply_empty_reserve = TRUE; + invalid_auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, + &invalid_auto_size_ctl); + + if ( result != FAIL ) { + + pass = FALSE; + failure_mssg = + "H5C_set_cache_auto_resize_config accepted bad threshold pair.\n"; + } + } + + if ( pass ) { + + result = H5C_get_cache_auto_resize_config(cache_ptr, + &test_auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_get_cache_auto_resize_config failed 16."; + + } else if ( ! 
RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \ + ref_auto_size_ctl, FALSE) ) { + + pass = FALSE; + failure_mssg = "Unexpected auto resize config 16."; + } + } + + if ( pass ) { + + invalid_auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + invalid_auto_size_ctl.rpt_fcn = NULL; + + invalid_auto_size_ctl.set_initial_size = TRUE; + invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024; + + invalid_auto_size_ctl.min_clean_fraction = 0.5; + + invalid_auto_size_ctl.max_size = 16 * 1024 * 1024; + invalid_auto_size_ctl.min_size = 1 * 1024 * 1024; + + invalid_auto_size_ctl.epoch_length = 5000; + + + invalid_auto_size_ctl.incr_mode = H5C_incr__threshold; + + invalid_auto_size_ctl.lower_hr_threshold = -0.0001; /* INVALID */ + + invalid_auto_size_ctl.increment = 2.0; + + invalid_auto_size_ctl.apply_max_increment = TRUE; + invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + invalid_auto_size_ctl.decr_mode = H5C_decr__threshold; + + invalid_auto_size_ctl.upper_hr_threshold = 0.999; + + invalid_auto_size_ctl.decrement = 0.5; + + invalid_auto_size_ctl.apply_max_decrement = TRUE; + invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + invalid_auto_size_ctl.epochs_before_eviction = 3; + + invalid_auto_size_ctl.apply_empty_reserve = TRUE; + invalid_auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, + &invalid_auto_size_ctl); + + if ( result != FAIL ) { + + pass = FALSE; + failure_mssg = + "H5C_set_cache_auto_resize_config accepted bad lower threshold.\n"; + } + } + + if ( pass ) { + + result = H5C_get_cache_auto_resize_config(cache_ptr, + &test_auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_get_cache_auto_resize_config failed 17."; + + } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \ + ref_auto_size_ctl, FALSE) ) { + + pass = FALSE; + failure_mssg = "Unexpected auto resize config 17."; + } + } + + + /* test for bad increment rejection */ + + if ( pass ) { + + invalid_auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + invalid_auto_size_ctl.rpt_fcn = NULL; + + invalid_auto_size_ctl.set_initial_size = TRUE; + invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024; + + invalid_auto_size_ctl.min_clean_fraction = 0.1; + + invalid_auto_size_ctl.max_size = 16 * 1024 * 1024; + invalid_auto_size_ctl.min_size = 1 * 1024 * 1024; + + invalid_auto_size_ctl.epoch_length = 5000; + + + invalid_auto_size_ctl.incr_mode = H5C_incr__threshold; + + invalid_auto_size_ctl.lower_hr_threshold = 0.75; + + invalid_auto_size_ctl.increment = 0.99999; /* INVALID */ + + invalid_auto_size_ctl.apply_max_increment = TRUE; + invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + invalid_auto_size_ctl.decr_mode = H5C_decr__threshold; + + invalid_auto_size_ctl.upper_hr_threshold = 0.999; + + invalid_auto_size_ctl.decrement = 0.5; + + invalid_auto_size_ctl.apply_max_decrement = TRUE; + invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + invalid_auto_size_ctl.epochs_before_eviction = 3; + + invalid_auto_size_ctl.apply_empty_reserve = TRUE; + invalid_auto_size_ctl.empty_reserve = 0.05; + + + result = H5C_set_cache_auto_resize_config(cache_ptr, + &invalid_auto_size_ctl); + + if ( result != FAIL ) { + + pass = FALSE; + failure_mssg = + "H5C_set_cache_auto_resize_config accepted bad increment.\n"; + } + } + + if ( pass ) { + + result = H5C_get_cache_auto_resize_config(cache_ptr, + &test_auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_get_cache_auto_resize_config failed 18."; + + } 
else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \ + ref_auto_size_ctl, FALSE) ) { + + pass = FALSE; + failure_mssg = "Unexpected auto resize config 18."; + } + } + + + /* test for bad decr_mode rejection */ + + if ( pass ) { + + invalid_auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + invalid_auto_size_ctl.rpt_fcn = NULL; + + invalid_auto_size_ctl.set_initial_size = TRUE; + invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024; + + invalid_auto_size_ctl.min_clean_fraction = 0.1; + + invalid_auto_size_ctl.max_size = 16 * 1024 * 1024; + invalid_auto_size_ctl.min_size = 1 * 1024 * 1024; + + invalid_auto_size_ctl.epoch_length = 5000; + + + invalid_auto_size_ctl.incr_mode = H5C_incr__threshold; + + invalid_auto_size_ctl.lower_hr_threshold = 0.75; + + invalid_auto_size_ctl.increment = 2.0; + + invalid_auto_size_ctl.apply_max_increment = TRUE; + invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + invalid_auto_size_ctl.decr_mode = + (enum H5C_cache_decr_mode) -1; /* INVALID */ + + invalid_auto_size_ctl.upper_hr_threshold = 0.999; + + invalid_auto_size_ctl.decrement = 0.9; + + invalid_auto_size_ctl.apply_max_decrement = TRUE; + invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + invalid_auto_size_ctl.epochs_before_eviction = 3; + + invalid_auto_size_ctl.apply_empty_reserve = TRUE; + invalid_auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, + &invalid_auto_size_ctl); + + if ( result != FAIL ) { + + pass = FALSE; + failure_mssg = + "H5C_set_cache_auto_resize_config accepted bad decr_mode 1.\n"; + } + } + + if ( pass ) { + + result = H5C_get_cache_auto_resize_config(cache_ptr, + &test_auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_get_cache_auto_resize_config failed 19."; + + } else if ( ! 
RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \ + ref_auto_size_ctl, FALSE) ) { + + pass = FALSE; + failure_mssg = "Unexpected auto resize config 19."; + } + } + + if ( pass ) { + + invalid_auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + invalid_auto_size_ctl.rpt_fcn = NULL; + + invalid_auto_size_ctl.set_initial_size = TRUE; + invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024; + + invalid_auto_size_ctl.min_clean_fraction = 0.1; + + invalid_auto_size_ctl.max_size = 16 * 1024 * 1024; + invalid_auto_size_ctl.min_size = 1 * 1024 * 1024; + + invalid_auto_size_ctl.epoch_length = 5000; + + + invalid_auto_size_ctl.incr_mode = H5C_incr__threshold; + + invalid_auto_size_ctl.lower_hr_threshold = 0.75; + + invalid_auto_size_ctl.increment = 2.0; + + invalid_auto_size_ctl.apply_max_increment = TRUE; + invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + invalid_auto_size_ctl.decr_mode = + (enum H5C_cache_incr_mode) 4; /* INVALID */ + + invalid_auto_size_ctl.upper_hr_threshold = 0.999; + + invalid_auto_size_ctl.decrement = 0.9; + + invalid_auto_size_ctl.apply_max_decrement = TRUE; + invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + invalid_auto_size_ctl.epochs_before_eviction = 3; + + invalid_auto_size_ctl.apply_empty_reserve = TRUE; + invalid_auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, + &invalid_auto_size_ctl); + + if ( result != FAIL ) { + + pass = FALSE; + failure_mssg = + "H5C_set_cache_auto_resize_config accepted bad decr_mode 2.\n"; + } + } + + if ( pass ) { + + result = H5C_get_cache_auto_resize_config(cache_ptr, + &test_auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_get_cache_auto_resize_config failed 20."; + + } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \ + ref_auto_size_ctl, FALSE) ) { + + pass = FALSE; + failure_mssg = "Unexpected auto resize config 20."; + } + } + + + /* check for bad decrement rejection */ + + if ( pass ) { + + invalid_auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + invalid_auto_size_ctl.rpt_fcn = NULL; + + invalid_auto_size_ctl.set_initial_size = TRUE; + invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024; + + invalid_auto_size_ctl.min_clean_fraction = 0.1; + + invalid_auto_size_ctl.max_size = 16 * 1024 * 1024; + invalid_auto_size_ctl.min_size = 1 * 1024 * 1024; + + invalid_auto_size_ctl.epoch_length = 5000; + + + invalid_auto_size_ctl.incr_mode = H5C_incr__threshold; + + invalid_auto_size_ctl.lower_hr_threshold = 0.75; + + invalid_auto_size_ctl.increment = 2.0; + + invalid_auto_size_ctl.apply_max_increment = TRUE; + invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + invalid_auto_size_ctl.decr_mode = H5C_decr__threshold; + + invalid_auto_size_ctl.upper_hr_threshold = 0.999; + + invalid_auto_size_ctl.decrement = 1.000001; /* INVALID */ + + invalid_auto_size_ctl.apply_max_decrement = TRUE; + invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + invalid_auto_size_ctl.epochs_before_eviction = 3; + + invalid_auto_size_ctl.apply_empty_reserve = TRUE; + invalid_auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, + &invalid_auto_size_ctl); + + if ( result != FAIL ) { + + pass = FALSE; + failure_mssg = + "H5C_set_cache_auto_resize_config accepted bad decrement 1.\n"; + } + } + + if ( pass ) { + + result = H5C_get_cache_auto_resize_config(cache_ptr, + &test_auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_get_cache_auto_resize_config failed 21."; 
+ + } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \ + ref_auto_size_ctl, FALSE) ) { + + pass = FALSE; + failure_mssg = "Unexpected auto resize config 21."; + } + } + + if ( pass ) { + + invalid_auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + invalid_auto_size_ctl.rpt_fcn = NULL; + + invalid_auto_size_ctl.set_initial_size = TRUE; + invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024; + + invalid_auto_size_ctl.min_clean_fraction = 0.1; + + invalid_auto_size_ctl.max_size = 16 * 1024 * 1024; + invalid_auto_size_ctl.min_size = 1 * 1024 * 1024; + + invalid_auto_size_ctl.epoch_length = 5000; + + + invalid_auto_size_ctl.incr_mode = H5C_incr__threshold; + + invalid_auto_size_ctl.lower_hr_threshold = 0.75; + + invalid_auto_size_ctl.increment = 2.0; + + invalid_auto_size_ctl.apply_max_increment = TRUE; + invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + invalid_auto_size_ctl.decr_mode = H5C_decr__threshold; + + invalid_auto_size_ctl.upper_hr_threshold = 0.999; + + invalid_auto_size_ctl.decrement = -0.000001; /* INVALID */ + + invalid_auto_size_ctl.apply_max_decrement = TRUE; + invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + invalid_auto_size_ctl.epochs_before_eviction = 3; + + invalid_auto_size_ctl.apply_empty_reserve = TRUE; + invalid_auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, + &invalid_auto_size_ctl); + + if ( result != FAIL ) { + + pass = FALSE; + failure_mssg = + "H5C_set_cache_auto_resize_config accepted bad decrement 2.\n"; + } + } + + if ( pass ) { + + result = H5C_get_cache_auto_resize_config(cache_ptr, + &test_auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_get_cache_auto_resize_config failed 22."; + + } else if ( ! 
RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \ + ref_auto_size_ctl, FALSE) ) { + + pass = FALSE; + failure_mssg = "Unexpected auto resize config 22."; + } + } + + + /* check for rejection of bad epochs_before_eviction */ + + if ( pass ) { + + invalid_auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + invalid_auto_size_ctl.rpt_fcn = NULL; + + invalid_auto_size_ctl.set_initial_size = TRUE; + invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024; + + invalid_auto_size_ctl.min_clean_fraction = 0.1; + + invalid_auto_size_ctl.max_size = 16 * 1024 * 1024; + invalid_auto_size_ctl.min_size = 1 * 1024 * 1024; + + invalid_auto_size_ctl.epoch_length = 5000; + + + invalid_auto_size_ctl.incr_mode = H5C_incr__threshold; + + invalid_auto_size_ctl.lower_hr_threshold = 0.75; + + invalid_auto_size_ctl.increment = 2.0; + + invalid_auto_size_ctl.apply_max_increment = TRUE; + invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + invalid_auto_size_ctl.decr_mode = H5C_decr__age_out; + + invalid_auto_size_ctl.upper_hr_threshold = 0.999; + + invalid_auto_size_ctl.decrement = 0.9; + + invalid_auto_size_ctl.apply_max_decrement = TRUE; + invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + invalid_auto_size_ctl.epochs_before_eviction = 0; /* INVALID */ + + invalid_auto_size_ctl.apply_empty_reserve = TRUE; + invalid_auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, + &invalid_auto_size_ctl); + + if ( result != FAIL ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config accepted bad epochs_before_eviction 1.\n"; + } + } + + if ( pass ) { + + result = H5C_get_cache_auto_resize_config(cache_ptr, + &test_auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_get_cache_auto_resize_config failed 23."; + + } else if ( ! 
RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \ + ref_auto_size_ctl, FALSE) ) { + + pass = FALSE; + failure_mssg = "Unexpected auto resize config 23."; + } + } + + if ( pass ) { + + invalid_auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + invalid_auto_size_ctl.rpt_fcn = NULL; + + invalid_auto_size_ctl.set_initial_size = TRUE; + invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024; + + invalid_auto_size_ctl.min_clean_fraction = 0.1; + + invalid_auto_size_ctl.max_size = 16 * 1024 * 1024; + invalid_auto_size_ctl.min_size = 1 * 1024 * 1024; + + invalid_auto_size_ctl.epoch_length = 5000; + + + invalid_auto_size_ctl.incr_mode = H5C_incr__threshold; + + invalid_auto_size_ctl.lower_hr_threshold = 0.75; + + invalid_auto_size_ctl.increment = 2.0; + + invalid_auto_size_ctl.apply_max_increment = TRUE; + invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + invalid_auto_size_ctl.decr_mode = H5C_decr__age_out_with_threshold; + + invalid_auto_size_ctl.upper_hr_threshold = 0.999; + + invalid_auto_size_ctl.decrement = 0.9; + + invalid_auto_size_ctl.apply_max_decrement = TRUE; + invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + invalid_auto_size_ctl.epochs_before_eviction = + H5C__MAX_EPOCH_MARKERS + 1; /* INVALID */ + + invalid_auto_size_ctl.apply_empty_reserve = TRUE; + invalid_auto_size_ctl.empty_reserve = 0.05; + + result = H5C_set_cache_auto_resize_config(cache_ptr, + &invalid_auto_size_ctl); + + if ( result != FAIL ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config accepted bad epochs_before_eviction 2.\n"; + } + } + + if ( pass ) { + + result = H5C_get_cache_auto_resize_config(cache_ptr, + &test_auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_get_cache_auto_resize_config failed 24."; + + } else if ( ! 
RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \ + ref_auto_size_ctl, FALSE) ) { + + pass = FALSE; + failure_mssg = "Unexpected auto resize config 24."; + } + } + + + /* Check for bad apply_empty_reserve rejection */ + + if ( pass ) { + + invalid_auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER; + invalid_auto_size_ctl.rpt_fcn = NULL; + + invalid_auto_size_ctl.set_initial_size = TRUE; + invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024; + + invalid_auto_size_ctl.min_clean_fraction = 0.1; + + invalid_auto_size_ctl.max_size = 16 * 1024 * 1024; + invalid_auto_size_ctl.min_size = 1 * 1024 * 1024; + + invalid_auto_size_ctl.epoch_length = 5000; + + + invalid_auto_size_ctl.incr_mode = H5C_incr__threshold; + + invalid_auto_size_ctl.lower_hr_threshold = 0.75; + + invalid_auto_size_ctl.increment = 2.0; + + invalid_auto_size_ctl.apply_max_increment = TRUE; + invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024); + + + invalid_auto_size_ctl.decr_mode = H5C_decr__age_out; + + invalid_auto_size_ctl.upper_hr_threshold = 0.999; + + invalid_auto_size_ctl.decrement = 0.9; + + invalid_auto_size_ctl.apply_max_decrement = TRUE; + invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024); + + invalid_auto_size_ctl.epochs_before_eviction = 3; + + invalid_auto_size_ctl.apply_empty_reserve = TRUE; + invalid_auto_size_ctl.empty_reserve = -0.0000001; /* INVALID */ + + result = H5C_set_cache_auto_resize_config(cache_ptr, + &invalid_auto_size_ctl); + + if ( result != FAIL ) { + + pass = FALSE; + failure_mssg = "H5C_set_cache_auto_resize_config accepted bad empty_reserve 1.\n"; + } + } + + if ( pass ) { + + result = H5C_get_cache_auto_resize_config(cache_ptr, + &test_auto_size_ctl); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_get_cache_auto_resize_config failed 25."; + + } else if ( ! 
RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \
+                                               ref_auto_size_ctl, FALSE) ) {
+
+            pass = FALSE;
+            failure_mssg = "Unexpected auto resize config 25.";
+        }
+    }
+
+    if ( pass ) {
+
+        invalid_auto_size_ctl.version = H5C__CURR_AUTO_SIZE_CTL_VER;
+        invalid_auto_size_ctl.rpt_fcn = NULL;
+
+        invalid_auto_size_ctl.set_initial_size = TRUE;
+        invalid_auto_size_ctl.initial_size = 4 * 1024 * 1024;
+
+        invalid_auto_size_ctl.min_clean_fraction = 0.1;
+
+        invalid_auto_size_ctl.max_size = 16 * 1024 * 1024;
+        invalid_auto_size_ctl.min_size = 1 * 1024 * 1024;
+
+        invalid_auto_size_ctl.epoch_length = 5000;
+
+
+        invalid_auto_size_ctl.incr_mode = H5C_incr__threshold;
+
+        invalid_auto_size_ctl.lower_hr_threshold = 0.75;
+
+        invalid_auto_size_ctl.increment = 2.0;
+
+        invalid_auto_size_ctl.apply_max_increment = TRUE;
+        invalid_auto_size_ctl.max_increment = (2 * 1024 * 1024);
+
+
+        invalid_auto_size_ctl.decr_mode = H5C_decr__age_out_with_threshold;
+
+        invalid_auto_size_ctl.upper_hr_threshold = 0.999;
+
+        invalid_auto_size_ctl.decrement = 0.9;
+
+        invalid_auto_size_ctl.apply_max_decrement = TRUE;
+        invalid_auto_size_ctl.max_decrement = (1 * 1024 * 1024);
+
+        invalid_auto_size_ctl.epochs_before_eviction = 3;
+
+        invalid_auto_size_ctl.apply_empty_reserve = TRUE;
+        invalid_auto_size_ctl.empty_reserve = 1.00000001; /* INVALID */
+
+        result = H5C_set_cache_auto_resize_config(cache_ptr,
+                                                  &invalid_auto_size_ctl);
+
+        if ( result != FAIL ) {
+
+            pass = FALSE;
+            failure_mssg = "H5C_set_cache_auto_resize_config accepted bad empty_reserve 2.\n";
+        }
+    }
+
+    if ( pass ) {
+
+        result = H5C_get_cache_auto_resize_config(cache_ptr,
+                                                  &test_auto_size_ctl);
+
+        if ( result != SUCCEED ) {
+
+            pass = FALSE;
+            failure_mssg = "H5C_get_cache_auto_resize_config failed 26.";
+
+        } else if ( ! RESIZE_CONFIGS_ARE_EQUAL(test_auto_size_ctl, \
+                                               ref_auto_size_ctl, FALSE) ) {
+
+            pass = FALSE;
+            failure_mssg = "Unexpected auto resize config 26.";
+        }
+    }
+
+
+    /* finally, before we finish, try feeding
+     * H5C_get_cache_auto_resize_config invalid data.
+     */
+
+    if ( pass ) {
+
+        result = H5C_get_cache_auto_resize_config(NULL, &test_auto_size_ctl);
+
+        if ( result != FAIL ) {
+
+            pass = FALSE;
+            failure_mssg =
+                "H5C_get_cache_auto_resize_config accepted NULL cache_ptr.\n";
+        }
+    }
+
+    if ( pass ) {
+
+        result = H5C_get_cache_auto_resize_config((H5C_t *)&test_auto_size_ctl,
+                                                  &test_auto_size_ctl);
+
+        if ( result != FAIL ) {
+
+            pass = FALSE;
+            failure_mssg =
+                "H5C_get_cache_auto_resize_config accepted bad cache_ptr.\n";
+        }
+    }
+
+    if ( pass ) {
+
+        result = H5C_get_cache_auto_resize_config(cache_ptr, NULL);
+
+        if ( result != FAIL ) {
+
+            pass = FALSE;
+            failure_mssg =
+                "H5C_get_cache_auto_resize_config accepted NULL config ptr.\n";
+        }
+    }
+
+    if ( pass ) {
+
+        takedown_cache(cache_ptr, FALSE, FALSE);
+    }
+
+    if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+    if ( ! pass )
+        HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n",
+                  fcn_name, failure_mssg);
+
+} /* check_auto_cache_resize_input_errs() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    check_auto_cache_resize_aux_fcns()
+ *
+ * Purpose:     Verify that the auxiliary functions associated with
+ *              the automatic cache resize capability are operating
+ *              correctly.  These functions are:
+ *
+ *                  H5C_get_cache_size()
+ *                  H5C_get_cache_hit_rate()
+ *                  H5C_reset_cache_hit_rate_stats()
+ *
+ * Return:      void
+ *
+ * Programmer:  John Mainzer
+ *              11/4/04
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+check_auto_cache_resize_aux_fcns(void)
+{
+    const char * fcn_name = "check_auto_cache_resize_aux_fcns()";
+    herr_t result;
+    int32_t i;
+    H5C_t * cache_ptr = NULL;
+    double hit_rate;
+    size_t max_size;
+    size_t min_clean_size;
+    size_t cur_size;
+    int32_t cur_num_entries;
+    H5C_auto_size_ctl_t auto_size_ctl =
+    {
+        /* int32_t     version                = */ H5C__CURR_AUTO_SIZE_CTL_VER,
+#if 0
+        /* H5C_auto_resize_report_fcn rpt_fcn = */ NULL,
+#else
+        /* H5C_auto_resize_report_fcn rpt_fcn = */ H5C_def_auto_resize_rpt_fcn,
+#endif
+        /* hbool_t     set_initial_size       = */ TRUE,
+        /* size_t      initial_size           = */ (1 * 1024 * 1024),
+
+        /* double      min_clean_fraction     = */ 0.5,
+
+        /* size_t      max_size               = */ (16 * 1024 * 1024),
+        /* size_t      min_size               = */ (512 * 1024),
+
+        /* int64_t     epoch_length           = */ 50000,
+
+
+        /* enum H5C_cache_incr_mode incr_mode = */ H5C_incr__off,
+
+        /* double      lower_hr_threshold     = */ 0.75,
+
+        /* double      increment              = */ 2.0,
+
+        /* hbool_t     apply_max_increment    = */ TRUE,
+        /* size_t      max_increment          = */ (4 * 1024 * 1024),
+
+
+        /* enum H5C_cache_decr_mode decr_mode = */ H5C_decr__off,
+
+        /* double      upper_hr_threshold     = */ 0.995,
+
+        /* double      decrement              = */ 0.9,
+
+        /* hbool_t     apply_max_decrement    = */ TRUE,
+        /* size_t      max_decrement          = */ (1 * 1024 * 1024),
+
+        /* int32_t     epochs_before_eviction = */ 3,
+
+        /* hbool_t     apply_empty_reserve    = */ TRUE,
+        /* double      empty_reserve          = */ 0.5
+    };
+
+
+    TESTING("automatic cache resize auxiliary functions");
+
+    pass = TRUE;
+
+    /* allocate a cache, and then test the various auxiliary functions.
+     */
+
+    if ( pass ) {
+
+        reset_entries();
+
+        cache_ptr = setup_cache((size_t)(2 * 1024),
+                                (size_t)(1 * 1024));
+    }
+
+    if ( pass ) {
+
+        result = H5C_set_cache_auto_resize_config(cache_ptr,
+                                                  &auto_size_ctl);
+
+        if ( result != SUCCEED ) {
+
+            pass = FALSE;
+            failure_mssg = "H5C_set_cache_auto_resize_config failed 1.\n";
+        }
+    }
+
+    if ( pass ) {
+
+        if ( ( cache_ptr->max_cache_size != (1 * 1024 * 1024) ) ||
+             ( cache_ptr->min_clean_size != (512 * 1024) ) ) {
+
+            pass = FALSE;
+            failure_mssg = "bad cache size after initialization.\n";
+        }
+    }
+
+    /* let's start with the H5C_get_cache_hit_rate(),
+     * H5C_reset_cache_hit_rate_stats() pair.
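+     *
+     * The checks below assume that the reported hit rate is simply
+     * cache_hits / cache_accesses since the last reset:  the first
+     * 1000 accesses touch 1000 distinct pico entries (all misses,
+     * so the expected rate is 0.0), while the next 1000 accesses
+     * re-read a single resident entry (1000 hits in 2000 accesses,
+     * so the expected rate is 0.5).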
+ */ + + if ( pass ) { + + if ( ( H5C_get_cache_hit_rate(NULL, &hit_rate) != FAIL ) || + ( H5C_get_cache_hit_rate(cache_ptr, NULL) != FAIL ) ) { + + pass = FALSE; + failure_mssg = "H5C_get_cache_hit_rate accepts bad params.\n"; + } + } + + if ( pass ) { + + result = H5C_get_cache_hit_rate(cache_ptr, &hit_rate); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_get_cache_hit_rate failed.\n"; + + } else if ( hit_rate != 0.0 ) { + + pass = FALSE; + failure_mssg = + "H5C_get_cache_hit_rate returned unexpected hit rate 1.\n"; + } + } + + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, PICO_ENTRY_TYPE, i); + + if ( pass ) { + + unprotect_entry(cache_ptr, PICO_ENTRY_TYPE, i, + NO_CHANGE, FALSE); + } + i++; + } + } + + if ( pass ) { + + result = H5C_get_cache_hit_rate(cache_ptr, &hit_rate); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_get_cache_hit_rate failed.\n"; + + } else if ( hit_rate != 0.0 ) { + + pass = FALSE; + failure_mssg = + "H5C_get_cache_hit_rate returned unexpected hit rate 2.\n"; + + } else if ( ( cache_ptr->cache_accesses != 1000 ) || + ( cache_ptr->cache_hits != 0 ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache hit rate stats.\n"; + + } else if ( rpt_fcn_called ) { + + pass = FALSE; + failure_mssg = "Report function called?.\n"; + + } + } + + if ( pass ) { + + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, PICO_ENTRY_TYPE, 0); + + if ( pass ) { + + unprotect_entry(cache_ptr, PICO_ENTRY_TYPE, 0, + NO_CHANGE, FALSE); + } + i++; + } + } + + if ( pass ) { + + result = H5C_get_cache_hit_rate(cache_ptr, &hit_rate); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_get_cache_hit_rate failed.\n"; + + } else if ( hit_rate != 0.5 ) { + + pass = FALSE; + failure_mssg = + "H5C_get_cache_hit_rate returned unexpected hit rate 3.\n"; + + } else if ( ( cache_ptr->cache_accesses != 2000 ) || + ( cache_ptr->cache_hits != 1000 ) ) { + + pass = FALSE; + failure_mssg = "Unexpected cache hit rate stats.\n"; + + } else if ( rpt_fcn_called ) { + + pass = FALSE; + failure_mssg = "Report function called?.\n"; + + } + } + + if ( pass ) { + + result = H5C_reset_cache_hit_rate_stats(NULL); + + if ( result != FAIL ) { + + pass = FALSE; + failure_mssg = + "H5C_reset_cache_hit_rate_stats accepted NULL cache_ptr.\n"; - pass = TRUE; + } else if ( ( cache_ptr->cache_accesses != 2000 ) || + ( cache_ptr->cache_hits != 1000 ) ) { - /* allocate a cache, protect an entry, and then try to insert - * the entry again. This should fail. Unprotect the entry and - * destroy the cache -- should succeed. 
- */ + pass = FALSE; + failure_mssg = + "Failed call to H5C_reset_cache_hit_rate_stats altered stats?\n"; + } + } if ( pass ) { - reset_entries(); - - cache_ptr = setup_cache((size_t)(2 * 1024), - (size_t)(1 * 1024)); + result = H5C_reset_cache_hit_rate_stats(cache_ptr); - protect_entry(cache_ptr, 0, 0); + if ( result != SUCCEED ) { - if ( pass ) { + pass = FALSE; + failure_mssg = "H5C_reset_cache_hit_rate_stats failed.\n"; - base_addr = entries[0]; - entry_ptr = &(base_addr[0]); + } else if ( ( cache_ptr->cache_accesses != 0 ) || + ( cache_ptr->cache_hits != 0 ) ) { - result = H5C_insert_entry(NULL, -1, -1, cache_ptr, - &(types[0]), entry_ptr->addr, - (void *)entry_ptr); + pass = FALSE; + failure_mssg = "Unexpected cache hit rate stats.\n"; - if ( result >= 0 ) { + } + } - pass = FALSE; - failure_mssg = "insert of duplicate entry succeeded.\n"; + if ( pass ) { - } else { + rpt_fcn_called = FALSE; + i = 0; + while ( ( pass ) && ( i < 1000 ) ) + { + protect_entry(cache_ptr, PICO_ENTRY_TYPE, i + 500); - unprotect_entry(cache_ptr, 0, 0, TRUE, FALSE); + if ( pass ) { - takedown_cache(cache_ptr, FALSE, FALSE); + unprotect_entry(cache_ptr, PICO_ENTRY_TYPE, i + 500, + NO_CHANGE, FALSE); } + i++; } } - if ( pass ) { PASSED(); } else { H5_FAILED(); } - - if ( ! pass ) - HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); -} /* check_duplicate_insert_err() */ + if ( pass ) { - -/*------------------------------------------------------------------------- - * Function: check_rename_err() - * - * Purpose: Verify that an attempt to rename an entry to the address - * of an existing entry will generate an error. - * - * Return: void - * - * Programmer: John Mainzer - * 6/24/04 - * - * Modifications: - * - *------------------------------------------------------------------------- - */ + result = H5C_get_cache_hit_rate(cache_ptr, &hit_rate); -static void -check_rename_err(void) -{ - const char * fcn_name = "check_rename_err()"; - herr_t result; - H5C_t * cache_ptr = NULL; - test_entry_t * entry_0_0_ptr; - test_entry_t * entry_0_1_ptr; - test_entry_t * entry_1_0_ptr; + if ( result != SUCCEED ) { - TESTING("rename to existing entry errors"); + pass = FALSE; + failure_mssg = "H5C_get_cache_hit_rate failed.\n"; - pass = TRUE; + } else if ( hit_rate != 0.5 ) { - /* allocate a cache, and insert several entries. Try to rename - * entries to other entries resident in the cache. This should - * fail. Destroy the cache -- should succeed. - */ + pass = FALSE; + failure_mssg = + "H5C_get_cache_hit_rate returned unexpected hit rate 4.\n"; - if ( pass ) { + } else if ( ( cache_ptr->cache_accesses != 1000 ) || + ( cache_ptr->cache_hits != 500 ) ) { - reset_entries(); + pass = FALSE; + failure_mssg = "Unexpected cache hit rate stats.\n"; - cache_ptr = setup_cache((size_t)(2 * 1024), - (size_t)(1 * 1024)); + } else if ( rpt_fcn_called ) { - insert_entry(cache_ptr, 0, 0, TRUE); - insert_entry(cache_ptr, 0, 1, TRUE); - insert_entry(cache_ptr, 1, 0, TRUE); + pass = FALSE; + failure_mssg = "Report function called?.\n"; - entry_0_0_ptr = &((entries[0])[0]); - entry_0_1_ptr = &((entries[0])[1]); - entry_1_0_ptr = &((entries[1])[0]); + } } + /*************************************************** + * So much for testing H5C_get_cache_hit_rate() and + * H5C_reset_cache_hit_rate_stats(). Now on to + * H5C_get_cache_size(). 
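+     *
+     * The calls below also verify that any of the four output
+     * parameters (max_size, min_clean_size, cur_size, and
+     * cur_num_entries) may be passed as NULL, in which case that
+     * value is simply not returned while the remaining requested
+     * values are still reported.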
+ ***************************************************/ + if ( pass ) { - result = H5C_rename_entry(NULL, cache_ptr, &(types[0]), - entry_0_0_ptr->addr, entry_0_1_ptr->addr); + result = H5C_get_cache_size(NULL, &max_size, &min_clean_size, + &cur_size, &cur_num_entries); - if ( result >= 0 ) { + if ( result != FAIL ) { pass = FALSE; - failure_mssg = "rename to addr of same type succeeded.\n"; + failure_mssg = "H5C_get_cache_size accepted NULL cache_ptr.\n"; } } if ( pass ) { - result = H5C_rename_entry(NULL, cache_ptr, &(types[0]), - entry_0_0_ptr->addr, entry_1_0_ptr->addr); + max_size = 0; + min_clean_size = 0; + cur_size = 0; + cur_num_entries = 0; - if ( result >= 0 ) { + result = H5C_get_cache_size(cache_ptr, &max_size, &min_clean_size, + &cur_size, &cur_num_entries); + + if ( result != SUCCEED ) { pass = FALSE; - failure_mssg = "rename to addr of different type succeeded.\n"; + failure_mssg = "H5C_get_cache_size failed 1.\n"; + + } else if ( max_size != (1 * 1024 * 1024) ) { + + pass = FALSE; + failure_mssg = + "H5C_get_cache_size reports unexpected max_size 1.\n"; + + } else if ( min_clean_size != (512 * 1024) ) { + + pass = FALSE; + failure_mssg = + "H5C_get_cache_size reports unexpected min_clean_size 1.\n"; + + } else if ( cur_size != (1500 * PICO_ENTRY_SIZE) ) { + + pass = FALSE; + failure_mssg = + "H5C_get_cache_size reports unexpected cur_size 1.\n"; + + } else if ( cur_num_entries != 1500 ) { + + pass = FALSE; + failure_mssg = + "H5C_get_cache_size reports unexpected cur_num_entries 1.\n"; } } + /* read a larger entry so that cur_size and cur_num_entries will be + * different. + */ if ( pass ) { - takedown_cache(cache_ptr, FALSE, FALSE); + protect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0); } - if ( pass ) { PASSED(); } else { H5_FAILED(); } + if ( pass ) { + unprotect_entry(cache_ptr, MONSTER_ENTRY_TYPE, 0, NO_CHANGE, FALSE); + } - if ( ! pass ) - HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + if ( pass ) { -} /* check_rename_err() */ + max_size = 0; + min_clean_size = 0; + cur_size = 0; + cur_num_entries = 0; - -/*------------------------------------------------------------------------- - * Function: check_double_protect_err() - * - * Purpose: Verify that an attempt to protect an entry that is already - * protected will generate an error. - * - * Return: void - * - * Programmer: John Mainzer - * 6/24/04 - * - * Modifications: - * - *------------------------------------------------------------------------- - */ + result = H5C_get_cache_size(cache_ptr, &max_size, &min_clean_size, + &cur_size, &cur_num_entries); -static void -check_double_protect_err(void) -{ - const char * fcn_name = "check_double_protect_err()"; - H5C_t * cache_ptr = NULL; - test_entry_t * entry_ptr; - H5C_cache_entry_t * cache_entry_ptr; + if ( result != SUCCEED ) { - TESTING("protect a protected entry error"); + pass = FALSE; + failure_mssg = "H5C_get_cache_size failed 2.\n"; - pass = TRUE; + } else if ( max_size != (1 * 1024 * 1024) ) { - /* allocate a cache, protect an entry, and then try to protect - * the entry again. This should fail. Unprotect the entry and - * destroy the cache -- should succeed. 
- */ + pass = FALSE; + failure_mssg = + "H5C_get_cache_size reports unexpected max_size 2.\n"; - if ( pass ) { + } else if ( min_clean_size != (512 * 1024) ) { - reset_entries(); + pass = FALSE; + failure_mssg = + "H5C_get_cache_size reports unexpected min_clean_size 2.\n"; - cache_ptr = setup_cache((size_t)(2 * 1024), - (size_t)(1 * 1024)); + } else if ( cur_size != + ((1500 * PICO_ENTRY_SIZE) + MONSTER_ENTRY_SIZE) ) { - protect_entry(cache_ptr, 0, 0); + pass = FALSE; + failure_mssg = + "H5C_get_cache_size reports unexpected cur_size 2.\n"; - entry_ptr = &((entries[0])[0]); + } else if ( cur_num_entries != 1501 ) { + + pass = FALSE; + failure_mssg = + "H5C_get_cache_size reports unexpected cur_num_entries 2.\n"; + } } if ( pass ) { - cache_entry_ptr = H5C_protect(NULL, -1, -1, cache_ptr, &(types[0]), - entry_ptr->addr, NULL, NULL); + max_size = 0; + min_clean_size = 0; + cur_size = 0; + cur_num_entries = 0; - if ( cache_entry_ptr != NULL ) { + result = H5C_get_cache_size(cache_ptr, &max_size, NULL, NULL, NULL); + + if ( result != SUCCEED ) { pass = FALSE; - failure_mssg = "attempt to protect a protected entry succeeded.\n"; - } - } + failure_mssg = "H5C_get_cache_size failed 3.\n"; - if ( pass ) { + } else if ( max_size != (1 * 1024 * 1024) ) { - unprotect_entry(cache_ptr, 0, 0, FALSE, FALSE); + pass = FALSE; + failure_mssg = + "H5C_get_cache_size reports unexpected max_size 3.\n"; + + } else if ( ( min_clean_size != 0 ) || + ( cur_size != 0 ) || + ( cur_num_entries != 0 ) ) { + + pass = FALSE; + failure_mssg = "Phantom returns from H5C_get_cache_size?\n"; + + } } if ( pass ) { - takedown_cache(cache_ptr, FALSE, FALSE); - } + max_size = 0; + min_clean_size = 0; + cur_size = 0; + cur_num_entries = 0; - if ( pass ) { PASSED(); } else { H5_FAILED(); } + result = H5C_get_cache_size(cache_ptr, NULL, &min_clean_size, + NULL, NULL); - if ( ! pass ) - HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + if ( result != SUCCEED ) { -} /* check_double_protect_err() */ + pass = FALSE; + failure_mssg = "H5C_get_cache_size failed 4.\n"; - -/*------------------------------------------------------------------------- - * Function: check_double_unprotect_err() - * - * Purpose: Verify that an attempt to unprotect an entry that is already - * unprotected will generate an error. - * - * Return: void - * - * Programmer: John Mainzer - * 6/24/04 - * - * Modifications: - * - *------------------------------------------------------------------------- - */ + } else if ( min_clean_size != (512 * 1024) ) { -static void -check_double_unprotect_err(void) -{ - const char * fcn_name = "check_double_unprotect_err()"; - herr_t result; - H5C_t * cache_ptr = NULL; - test_entry_t * entry_ptr; + pass = FALSE; + failure_mssg = + "H5C_get_cache_size reports unexpected min_clean_size 4.\n"; - TESTING("unprotect an unprotected entry error"); + } else if ( ( max_size != 0 ) || + ( cur_size != 0 ) || + ( cur_num_entries != 0 ) ) { - pass = TRUE; + pass = FALSE; + failure_mssg = "Phantom returns from H5C_get_cache_size?\n"; - /* allocate a cache, protect an entry, unprotect it, and then try to - * unprotect the entry again. This should fail. Destroy the cache - * -- should succeed. 
- */ + } + } if ( pass ) { - reset_entries(); + max_size = 0; + min_clean_size = 0; + cur_size = 0; + cur_num_entries = 0; - cache_ptr = setup_cache((size_t)(2 * 1024), - (size_t)(1 * 1024)); + result = H5C_get_cache_size(cache_ptr, NULL, NULL, &cur_size, NULL); - protect_entry(cache_ptr, 0, 0); + if ( result != SUCCEED ) { - unprotect_entry(cache_ptr, 0, 0, FALSE, FALSE); + pass = FALSE; + failure_mssg = "H5C_get_cache_size failed 5.\n"; - entry_ptr = &((entries[0])[0]); + } else if ( cur_size != + ((1500 * PICO_ENTRY_SIZE) + MONSTER_ENTRY_SIZE) ) { + + pass = FALSE; + failure_mssg = + "H5C_get_cache_size reports unexpected cur_size 5.\n"; + + } else if ( ( max_size != 0 ) || + ( min_clean_size != 0 ) || + ( cur_num_entries != 0 ) ) { + + pass = FALSE; + failure_mssg = "Phantom returns from H5C_get_cache_size?\n"; + + } } if ( pass ) { - result = H5C_unprotect(NULL, -1, -1, cache_ptr, &(types[0]), - entry_ptr->addr, (void *)entry_ptr, FALSE); + max_size = 0; + min_clean_size = 0; + cur_size = 0; + cur_num_entries = 0; - if ( result > 0 ) { + result = H5C_get_cache_size(cache_ptr, NULL, NULL, NULL, + &cur_num_entries); + + if ( result != SUCCEED ) { + + pass = FALSE; + failure_mssg = "H5C_get_cache_size failed 6.\n"; + + } else if ( cur_num_entries != 1501 ) { pass = FALSE; failure_mssg = - "attempt to unprotect an unprotected entry succeeded.\n"; - } + "H5C_get_cache_size reports unexpected cur_num_entries 2.\n"; + + } else if ( ( max_size != 0 ) || + ( min_clean_size != 0 ) || + ( cur_size != 0 ) ) { + + pass = FALSE; + failure_mssg = "Phantom returns from H5C_get_cache_size?\n"; + + } } if ( pass ) { @@ -4119,7 +14292,7 @@ check_double_unprotect_err(void) HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", fcn_name, failure_mssg); -} /* check_double_unprotect_err() */ +} /* check_auto_cache_resize_aux_fcns() */ /*------------------------------------------------------------------------- @@ -4142,11 +14315,16 @@ int main(void) { H5open(); - +#if 1 smoke_check_1(); smoke_check_2(); smoke_check_3(); smoke_check_4(); + smoke_check_5(); + smoke_check_6(); + smoke_check_7(); + smoke_check_8(); +#endif write_permitted_check(); check_flush_protected_err(); check_destroy_protected_err(); @@ -4154,6 +14332,11 @@ main(void) check_rename_err(); check_double_protect_err(); check_double_unprotect_err(); + check_auto_cache_resize(); + check_auto_cache_resize_disable(); + check_auto_cache_resize_epoch_markers(); + check_auto_cache_resize_input_errs(); + check_auto_cache_resize_aux_fcns(); return(0); -- cgit v0.12
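For reference, the following is a minimal sketch of the configuration pattern the new tests exercise: populate an H5C_auto_size_ctl_t, install it with H5C_set_cache_auto_resize_config(), and read it back with H5C_get_cache_auto_resize_config(). It is illustrative only and not part of the patch: the helper name configure_adaptive_cache() is hypothetical, error handling is simplified, and the field values are merely representative of those used in the tests above.

/* Illustrative sketch only -- assumes the cache private header used
 * elsewhere in this patch is visible.
 */
#include "H5Cprivate.h"

static herr_t
configure_adaptive_cache(H5C_t * cache_ptr)
{
    H5C_auto_size_ctl_t config;

    /* start from the cache's current configuration so that any field
     * not set explicitly below keeps a sane value.
     */
    if ( H5C_get_cache_auto_resize_config(cache_ptr, &config) != SUCCEED )
        return FAIL;

    config.version              = H5C__CURR_AUTO_SIZE_CTL_VER;
    config.rpt_fcn              = NULL;

    config.set_initial_size     = TRUE;
    config.initial_size         = 1 * 1024 * 1024;
    config.min_clean_fraction   = 0.25;
    config.max_size             = 16 * 1024 * 1024;
    config.min_size             = 1 * 1024 * 1024;
    config.epoch_length         = 50000;

    /* grow (here: double, capped at max_increment bytes per step) when
     * the hit rate over an epoch falls below 75%.
     */
    config.incr_mode            = H5C_incr__threshold;
    config.lower_hr_threshold   = 0.75;
    config.increment            = 2.0;
    config.apply_max_increment  = TRUE;
    config.max_increment        = 4 * 1024 * 1024;

    /* age out entries that have gone unused for three epochs, but only
     * while the hit rate stays above 99.9%.
     */
    config.decr_mode              = H5C_decr__age_out_with_threshold;
    config.upper_hr_threshold     = 0.999;
    config.decrement              = 0.9;
    config.apply_max_decrement    = TRUE;
    config.max_decrement          = 1 * 1024 * 1024;
    config.epochs_before_eviction = 3;
    config.apply_empty_reserve    = TRUE;
    config.empty_reserve          = 0.1;

    return H5C_set_cache_auto_resize_config(cache_ptr, &config);
}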