From adbb64c6cd100915dfd62235c10d48a3b4162bd9 Mon Sep 17 00:00:00 2001 From: Quincey Koziol Date: Mon, 15 Jun 2015 11:07:38 -0500 Subject: [svn-r27204] Description: Clean up compiler warnings before merging in v3 metadata cache changes from branch. Tested on: MacOSX/64 10.10.3 (amazon) w/serial & parallel Linux/32 2.6.x (jam) w/serial & parallel --- hl/test/test_lite.c | 16 +- hl/test/test_packet.c | 4 +- hl/test/test_table.c | 26 +- src/H5C.c | 41 +- src/H5Cpkg.h | 6476 ++++++++++++++++++++++++------------------------- src/H5Cprivate.h | 617 ++--- test/cache.c | 510 ++-- test/cache_api.c | 59 +- test/cache_common.c | 107 +- test/cache_common.h | 19 +- test/cmpd_dset.c | 20 +- test/cross_read.c | 2 +- test/dsets.c | 20 +- test/h5test.h | 18 - test/hyperslab.c | 2 +- test/tarray.c | 18 +- test/tattr.c | 10 +- test/tgenprop.c | 12 +- test/th5s.c | 2 +- test/tmisc.c | 16 +- test/tvltypes.c | 14 +- testpar/t_cache.c | 417 ++-- 22 files changed, 4056 insertions(+), 4370 deletions(-) diff --git a/hl/test/test_lite.c b/hl/test/test_lite.c index 2337610..d4e5f93 100644 --- a/hl/test/test_lite.c +++ b/hl/test/test_lite.c @@ -300,7 +300,7 @@ static int test_dsets( void ) for (i = 0; i < DIM; i++) { - if(!FLT_ABS_EQUAL(data_float_in[i],data_float_out[i])) { + if(!H5_FLT_ABS_EQUAL(data_float_in[i],data_float_out[i])) { goto out; } } @@ -311,7 +311,7 @@ static int test_dsets( void ) for (i = 0; i < DIM; i++) { - if(!FLT_ABS_EQUAL(data_float_in[i],data_float_out[i])) { + if(!H5_FLT_ABS_EQUAL(data_float_in[i],data_float_out[i])) { goto out; } } @@ -336,7 +336,7 @@ static int test_dsets( void ) for (i = 0; i < DIM; i++) { - if(!DBL_ABS_EQUAL(data_double_in[i],data_double_out[i])) { + if(!H5_DBL_ABS_EQUAL(data_double_in[i],data_double_out[i])) { goto out; } } @@ -347,7 +347,7 @@ static int test_dsets( void ) for (i = 0; i < DIM; i++) { - if(!DBL_ABS_EQUAL(data_double_in[i],data_double_out[i])) { + if(!H5_DBL_ABS_EQUAL(data_double_in[i],data_double_out[i])) { goto out; } } @@ -959,7 +959,7 @@ static herr_t make_attributes( hid_t loc_id, const char* obj_name ) for (i = 0; i < 5; i++) { - if(!FLT_ABS_EQUAL(attr_float_in[i],attr_float_out[i])) { + if(!H5_FLT_ABS_EQUAL(attr_float_in[i],attr_float_out[i])) { return -1; } } @@ -970,7 +970,7 @@ static herr_t make_attributes( hid_t loc_id, const char* obj_name ) for (i = 0; i < 5; i++) { - if(!FLT_ABS_EQUAL(attr_float_in[i],attr_float_out[i])) { + if(!H5_FLT_ABS_EQUAL(attr_float_in[i],attr_float_out[i])) { return -1; } } @@ -1003,7 +1003,7 @@ static herr_t make_attributes( hid_t loc_id, const char* obj_name ) for (i = 0; i < 5; i++) { - if(!DBL_ABS_EQUAL(attr_double_in[i],attr_double_out[i])) { + if(!H5_DBL_ABS_EQUAL(attr_double_in[i],attr_double_out[i])) { return -1; } } @@ -1014,7 +1014,7 @@ static herr_t make_attributes( hid_t loc_id, const char* obj_name ) for (i = 0; i < 5; i++) { - if(!DBL_ABS_EQUAL(attr_double_in[i],attr_double_out[i])) { + if(!H5_DBL_ABS_EQUAL(attr_double_in[i],attr_double_out[i])) { return -1; } } diff --git a/hl/test/test_packet.c b/hl/test/test_packet.c index 256892b..345aecb 100644 --- a/hl/test/test_packet.c +++ b/hl/test/test_packet.c @@ -75,8 +75,8 @@ static int cmp_par(size_t i, size_t j, particle_t *rbuf, particle_t *wbuf ) if ( ( HDstrcmp( rbuf[i].name, wbuf[j].name ) != 0 ) || rbuf[i].lati != wbuf[j].lati || rbuf[i].longi != wbuf[j].longi || - !FLT_ABS_EQUAL(rbuf[i].pressure,wbuf[j].pressure) || - !DBL_ABS_EQUAL(rbuf[i].temperature,wbuf[j].temperature) ) { + !H5_FLT_ABS_EQUAL(rbuf[i].pressure,wbuf[j].pressure) || + 
!H5_DBL_ABS_EQUAL(rbuf[i].temperature,wbuf[j].temperature) ) { return -1; } return 0; diff --git a/hl/test/test_table.c b/hl/test/test_table.c index 3bc5e5f..c9c3c19 100644 --- a/hl/test/test_table.c +++ b/hl/test/test_table.c @@ -166,8 +166,8 @@ static int cmp_par(hsize_t i, hsize_t j, particle_t *rbuf, particle_t *wbuf ) if ( ( HDstrcmp( rbuf[i].name, wbuf[j].name ) != 0 ) || rbuf[i].lati != wbuf[j].lati || rbuf[i].longi != wbuf[j].longi || - !FLT_ABS_EQUAL(rbuf[i].pressure,wbuf[j].pressure) || - !DBL_ABS_EQUAL(rbuf[i].temperature,wbuf[j].temperature) ) + !H5_FLT_ABS_EQUAL(rbuf[i].pressure,wbuf[j].pressure) || + !H5_DBL_ABS_EQUAL(rbuf[i].temperature,wbuf[j].temperature) ) { HDfprintf(stderr,"read and write buffers have differences\n"); HDfprintf(stderr,"%s %ld %f %f %d\n", @@ -1140,7 +1140,7 @@ static int test_table(hid_t fid, int do_write) { if ( rbuf[i].lati != position_in[i-NRECORDS_ADD+1].lati || rbuf[i].longi != position_in[i-NRECORDS_ADD+1].longi || - !FLT_ABS_EQUAL(rbuf[i].pressure,pressure_in[i-NRECORDS_ADD+1]) ) + !H5_FLT_ABS_EQUAL(rbuf[i].pressure,pressure_in[i-NRECORDS_ADD+1]) ) { HDfprintf(stderr,"%ld %f %d\n", rbuf[i].longi,(double)rbuf[i].pressure,rbuf[i].lati); @@ -1202,7 +1202,7 @@ static int test_table(hid_t fid, int do_write) /* Compare the extracted table with the initial values */ for ( i = 0; i < NRECORDS; i++ ) { - if ( !FLT_ABS_EQUAL(pressure_out[i], pressure_in[i]) ) { + if ( !H5_FLT_ABS_EQUAL(pressure_out[i], pressure_in[i]) ) { goto out; } } @@ -1265,7 +1265,7 @@ static int test_table(hid_t fid, int do_write) for( i = 0; i < NRECORDS; i++ ) { if ( ( HDstrcmp( namepre_out[i].name, namepre_in[i].name ) != 0 ) || - !FLT_ABS_EQUAL(namepre_out[i].pressure,namepre_in[i].pressure) ) { + !H5_FLT_ABS_EQUAL(namepre_out[i].pressure,namepre_in[i].pressure) ) { goto out; } } @@ -1294,7 +1294,7 @@ static int test_table(hid_t fid, int do_write) { hsize_t iistart = start; if ( ( HDstrcmp( namepre_out[i].name, namepre_in[iistart+i].name ) != 0 ) || - !FLT_ABS_EQUAL(namepre_out[i].pressure, namepre_in[iistart+i].pressure) ) { + !H5_FLT_ABS_EQUAL(namepre_out[i].pressure, namepre_in[iistart+i].pressure) ) { goto out; } } @@ -1353,7 +1353,7 @@ static int test_table(hid_t fid, int do_write) { if ( rbuf[i].lati != position_in[i-NRECORDS_ADD+1].lati || rbuf[i].longi != position_in[i-NRECORDS_ADD+1].longi || - !FLT_ABS_EQUAL(rbuf[i].pressure,pressure_in[i-NRECORDS_ADD+1]) ) + !H5_FLT_ABS_EQUAL(rbuf[i].pressure,pressure_in[i-NRECORDS_ADD+1]) ) goto out; } } @@ -1406,7 +1406,7 @@ static int test_table(hid_t fid, int do_write) /* compare the extracted table with the initial values */ for( i = 0; i < NRECORDS; i++ ) { - if ( !FLT_ABS_EQUAL(pressure_out[i], pressure_in[i]) ) { + if ( !H5_FLT_ABS_EQUAL(pressure_out[i], pressure_in[i]) ) { goto out; } } @@ -1472,7 +1472,7 @@ static int test_table(hid_t fid, int do_write) for( i = 0; i < NRECORDS; i++ ) { if ( ( HDstrcmp( namepre_out[i].name, namepre_in[i].name ) != 0 ) || - !FLT_ABS_EQUAL(namepre_out[i].pressure,namepre_in[i].pressure) ) { + !H5_FLT_ABS_EQUAL(namepre_out[i].pressure,namepre_in[i].pressure) ) { goto out; } } @@ -1503,7 +1503,7 @@ static int test_table(hid_t fid, int do_write) { int iistart = (int) start; if ( ( HDstrcmp( namepre_out[i].name, wbuf[iistart+(int)i].name ) != 0 ) || - !FLT_ABS_EQUAL(namepre_out[i].pressure, wbuf[iistart+(int)i].pressure) ) { + !H5_FLT_ABS_EQUAL(namepre_out[i].pressure, wbuf[iistart+(int)i].pressure) ) { goto out; } } @@ -1546,8 +1546,8 @@ static int test_table(hid_t fid, int do_write) if ( ( 
HDstrcmp( rbuf2[i].name, wbuf[i].name ) != 0 ) || rbuf2[i].lati != wbuf[i].lati || rbuf2[i].longi != wbuf[i].longi || - !FLT_ABS_EQUAL(rbuf2[i].pressure,wbuf[i].pressure) || - !DBL_ABS_EQUAL(rbuf2[i].temperature,wbuf[i].temperature) || + !H5_FLT_ABS_EQUAL(rbuf2[i].pressure,wbuf[i].pressure) || + !H5_DBL_ABS_EQUAL(rbuf2[i].temperature,wbuf[i].temperature) || rbuf2[i].new_field != buf_new[i] ) { goto out; } @@ -1587,7 +1587,7 @@ static int test_table(hid_t fid, int do_write) if ( ( HDstrcmp( rbuf3[i].name, wbuf[i].name ) != 0 ) || rbuf3[i].lati != wbuf[i].lati || rbuf3[i].longi != wbuf[i].longi || - !DBL_ABS_EQUAL(rbuf3[i].temperature,wbuf[i].temperature) ) { + !H5_DBL_ABS_EQUAL(rbuf3[i].temperature,wbuf[i].temperature) ) { goto out; } } diff --git a/src/H5C.c b/src/H5C.c index d6a89e6..5cdd65f 100644 --- a/src/H5C.c +++ b/src/H5C.c @@ -1718,10 +1718,10 @@ H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, unsign H5C_cache_entry_t * entry_ptr = NULL; H5C_cache_entry_t * next_entry_ptr = NULL; #if H5C_DO_SANITY_CHECKS - int64_t flushed_entries_count; - size_t flushed_entries_size; - int64_t initial_slist_len; - size_t initial_slist_size; + int64_t flushed_entries_count = 0; + int64_t flushed_entries_size = 0; + int64_t initial_slist_len = 0; + size_t initial_slist_size = 0; #endif /* H5C_DO_SANITY_CHECKS */ FUNC_ENTER_NOAPI(FAIL) @@ -1943,7 +1943,7 @@ H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, unsign if(entry_ptr->flush_dep_height == curr_flush_dep_height ) { #if H5C_DO_SANITY_CHECKS flushed_entries_count++; - flushed_entries_size += entry_ptr->size; + flushed_entries_size += (int64_t)entry_ptr->size; #endif /* H5C_DO_SANITY_CHECKS */ status = H5C_flush_single_entry(f, primary_dxpl_id, @@ -1976,7 +1976,7 @@ H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, unsign if(entry_ptr->flush_dep_height == curr_flush_dep_height ) { #if H5C_DO_SANITY_CHECKS flushed_entries_count++; - flushed_entries_size += entry_ptr->size; + flushed_entries_size += (int64_t)entry_ptr->size; #endif /* H5C_DO_SANITY_CHECKS */ status = H5C_flush_single_entry(f, primary_dxpl_id, @@ -2027,8 +2027,8 @@ end_of_inner_loop: HDassert( (initial_slist_len + cache_ptr->slist_len_increase - flushed_entries_count) == cache_ptr->slist_len ); - HDassert( (initial_slist_size + - (size_t)(cache_ptr->slist_size_increase) - + HDassert( (size_t)((int64_t)initial_slist_size + + cache_ptr->slist_size_increase - flushed_entries_size) == cache_ptr->slist_size ); #endif /* H5C_DO_SANITY_CHECKS */ @@ -4647,8 +4647,6 @@ H5C_stats(H5C_t * cache_ptr, #endif /* H5C_COLLECT_CACHE_STATS */ display_detailed_stats) { - herr_t ret_value = SUCCEED; /* Return value */ - #if H5C_COLLECT_CACHE_STATS int i; int64_t total_hits = 0; @@ -4680,11 +4678,12 @@ H5C_stats(H5C_t * cache_ptr, size_t aggregate_max_size = 0; int32_t aggregate_max_pins = 0; double hit_rate; - double average_successful_search_depth = 0.0; - double average_failed_search_depth = 0.0; - double average_entries_skipped_per_calls_to_msic = 0.0; - double average_entries_scanned_per_calls_to_msic = 0.0; + double average_successful_search_depth = 0.0f; + double average_failed_search_depth = 0.0f; + double average_entries_skipped_per_calls_to_msic = 0.0f; + double average_entries_scanned_per_calls_to_msic = 0.0f; #endif /* H5C_COLLECT_CACHE_STATS */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(FAIL) @@ -4748,10 +4747,10 @@ H5C_stats(H5C_t * cache_ptr, if ( ( total_hits > 0 ) || ( total_misses > 0 ) ) { 
- hit_rate = 100.0 * ((double)(total_hits)) / + hit_rate = (double)100.0f * ((double)(total_hits)) / ((double)(total_hits + total_misses)); } else { - hit_rate = 0.0; + hit_rate = 0.0f; } if ( cache_ptr->successful_ht_searches > 0 ) { @@ -4918,7 +4917,7 @@ H5C_stats(H5C_t * cache_ptr, HDfprintf(stdout, "%s MSIC: Average/max entries skipped = %lf / %ld\n", cache_ptr->prefix, - (float)average_entries_skipped_per_calls_to_msic, + (double)average_entries_skipped_per_calls_to_msic, (long)(cache_ptr->max_entries_skipped_in_msic)); if (cache_ptr->calls_to_msic > 0) { @@ -4929,7 +4928,7 @@ H5C_stats(H5C_t * cache_ptr, HDfprintf(stdout, "%s MSIC: Average/max entries scanned = %lf / %ld\n", cache_ptr->prefix, - (float)average_entries_scanned_per_calls_to_msic, + (double)average_entries_scanned_per_calls_to_msic, (long)(cache_ptr->max_entries_scanned_in_msic)); HDfprintf(stdout, "%s MSIC: Scanned to make space(evict) = %lld\n", @@ -4973,10 +4972,10 @@ H5C_stats(H5C_t * cache_ptr, if ( ( cache_ptr->hits[i] > 0 ) || ( cache_ptr->misses[i] > 0 ) ) { - hit_rate = 100.0 * ((double)(cache_ptr->hits[i])) / + hit_rate = (double)100.0f * ((double)(cache_ptr->hits[i])) / ((double)(cache_ptr->hits[i] + cache_ptr->misses[i])); } else { - hit_rate = 0.0; + hit_rate = 0.0f; } HDfprintf(stdout, @@ -8229,7 +8228,6 @@ H5C_flush_single_entry(H5F_t * f, hbool_t was_dirty; hbool_t destroy_entry; herr_t status; - int type_id; unsigned flush_flags = H5C_CALLBACK__NO_FLAGS_SET; H5C_cache_entry_t * entry_ptr = NULL; herr_t ret_value = SUCCEED; /* Return value */ @@ -8330,7 +8328,6 @@ H5C_flush_single_entry(H5F_t * f, #endif /* H5_HAVE_PARALLEL */ was_dirty = entry_ptr->is_dirty; - type_id = entry_ptr->type->id; entry_ptr->flush_marker = FALSE; diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h index 7c278e8..5df84cd 100644 --- a/src/H5Cpkg.h +++ b/src/H5Cpkg.h @@ -35,14 +35,16 @@ #ifndef _H5Cpkg_H #define _H5Cpkg_H - /* Get package's private header */ #include "H5Cprivate.h" - -/* Get needed headers */ +/* Other private headers needed by this file */ #include "H5SLprivate.h" /* Skip lists */ +/**************************/ +/* Package Private Macros */ +/**************************/ + /* With the introduction of the fractal heap, it is now possible for * entries to be dirtied, resized, and/or moved in the flush callbacks. * As a result, on flushes, it may be necessary to make multiple passes @@ -53,3552 +55,3536 @@ * * -- JRM */ - #define H5C__MAX_PASSES_ON_FLUSH 4 - +/* Cache configuration settings */ +#define H5C__HASH_TABLE_LEN (64 * 1024) /* must be a power of 2 */ +#define H5C__H5C_T_MAGIC 0x005CAC0E +#define H5C__MAX_NUM_TYPE_IDS 27 +#define H5C__PREFIX_LEN 32 /**************************************************************************** * - * structure H5C_t - * - * Catchall structure for all variables specific to an instance of the cache. - * - * While the individual fields of the structure are discussed below, the - * following overview may be helpful. - * - * Entries in the cache are stored in an instance of H5TB_TREE, indexed on - * the entry's disk address. While the H5TB_TREE is less efficient than - * hash table, it keeps the entries in address sorted order. As flushes - * in parallel mode are more efficient if they are issued in increasing - * address order, this is a significant benefit. Also the H5TB_TREE code - * was readily available, which reduced development time. - * - * While the cache was designed with multiple replacement policies in mind, - * at present only a modified form of LRU is supported. 
- * - * JRM - 4/26/04 - * - * Profiling has indicated that searches in the instance of H5TB_TREE are - * too expensive. To deal with this issue, I have augmented the cache - * with a hash table in which all entries will be stored. Given the - * advantages of flushing entries in increasing address order, the TBBT - * is retained, but only dirty entries are stored in it. At least for - * now, we will leave entries in the TBBT after they are flushed. - * - * Note that index_size and index_len now refer to the total size of - * and number of entries in the hash table. - * - * JRM - 7/19/04 - * - * The TBBT has since been replaced with a skip list. This change - * greatly predates this note. - * - * JRM - 9/26/05 - * - * magic: Unsigned 32 bit integer always set to H5C__H5C_T_MAGIC. - * This field is used to validate pointers to instances of - * H5C_t. - * - * flush_in_progress: Boolean flag indicating whether a flush is in - * progress. - * - * trace_file_ptr: File pointer pointing to the trace file, which is used - * to record cache operations for use in simulations and design - * studies. This field will usually be NULL, indicating that - * no trace file should be recorded. - * - * Since much of the code supporting the parallel metadata - * cache is in H5AC, we don't write the trace file from - * H5C. Instead, H5AC reads the trace_file_ptr as needed. - * - * When we get to using H5C in other places, we may add - * code to write trace file data at the H5C level as well. - * - * aux_ptr: Pointer to void used to allow wrapper code to associate - * its data with an instance of H5C_t. The H5C cache code - * sets this field to NULL, and otherwise leaves it alone. - * - * max_type_id: Integer field containing the maximum type id number assigned - * to a type of entry in the cache. All type ids from 0 to - * max_type_id inclusive must be defined. The names of the - * types are stored in the type_name_table discussed below, and - * indexed by the ids. - * - * type_name_table_ptr: Pointer to an array of pointer to char of length - * max_type_id + 1. The strings pointed to by the entries - * in the array are the names of the entry types associated - * with the indexing type IDs. - * - * max_cache_size: Nominal maximum number of bytes that may be stored in the - * cache. This value should be viewed as a soft limit, as the - * cache can exceed this value under the following circumstances: - * - * a) All entries in the cache are protected, and the cache is - * asked to insert a new entry. In this case the new entry - * will be created. If this causes the cache to exceed - * max_cache_size, it will do so. The cache will attempt - * to reduce its size as entries are unprotected. - * - * b) When running in parallel mode, the cache may not be - * permitted to flush a dirty entry in response to a read. - * If there are no clean entries available to evict, the - * cache will exceed its maximum size. Again the cache - * will attempt to reduce its size to the max_cache_size - * limit on the next cache write. - * - * c) When an entry increases in size, the cache may exceed - * the max_cache_size limit until the next time the cache - * attempts to load or insert an entry. - * - * min_clean_size: Nominal minimum number of clean bytes in the cache. - * The cache attempts to maintain this number of bytes of - * clean data so as to avoid case b) above. Again, this is - * a soft limit. 
- * - * - * In addition to the call back functions required for each entry, the - * cache requires the following call back functions for this instance of - * the cache as a whole: - * - * check_write_permitted: In certain applications, the cache may not - * be allowed to write to disk at certain time. If specified, - * the check_write_permitted function is used to determine if - * a write is permissible at any given point in time. - * - * If no such function is specified (i.e. this field is NULL), - * the cache uses the following write_permitted field to - * determine whether writes are permitted. - * - * write_permitted: If check_write_permitted is NULL, this boolean flag - * indicates whether writes are permitted. + * We maintain doubly linked lists of instances of H5C_cache_entry_t for a + * variety of reasons -- protected list, LRU list, and the clean and dirty + * LRU lists at present. The following macros support linking and unlinking + * of instances of H5C_cache_entry_t by both their regular and auxilary next + * and previous pointers. * - * log_flush: If provided, this function is called whenever a dirty - * entry is flushed to disk. + * The size and length fields are also maintained. * + * Note that the relevant pair of prev and next pointers are presumed to be + * NULL on entry in the insertion macros. * - * In cases where memory is plentiful, and performance is an issue, it may - * be useful to disable all cache evictions, and thereby postpone metadata - * writes. The following field is used to implement this. + * Finally, observe that the sanity checking macros evaluate to the empty + * string when H5C_DO_SANITY_CHECKS is FALSE. They also contain calls + * to the HGOTO_ERROR macro, which may not be appropriate in all cases. + * If so, we will need versions of the insertion and deletion macros which + * do not reference the sanity checking macros. + * JRM - 5/5/04 * - * evictions_enabled: Boolean flag that is initialized to TRUE. When - * this flag is set to FALSE, the metadata cache will not - * attempt to evict entries to make space for newly protected - * entries, and instead the will grow without limit. - * - * Needless to say, this feature must be used with care. + * Changes: * + * - Removed the line: * - * The cache requires an index to facilitate searching for entries. The - * following fields support that index. + * ( ( (Size) == (entry_ptr)->size ) && ( (len) != 1 ) ) || * - * index_len: Number of entries currently in the hash table used to index - * the cache. + * from the H5C__DLL_PRE_REMOVE_SC macro. With the addition of the + * epoch markers used in the age out based cache size reduction algorithm, + * this invariant need not hold, as the epoch markers are of size 0. * - * index_size: Number of bytes of cache entries currently stored in the - * hash table used to index the cache. + * One could argue that I should have given the epoch markers a positive + * size, but this would break the index_size = LRU_list_size + pl_size + * + pel_size invariant. * - * This value should not be mistaken for footprint of the - * cache in memory. The average cache entry is small, and - * the cache has a considerable overhead. Multiplying the - * index_size by two should yield a conservative estimate - * of the cache's memory footprint. + * Alternatively, I could pass the current decr_mode in to the macro, + * and just skip the check whenever epoch markers may be in use. * - * clean_index_size: Number of bytes of clean entries currently stored in - * the hash table. 
Note that the index_size field (above) - * is also the sum of the sizes of all entries in the cache. - * Thus we should have the invarient that clean_index_size + - * dirty_index_size == index_size. + * However, any size errors should be caught when the cache is flushed + * and destroyed. Until we are tracking such an error, this should be + * good enough. + * JRM - 12/9/04 * - * WARNING: * - * 1) The clean_index_size field is not maintained by the - * index macros, as the hash table doesn't care whether - * the entry is clean or dirty. Instead the field is - * maintained in the H5C__UPDATE_RP macros. + * - In the H5C__DLL_PRE_INSERT_SC macro, replaced the lines: * - * 2) The value of the clean_index_size must not be mistaken - * for the current clean size of the cache. Rather, the - * clean size of the cache is the current value of - * clean_index_size plus the amount of empty space (if any) - * in the cache. + * ( ( (len) == 1 ) && + * ( ( (head_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || + * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) + * ) + * ) || * - * dirty_index_size: Number of bytes of dirty entries currently stored in - * the hash table. Note that the index_size field (above) - * is also the sum of the sizes of all entries in the cache. - * Thus we should have the invarient that clean_index_size + - * dirty_index_size == index_size. + * with: * - * WARNING: + * ( ( (len) == 1 ) && + * ( ( (head_ptr) != (tail_ptr) ) || + * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) + * ) + * ) || * - * 1) The dirty_index_size field is not maintained by the - * index macros, as the hash table doesn't care whether - * the entry is clean or dirty. Instead the field is - * maintained in the H5C__UPDATE_RP macros. + * Epoch markers have size 0, so we can now have a non-empty list with + * zero size. Hence the "( (Size) <= 0 )" clause cause false failures + * in the sanity check. Since "Size" is typically a size_t, it can't + * take on negative values, and thus the revised clause "( (Size) < 0 )" + * caused compiler warnings. + * JRM - 12/22/04 * - * index: Array of pointer to H5C_cache_entry_t of size - * H5C__HASH_TABLE_LEN. At present, this value is a power - * of two, not the usual prime number. + * - In the H5C__DLL_SC macro, replaced the lines: * - * I hope that the variable size of cache elements, the large - * hash table size, and the way in which HDF5 allocates space - * will combine to avoid problems with periodicity. If so, we - * can use a trivial hash function (a bit-and and a 3 bit left - * shift) with some small savings. + * ( ( (len) == 1 ) && + * ( ( (head_ptr) != (tail_ptr) ) || ( (cache_ptr)->size <= 0 ) || + * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) + * ) + * ) || * - * If not, it will become evident in the statistics. Changing - * to the usual prime number length hash table will require - * changing the H5C__HASH_FCN macro and the deletion of the - * H5C__HASH_MASK #define. No other changes should be required. + * with * + * ( ( (len) == 1 ) && + * ( ( (head_ptr) != (tail_ptr) ) || + * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) + * ) + * ) || * - * When we flush the cache, we need to write entries out in increasing - * address order. An instance of a skip list is used to store dirty entries in - * sorted order. Whether it is cheaper to sort the dirty entries as needed, - * or to maintain the list is an open question. At a guess, it depends - * on how frequently the cache is flushed. We will see how it goes. 
+ * Epoch markers have size 0, so we can now have a non-empty list with + * zero size. Hence the "( (Size) <= 0 )" clause cause false failures + * in the sanity check. Since "Size" is typically a size_t, it can't + * take on negative values, and thus the revised clause "( (Size) < 0 )" + * caused compiler warnings. + * JRM - 1/10/05 * - * For now at least, I will not remove dirty entries from the list as they - * are flushed. (this has been changed -- dirty entries are now removed from - * the skip list as they are flushed. JRM - 10/25/05) + * - Added the H5C__DLL_UPDATE_FOR_SIZE_CHANGE macro and the associated + * sanity checking macros. These macro are used to update the size of + * a DLL when one of its entries changes size. * - * slist_len: Number of entries currently in the skip list - * used to maintain a sorted list of dirty entries in the - * cache. + * JRM - 9/8/05 * - * slist_size: Number of bytes of cache entries currently stored in the - * skip list used to maintain a sorted list of - * dirty entries in the cache. - * - * slist_ptr: pointer to the instance of H5SL_t used maintain a sorted - * list of dirty entries in the cache. This sorted list has - * two uses: - * - * a) It allows us to flush dirty entries in increasing address - * order, which results in significant savings. - * - * b) It facilitates checking for adjacent dirty entries when - * attempting to evict entries from the cache. While we - * don't use this at present, I hope that this will allow - * some optimizations when I get to it. - * - * num_last_entries: The number of entries in the cache that can only be - * flushed after all other entries in the cache have - * been flushed. At this time, this will only ever be - * one entry (the superblock), and the code has been - * protected with HDasserts to enforce this. This restraint - * can certainly be relaxed in the future if the need for - * multiple entries being flushed last arises, though - * explicit tests for that case should be added when said - * HDasserts are removed. - * - * With the addition of the fractal heap, the cache must now deal with - * the case in which entries may be dirtied, moved, or have their sizes - * changed during a flush. To allow sanity checks in this situation, the - * following two fields have been added. They are only compiled in when - * H5C_DO_SANITY_CHECKS is TRUE. - * - * slist_len_increase: Number of entries that have been added to the - * slist since the last time this field was set to zero. - * - * slist_size_increase: Total size of all entries that have been added - * to the slist since the last time this field was set to - * zero. - * - * - * When a cache entry is protected, it must be removed from the LRU - * list(s) as it cannot be either flushed or evicted until it is unprotected. - * The following fields are used to implement the protected list (pl). - * - * pl_len: Number of entries currently residing on the protected list. - * - * pl_size: Number of bytes of cache entries currently residing on the - * protected list. - * - * pl_head_ptr: Pointer to the head of the doubly linked list of protected - * entries. Note that cache entries on this list are linked - * by their next and prev fields. - * - * This field is NULL if the list is empty. - * - * pl_tail_ptr: Pointer to the tail of the doubly linked list of protected - * entries. Note that cache entries on this list are linked - * by their next and prev fields. - * - * This field is NULL if the list is empty. 
- * - * - * For very frequently used entries, the protect/unprotect overhead can - * become burdensome. To avoid this overhead, I have modified the cache - * to allow entries to be "pinned". A pinned entry is similar to a - * protected entry, in the sense that it cannot be evicted, and that - * the entry can be modified at any time. - * - * Pinning an entry has the following implications: - * - * 1) A pinned entry cannot be evicted. Thus unprotected - * pinned entries reside in the pinned entry list, instead - * of the LRU list(s) (or other lists maintained by the current - * replacement policy code). - * - * 2) A pinned entry can be accessed or modified at any time. - * Therefore, the cache must check with the entry owner - * before flushing it. If permission is denied, the - * cache just skips the entry in the flush. - * - * 3) A pinned entry can be marked as dirty (and possibly - * change size) while it is unprotected. - * - * 4) The flush-destroy code must allow pinned entries to - * be unpinned (and possibly unprotected) during the - * flush. - * - * Since pinned entries cannot be evicted, they must be kept on a pinned - * entry list (pel), instead of being entrusted to the replacement policy - * code. - * - * Maintaining the pinned entry list requires the following fields: - * - * pel_len: Number of entries currently residing on the pinned - * entry list. - * - * pel_size: Number of bytes of cache entries currently residing on - * the pinned entry list. - * - * pel_head_ptr: Pointer to the head of the doubly linked list of pinned - * but not protected entries. Note that cache entries on - * this list are linked by their next and prev fields. - * - * This field is NULL if the list is empty. - * - * pel_tail_ptr: Pointer to the tail of the doubly linked list of pinned - * but not protected entries. Note that cache entries on - * this list are linked by their next and prev fields. - * - * This field is NULL if the list is empty. - * - * - * The cache must have a replacement policy, and the fields supporting this - * policy must be accessible from this structure. - * - * While there has been interest in several replacement policies for - * this cache, the initial development schedule is tight. Thus I have - * elected to support only a modified LRU (least recently used) policy - * for the first cut. - * - * To further simplify matters, I have simply included the fields needed - * by the modified LRU in this structure. When and if we add support for - * other policies, it will probably be easiest to just add the necessary - * fields to this structure as well -- we only create one instance of this - * structure per file, so the overhead is not excessive. - * - * - * Fields supporting the modified LRU policy: - * - * See most any OS text for a discussion of the LRU replacement policy. - * - * When operating in parallel mode, we must ensure that a read does not - * cause a write. If it does, the process will hang, as the write will - * be collective and the other processes will not know to participate. - * - * To deal with this issue, I have modified the usual LRU policy by adding - * clean and dirty LRU lists to the usual LRU list. - * - * The clean LRU list is simply the regular LRU list with all dirty cache - * entries removed. - * - * Similarly, the dirty LRU list is the regular LRU list with all the clean - * cache entries removed. - * - * When reading in parallel mode, we evict from the clean LRU list only. 
- * This implies that we must try to ensure that the clean LRU list is - * reasonably well stocked at all times. - * - * We attempt to do this by trying to flush enough entries on each write - * to keep the cLRU_list_size >= min_clean_size. - * - * Even if we start with a completely clean cache, a sequence of protects - * without unprotects can empty the clean LRU list. In this case, the - * cache must grow temporarily. At the next write, we will attempt to - * evict enough entries to reduce index_size to less than max_cache_size. - * While this will usually be possible, all bets are off if enough entries - * are protected. - * - * Discussions of the individual fields used by the modified LRU replacement - * policy follow: - * - * LRU_list_len: Number of cache entries currently on the LRU list. - * - * Observe that LRU_list_len + pl_len must always equal - * index_len. - * - * LRU_list_size: Number of bytes of cache entries currently residing on the - * LRU list. - * - * Observe that LRU_list_size + pl_size must always equal - * index_size. - * - * LRU_head_ptr: Pointer to the head of the doubly linked LRU list. Cache - * entries on this list are linked by their next and prev fields. - * - * This field is NULL if the list is empty. - * - * LRU_tail_ptr: Pointer to the tail of the doubly linked LRU list. Cache - * entries on this list are linked by their next and prev fields. - * - * This field is NULL if the list is empty. - * - * cLRU_list_len: Number of cache entries currently on the clean LRU list. - * - * Observe that cLRU_list_len + dLRU_list_len must always - * equal LRU_list_len. - * - * cLRU_list_size: Number of bytes of cache entries currently residing on - * the clean LRU list. - * - * Observe that cLRU_list_size + dLRU_list_size must always - * equal LRU_list_size. - * - * cLRU_head_ptr: Pointer to the head of the doubly linked clean LRU list. - * Cache entries on this list are linked by their aux_next and - * aux_prev fields. - * - * This field is NULL if the list is empty. - * - * cLRU_tail_ptr: Pointer to the tail of the doubly linked clean LRU list. - * Cache entries on this list are linked by their aux_next and - * aux_prev fields. - * - * This field is NULL if the list is empty. - * - * dLRU_list_len: Number of cache entries currently on the dirty LRU list. - * - * Observe that cLRU_list_len + dLRU_list_len must always - * equal LRU_list_len. - * - * dLRU_list_size: Number of cache entries currently on the dirty LRU list. - * - * Observe that cLRU_list_len + dLRU_list_len must always - * equal LRU_list_len. - * - * dLRU_head_ptr: Pointer to the head of the doubly linked dirty LRU list. - * Cache entries on this list are linked by their aux_next and - * aux_prev fields. - * - * This field is NULL if the list is empty. - * - * dLRU_tail_ptr: Pointer to the tail of the doubly linked dirty LRU list. - * Cache entries on this list are linked by their aux_next and - * aux_prev fields. - * - * This field is NULL if the list is empty. - * - * - * Automatic cache size adjustment: - * - * While the default cache size is adequate for most cases, we can run into - * cases where the default is too small. Ideally, we will let the user - * adjust the cache size as required. However, this is not possible in all - * cases. Thus I have added automatic cache size adjustment code. 
- * - * The configuration for the automatic cache size adjustment is stored in - * the structure described below: - * - * size_increase_possible: Depending on the configuration data given - * in the resize_ctl field, it may or may not be possible - * to increase the size of the cache. Rather than test for - * all the ways this can happen, we simply set this flag when - * we receive a new configuration. - * - * flash_size_increase_possible: Depending on the configuration data given - * in the resize_ctl field, it may or may not be possible - * for a flash size increase to occur. We set this flag - * whenever we receive a new configuration so as to avoid - * repeated calculations. - * - * flash_size_increase_threshold: If a flash cache size increase is possible, - * this field is used to store the minimum size of a new entry - * or size increase needed to trigger a flash cache size - * increase. Note that this field must be updated whenever - * the size of the cache is changed. - * - * size_decrease_possible: Depending on the configuration data given - * in the resize_ctl field, it may or may not be possible - * to decrease the size of the cache. Rather than test for - * all the ways this can happen, we simply set this flag when - * we receive a new configuration. - * - * cache_full: Boolean flag used to keep track of whether the cache is - * full, so we can refrain from increasing the size of a - * cache which hasn't used up the space allotted to it. - * - * The field is initialized to FALSE, and then set to TRUE - * whenever we attempt to make space in the cache. - * - * resize_enabled: This is another convenience flag which is set whenever - * a new set of values for resize_ctl are provided. Very - * simply, - * - * resize_enabled = size_increase_possible || - * size_decrease_possible; - * - * size_decreased: Boolean flag set to TRUE whenever the maximum cache - * size is decreased. The flag triggers a call to - * H5C_make_space_in_cache() on the next call to H5C_protect(). - * - * resize_ctl: Instance of H5C_auto_size_ctl_t containing configuration - * data for automatic cache resizing. - * - * epoch_markers_active: Integer field containing the number of epoch - * markers currently in use in the LRU list. This value - * must be in the range [0, H5C__MAX_EPOCH_MARKERS - 1]. - * - * epoch_marker_active: Array of boolean of length H5C__MAX_EPOCH_MARKERS. - * This array is used to track which epoch markers are currently - * in use. - * - * epoch_marker_ringbuf: Array of int of length H5C__MAX_EPOCH_MARKERS + 1. - * - * To manage the epoch marker cache entries, it is necessary - * to track their order in the LRU list. This is done with - * epoch_marker_ringbuf. When markers are inserted at the - * head of the LRU list, the index of the marker in the - * epoch_markers array is inserted at the tail of the ring - * buffer. When it becomes the epoch_marker_active'th marker - * in the LRU list, it will have worked its way to the head - * of the ring buffer as well. This allows us to remove it - * without scanning the LRU list if such is required. - * - * epoch_marker_ringbuf_first: Integer field containing the index of the - * first entry in the ring buffer. - * - * epoch_marker_ringbuf_last: Integer field containing the index of the - * last entry in the ring buffer. - * - * epoch_marker_ringbuf_size: Integer field containing the number of entries - * in the ring buffer. - * - * epoch_markers: Array of instances of H5C_cache_entry_t of length - * H5C__MAX_EPOCH_MARKERS. 
The entries are used as markers - * in the LRU list to identify cache entries that haven't - * been accessed for some (small) specified number of - * epochs. These entries (if any) can then be evicted and - * the cache size reduced -- ideally without evicting any - * of the current working set. Needless to say, the epoch - * length and the number of epochs before an unused entry - * must be chosen so that all, or almost all, the working - * set will be accessed before the limit. - * - * Epoch markers only appear in the LRU list, never in - * the index or slist. While they are of type - * H5C__EPOCH_MARKER_TYPE, and have associated class - * functions, these functions should never be called. - * - * The addr fields of these instances of H5C_cache_entry_t - * are set to the index of the instance in the epoch_markers - * array, the size is set to 0, and the type field points - * to the constant structure epoch_marker_class defined - * in H5C.c. The next and prev fields are used as usual - * to link the entry into the LRU list. - * - * All other fields are unused. - * - * - * Cache hit rate collection fields: - * - * We supply the current cache hit rate on request, so we must keep a - * simple cache hit rate computation regardless of whether statistics - * collection is enabled. The following fields support this capability. - * - * cache_hits: Number of cache hits since the last time the cache hit - * rate statistics were reset. Note that when automatic cache - * re-sizing is enabled, this field will be reset every automatic - * resize epoch. - * - * cache_accesses: Number of times the cache has been accessed while - * since the last since the last time the cache hit rate statistics - * were reset. Note that when automatic cache re-sizing is enabled, - * this field will be reset every automatic resize epoch. - * - * - * Statistics collection fields: - * - * When enabled, these fields are used to collect statistics as described - * below. The first set are collected only when H5C_COLLECT_CACHE_STATS - * is true. - * - * hits: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the number of times an entry with type id - * equal to the array index has been in cache when requested in - * the current epoch. - * - * misses: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the number of times an entry with type id - * equal to the array index has not been in cache when - * requested in the current epoch. - * - * write_protects: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The - * cells are used to record the number of times an entry with - * type id equal to the array index has been write protected - * in the current epoch. - * - * Observe that (hits + misses) = (write_protects + read_protects). - * - * read_protects: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The - * cells are used to record the number of times an entry with - * type id equal to the array index has been read protected in - * the current epoch. - * - * Observe that (hits + misses) = (write_protects + read_protects). - * - * max_read_protects: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. - * The cells are used to maximum number of simultaneous read - * protects on any entry with type id equal to the array index - * in the current epoch. - * - * insertions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. 
The cells - * are used to record the number of times an entry with type - * id equal to the array index has been inserted into the - * cache in the current epoch. - * - * pinned_insertions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. - * The cells are used to record the number of times an entry - * with type id equal to the array index has been inserted - * pinned into the cache in the current epoch. - * - * clears: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the number of times a dirty entry with type - * id equal to the array index has been cleared in the current - * epoch. - * - * flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the number of times an entry with type id - * equal to the array index has been written to disk in the - * current epoch. - * - * evictions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the number of times an entry with type id - * equal to the array index has been evicted from the cache in - * the current epoch. - * - * moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the number of times an entry with type - * id equal to the array index has been moved in the current - * epoch. - * - * entry_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. - * The cells are used to record the number of times an entry - * with type id equal to the array index has been moved - * during its flush callback in the current epoch. - * - * cache_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. - * The cells are used to record the number of times an entry - * with type id equal to the array index has been moved - * during a cache flush in the current epoch. - * - * pins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the number of times an entry with type - * id equal to the array index has been pinned in the current - * epoch. - * - * unpins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the number of times an entry with type - * id equal to the array index has been unpinned in the current - * epoch. - * - * dirty_pins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the number of times an entry with type - * id equal to the array index has been marked dirty while pinned - * in the current epoch. - * - * pinned_flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The - * cells are used to record the number of times an entry - * with type id equal to the array index has been flushed while - * pinned in the current epoch. - * - * pinned_clears: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The - * cells are used to record the number of times an entry - * with type id equal to the array index has been cleared while - * pinned in the current epoch. - * - * size_increases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. - * The cells are used to record the number of times an entry - * with type id equal to the array index has increased in - * size in the current epoch. - * - * size_decreases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. - * The cells are used to record the number of times an entry - * with type id equal to the array index has decreased in - * size in the current epoch. - * - * entry_flush_size_changes: Array of int64 of length - * H5C__MAX_NUM_TYPE_IDS + 1. 
The cells are used to record - * the number of times an entry with type id equal to the - * array index has changed size while in its flush callback. - * - * cache_flush_size_changes: Array of int64 of length - * H5C__MAX_NUM_TYPE_IDS + 1. The cells are used to record - * the number of times an entry with type id equal to the - * array index has changed size during a cache flush + ****************************************************************************/ + +#if H5C_DO_SANITY_CHECKS + +#define H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ +if ( ( (head_ptr) == NULL ) || \ + ( (tail_ptr) == NULL ) || \ + ( (entry_ptr) == NULL ) || \ + ( (len) <= 0 ) || \ + ( (Size) < (entry_ptr)->size ) || \ + ( ( (entry_ptr)->prev == NULL ) && ( (head_ptr) != (entry_ptr) ) ) || \ + ( ( (entry_ptr)->next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) || \ + ( ( (len) == 1 ) && \ + ( ! ( ( (head_ptr) == (entry_ptr) ) && \ + ( (tail_ptr) == (entry_ptr) ) && \ + ( (entry_ptr)->next == NULL ) && \ + ( (entry_ptr)->prev == NULL ) && \ + ( (Size) == (entry_ptr)->size ) \ + ) \ + ) \ + ) \ + ) { \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "DLL pre remove SC failed") \ +} + +#define H5C__DLL_SC(head_ptr, tail_ptr, len, Size, fv) \ +if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ + ( (head_ptr) != (tail_ptr) ) \ + ) || \ + ( (len) < 0 ) || \ + ( (Size) < 0 ) || \ + ( ( (len) == 1 ) && \ + ( ( (head_ptr) != (tail_ptr) ) || \ + ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \ + ) \ + ) || \ + ( ( (len) >= 1 ) && \ + ( ( (head_ptr) == NULL ) || ( (head_ptr)->prev != NULL ) || \ + ( (tail_ptr) == NULL ) || ( (tail_ptr)->next != NULL ) \ + ) \ + ) \ + ) { \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "DLL sanity check failed") \ +} + +#define H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ +if ( ( (entry_ptr) == NULL ) || \ + ( (entry_ptr)->next != NULL ) || \ + ( (entry_ptr)->prev != NULL ) || \ + ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ + ( (head_ptr) != (tail_ptr) ) \ + ) || \ + ( (len) < 0 ) || \ + ( ( (len) == 1 ) && \ + ( ( (head_ptr) != (tail_ptr) ) || \ + ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \ + ) \ + ) || \ + ( ( (len) >= 1 ) && \ + ( ( (head_ptr) == NULL ) || ( (head_ptr)->prev != NULL ) || \ + ( (tail_ptr) == NULL ) || ( (tail_ptr)->next != NULL ) \ + ) \ + ) \ + ) { \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "DLL pre insert SC failed") \ +} + +#define H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) \ +if ( ( (dll_len) <= 0 ) || \ + ( (dll_size) <= 0 ) || \ + ( (old_size) <= 0 ) || \ + ( (old_size) > (dll_size) ) || \ + ( (new_size) <= 0 ) || \ + ( ( (dll_len) == 1 ) && ( (old_size) != (dll_size) ) ) ) { \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "DLL pre size update SC failed") \ +} + +#define H5C__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) \ +if ( ( (new_size) > (dll_size) ) || \ + ( ( (dll_len) == 1 ) && ( (new_size) != (dll_size) ) ) ) { \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "DLL post size update SC failed") \ +} + +#else /* H5C_DO_SANITY_CHECKS */ + +#define H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) +#define H5C__DLL_SC(head_ptr, tail_ptr, len, Size, fv) +#define H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) +#define H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) +#define H5C__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) + +#endif /* H5C_DO_SANITY_CHECKS */ 
+ + +#define H5C__DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val) \ +{ \ + H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \ + fail_val) \ + if ( (head_ptr) == NULL ) \ + { \ + (head_ptr) = (entry_ptr); \ + (tail_ptr) = (entry_ptr); \ + } \ + else \ + { \ + (tail_ptr)->next = (entry_ptr); \ + (entry_ptr)->prev = (tail_ptr); \ + (tail_ptr) = (entry_ptr); \ + } \ + (len)++; \ + (Size) += (entry_ptr)->size; \ +} /* H5C__DLL_APPEND() */ + +#define H5C__DLL_PREPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val) \ +{ \ + H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \ + fail_val) \ + if ( (head_ptr) == NULL ) \ + { \ + (head_ptr) = (entry_ptr); \ + (tail_ptr) = (entry_ptr); \ + } \ + else \ + { \ + (head_ptr)->prev = (entry_ptr); \ + (entry_ptr)->next = (head_ptr); \ + (head_ptr) = (entry_ptr); \ + } \ + (len)++; \ + (Size) += entry_ptr->size; \ +} /* H5C__DLL_PREPEND() */ + +#define H5C__DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val) \ +{ \ + H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \ + fail_val) \ + { \ + if ( (head_ptr) == (entry_ptr) ) \ + { \ + (head_ptr) = (entry_ptr)->next; \ + if ( (head_ptr) != NULL ) \ + (head_ptr)->prev = NULL; \ + } \ + else \ + (entry_ptr)->prev->next = (entry_ptr)->next; \ + if ( (tail_ptr) == (entry_ptr) ) \ + { \ + (tail_ptr) = (entry_ptr)->prev; \ + if ( (tail_ptr) != NULL ) \ + (tail_ptr)->next = NULL; \ + } \ + else \ + (entry_ptr)->next->prev = (entry_ptr)->prev; \ + entry_ptr->next = NULL; \ + entry_ptr->prev = NULL; \ + (len)--; \ + (Size) -= entry_ptr->size; \ + } \ +} /* H5C__DLL_REMOVE() */ + +#define H5C__DLL_UPDATE_FOR_SIZE_CHANGE(dll_len, dll_size, old_size, new_size) \ +{ \ + H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) \ + (dll_size) -= (old_size); \ + (dll_size) += (new_size); \ + H5C__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) \ +} /* H5C__DLL_UPDATE_FOR_SIZE_CHANGE() */ + +#if H5C_DO_SANITY_CHECKS + +#define H5C__AUX_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \ +if ( ( (hd_ptr) == NULL ) || \ + ( (tail_ptr) == NULL ) || \ + ( (entry_ptr) == NULL ) || \ + ( (len) <= 0 ) || \ + ( (Size) < (entry_ptr)->size ) || \ + ( ( (Size) == (entry_ptr)->size ) && ( ! ( (len) == 1 ) ) ) || \ + ( ( (entry_ptr)->aux_prev == NULL ) && ( (hd_ptr) != (entry_ptr) ) ) || \ + ( ( (entry_ptr)->aux_next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) || \ + ( ( (len) == 1 ) && \ + ( ! 
( ( (hd_ptr) == (entry_ptr) ) && ( (tail_ptr) == (entry_ptr) ) && \ + ( (entry_ptr)->aux_next == NULL ) && \ + ( (entry_ptr)->aux_prev == NULL ) && \ + ( (Size) == (entry_ptr)->size ) \ + ) \ + ) \ + ) \ + ) { \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "aux DLL pre remove SC failed") \ +} + +#define H5C__AUX_DLL_SC(head_ptr, tail_ptr, len, Size, fv) \ +if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ + ( (head_ptr) != (tail_ptr) ) \ + ) || \ + ( (len) < 0 ) || \ + ( (Size) < 0 ) || \ + ( ( (len) == 1 ) && \ + ( ( (head_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \ + ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \ + ) \ + ) || \ + ( ( (len) >= 1 ) && \ + ( ( (head_ptr) == NULL ) || ( (head_ptr)->aux_prev != NULL ) || \ + ( (tail_ptr) == NULL ) || ( (tail_ptr)->aux_next != NULL ) \ + ) \ + ) \ + ) { \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "AUX DLL sanity check failed") \ +} + +#define H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \ +if ( ( (entry_ptr) == NULL ) || \ + ( (entry_ptr)->aux_next != NULL ) || \ + ( (entry_ptr)->aux_prev != NULL ) || \ + ( ( ( (hd_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ + ( (hd_ptr) != (tail_ptr) ) \ + ) || \ + ( (len) < 0 ) || \ + ( ( (len) == 1 ) && \ + ( ( (hd_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \ + ( (hd_ptr) == NULL ) || ( (hd_ptr)->size != (Size) ) \ + ) \ + ) || \ + ( ( (len) >= 1 ) && \ + ( ( (hd_ptr) == NULL ) || ( (hd_ptr)->aux_prev != NULL ) || \ + ( (tail_ptr) == NULL ) || ( (tail_ptr)->aux_next != NULL ) \ + ) \ + ) \ + ) { \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "AUX DLL pre insert SC failed") \ +} + +#else /* H5C_DO_SANITY_CHECKS */ + +#define H5C__AUX_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) +#define H5C__AUX_DLL_SC(head_ptr, tail_ptr, len, Size, fv) +#define H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) + +#endif /* H5C_DO_SANITY_CHECKS */ + + +#define H5C__AUX_DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val)\ +{ \ + H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \ + fail_val) \ + if ( (head_ptr) == NULL ) \ + { \ + (head_ptr) = (entry_ptr); \ + (tail_ptr) = (entry_ptr); \ + } \ + else \ + { \ + (tail_ptr)->aux_next = (entry_ptr); \ + (entry_ptr)->aux_prev = (tail_ptr); \ + (tail_ptr) = (entry_ptr); \ + } \ + (len)++; \ + (Size) += entry_ptr->size; \ +} /* H5C__AUX_DLL_APPEND() */ + +#define H5C__AUX_DLL_PREPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ +{ \ + H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ + if ( (head_ptr) == NULL ) \ + { \ + (head_ptr) = (entry_ptr); \ + (tail_ptr) = (entry_ptr); \ + } \ + else \ + { \ + (head_ptr)->aux_prev = (entry_ptr); \ + (entry_ptr)->aux_next = (head_ptr); \ + (head_ptr) = (entry_ptr); \ + } \ + (len)++; \ + (Size) += entry_ptr->size; \ +} /* H5C__AUX_DLL_PREPEND() */ + +#define H5C__AUX_DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ +{ \ + H5C__AUX_DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ + { \ + if ( (head_ptr) == (entry_ptr) ) \ + { \ + (head_ptr) = (entry_ptr)->aux_next; \ + if ( (head_ptr) != NULL ) \ + (head_ptr)->aux_prev = NULL; \ + } \ + else \ + (entry_ptr)->aux_prev->aux_next = (entry_ptr)->aux_next; \ + if ( (tail_ptr) == (entry_ptr) ) \ + { \ + (tail_ptr) = (entry_ptr)->aux_prev; \ + if ( (tail_ptr) != NULL ) \ + (tail_ptr)->aux_next = NULL; \ + } \ + else \ + (entry_ptr)->aux_next->aux_prev = (entry_ptr)->aux_prev; \ + entry_ptr->aux_next = NULL; \ + 
entry_ptr->aux_prev = NULL; \ + (len)--; \ + (Size) -= entry_ptr->size; \ + } \ +} /* H5C__AUX_DLL_REMOVE() */ + + +/*********************************************************************** * - * total_ht_insertions: Number of times entries have been inserted into the - * hash table in the current epoch. + * Stats collection macros * - * total_ht_deletions: Number of times entries have been deleted from the - * hash table in the current epoch. + * The following macros must handle stats collection when this collection + * is enabled, and evaluate to the empty string when it is not. * - * successful_ht_searches: int64 containing the total number of successful - * searches of the hash table in the current epoch. + * The sole exception to this rule is + * H5C__UPDATE_CACHE_HIT_RATE_STATS(), which is always active as + * the cache hit rate stats are always collected and available. * - * total_successful_ht_search_depth: int64 containing the total number of - * entries other than the targets examined in successful - * searches of the hash table in the current epoch. + * Changes: * - * failed_ht_searches: int64 containing the total number of unsuccessful - * searches of the hash table in the current epoch. + * JRM -- 3/21/06 + * Added / updated macros for pinned entry related stats. * - * total_failed_ht_search_depth: int64 containing the total number of - * entries examined in unsuccessful searches of the hash - * table in the current epoch. + * JRM -- 8/9/06 + * More pinned entry stats related updates. * - * max_index_len: Largest value attained by the index_len field in the - * current epoch. + * JRM -- 3/31/07 + * Updated H5C__UPDATE_STATS_FOR_PROTECT() to keep stats on + * read and write protects. * - * max_index_size: Largest value attained by the index_size field in the - * current epoch. + * MAM -- 1/15/09 + * Created H5C__UPDATE_MAX_INDEX_SIZE_STATS to contain + * common code within macros that update the maximum + * index, clean_index, and dirty_index statistics fields. * - * max_clean_index_size: Largest value attained by the clean_index_size field - * in the current epoch. 
+ ***********************************************************************/ + +#define H5C__UPDATE_CACHE_HIT_RATE_STATS(cache_ptr, hit) \ + (cache_ptr->cache_accesses)++; \ + if ( hit ) { \ + (cache_ptr->cache_hits)++; \ + } \ + +#if H5C_COLLECT_CACHE_STATS + +#define H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ + if ( (cache_ptr)->index_size > (cache_ptr)->max_index_size ) \ + (cache_ptr)->max_index_size = (cache_ptr)->index_size; \ + if ( (cache_ptr)->clean_index_size > \ + (cache_ptr)->max_clean_index_size ) \ + (cache_ptr)->max_clean_index_size = \ + (cache_ptr)->clean_index_size; \ + if ( (cache_ptr)->dirty_index_size > \ + (cache_ptr)->max_dirty_index_size ) \ + (cache_ptr)->max_dirty_index_size = \ + (cache_ptr)->dirty_index_size; + +#define H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr) \ + (((cache_ptr)->dirty_pins)[(entry_ptr)->type->id])++; + +#define H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr) \ + if ( (cache_ptr)->slist_len > (cache_ptr)->max_slist_len ) \ + (cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \ + if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \ + (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \ + if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \ + (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \ + if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \ + (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; + +#define H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr) \ + if ( cache_ptr->flush_in_progress ) { \ + ((cache_ptr)->cache_flush_moves[(entry_ptr)->type->id])++; \ + } \ + if ( entry_ptr->flush_in_progress ) { \ + ((cache_ptr)->entry_flush_moves[(entry_ptr)->type->id])++; \ + } \ + (((cache_ptr)->moves)[(entry_ptr)->type->id])++; + +#define H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size)\ + if ( cache_ptr->flush_in_progress ) { \ + ((cache_ptr)->cache_flush_size_changes[(entry_ptr)->type->id])++; \ + } \ + if ( entry_ptr->flush_in_progress ) { \ + ((cache_ptr)->entry_flush_size_changes[(entry_ptr)->type->id])++; \ + } \ + if ( (entry_ptr)->size < (new_size) ) { \ + ((cache_ptr)->size_increases[(entry_ptr)->type->id])++; \ + H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ + if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \ + (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \ + if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size ) \ + (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \ + } else if ( (entry_ptr)->size > (new_size) ) { \ + ((cache_ptr)->size_decreases[(entry_ptr)->type->id])++; \ + } + +#define H5C__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr) \ + (cache_ptr)->total_ht_insertions++; + +#define H5C__UPDATE_STATS_FOR_HT_DELETION(cache_ptr) \ + (cache_ptr)->total_ht_deletions++; + +#define H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, success, depth) \ + if ( success ) { \ + (cache_ptr)->successful_ht_searches++; \ + (cache_ptr)->total_successful_ht_search_depth += depth; \ + } else { \ + (cache_ptr)->failed_ht_searches++; \ + (cache_ptr)->total_failed_ht_search_depth += depth; \ + } + +#define H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr) \ + ((cache_ptr)->unpins)[(entry_ptr)->type->id]++; + +#if H5C_COLLECT_CACHE_ENTRY_STATS + +#define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr) \ +{ \ + (entry_ptr)->accesses = 0; \ + (entry_ptr)->clears = 0; \ + (entry_ptr)->flushes = 0; \ + (entry_ptr)->pins = 0; \ +} + +#define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) \ +{ \ + (((cache_ptr)->clears)[(entry_ptr)->type->id])++; \ + if ( (entry_ptr)->is_pinned ) \ + 
(((cache_ptr)->pinned_clears)[(entry_ptr)->type->id])++; \ + ((entry_ptr)->clears)++; \ +} + +#define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) \ +{ \ + (((cache_ptr)->flushes)[(entry_ptr)->type->id])++; \ + if ( (entry_ptr)->is_pinned ) \ + (((cache_ptr)->pinned_flushes)[(entry_ptr)->type->id])++; \ + ((entry_ptr)->flushes)++; \ +} + +#define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr) \ +{ \ + (((cache_ptr)->evictions)[(entry_ptr)->type->id])++; \ + if ( (entry_ptr)->accesses > \ + ((cache_ptr)->max_accesses)[(entry_ptr)->type->id] ) \ + ((cache_ptr)->max_accesses)[(entry_ptr)->type->id] = \ + (entry_ptr)->accesses; \ + if ( (entry_ptr)->accesses < \ + ((cache_ptr)->min_accesses)[(entry_ptr)->type->id] ) \ + ((cache_ptr)->min_accesses)[(entry_ptr)->type->id] = \ + (entry_ptr)->accesses; \ + if ( (entry_ptr)->clears > \ + ((cache_ptr)->max_clears)[(entry_ptr)->type->id] ) \ + ((cache_ptr)->max_clears)[(entry_ptr)->type->id] \ + = (entry_ptr)->clears; \ + if ( (entry_ptr)->flushes > \ + ((cache_ptr)->max_flushes)[(entry_ptr)->type->id] ) \ + ((cache_ptr)->max_flushes)[(entry_ptr)->type->id] \ + = (entry_ptr)->flushes; \ + if ( (entry_ptr)->size > \ + ((cache_ptr)->max_size)[(entry_ptr)->type->id] ) \ + ((cache_ptr)->max_size)[(entry_ptr)->type->id] \ + = (entry_ptr)->size; \ + if ( (entry_ptr)->pins > \ + ((cache_ptr)->max_pins)[(entry_ptr)->type->id] ) \ + ((cache_ptr)->max_pins)[(entry_ptr)->type->id] \ + = (entry_ptr)->pins; \ +} + +#define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) \ +{ \ + (((cache_ptr)->insertions)[(entry_ptr)->type->id])++; \ + if ( (entry_ptr)->is_pinned ) { \ + (((cache_ptr)->pinned_insertions)[(entry_ptr)->type->id])++; \ + ((cache_ptr)->pins)[(entry_ptr)->type->id]++; \ + (entry_ptr)->pins++; \ + if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \ + (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \ + if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \ + (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \ + } \ + if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \ + (cache_ptr)->max_index_len = (cache_ptr)->index_len; \ + H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ + if ( (cache_ptr)->slist_len > (cache_ptr)->max_slist_len ) \ + (cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \ + if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \ + (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \ + if ( (entry_ptr)->size > \ + ((cache_ptr)->max_size)[(entry_ptr)->type->id] ) \ + ((cache_ptr)->max_size)[(entry_ptr)->type->id] \ + = (entry_ptr)->size; \ +} + +#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \ +{ \ + if ( hit ) \ + ((cache_ptr)->hits)[(entry_ptr)->type->id]++; \ + else \ + ((cache_ptr)->misses)[(entry_ptr)->type->id]++; \ + if ( ! 
((entry_ptr)->is_read_only) ) { \ + ((cache_ptr)->write_protects)[(entry_ptr)->type->id]++; \ + } else { \ + ((cache_ptr)->read_protects)[(entry_ptr)->type->id]++; \ + if ( ((entry_ptr)->ro_ref_count) > \ + ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] ) \ + ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] = \ + ((entry_ptr)->ro_ref_count); \ + } \ + if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \ + (cache_ptr)->max_index_len = (cache_ptr)->index_len; \ + H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ + if ( (cache_ptr)->pl_len > (cache_ptr)->max_pl_len ) \ + (cache_ptr)->max_pl_len = (cache_ptr)->pl_len; \ + if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size ) \ + (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \ + if ( (entry_ptr)->size > \ + ((cache_ptr)->max_size)[(entry_ptr)->type->id] ) \ + ((cache_ptr)->max_size)[(entry_ptr)->type->id] = (entry_ptr)->size; \ + ((entry_ptr)->accesses)++; \ +} + +#define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) \ +{ \ + ((cache_ptr)->pins)[(entry_ptr)->type->id]++; \ + (entry_ptr)->pins++; \ + if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \ + (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \ + if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \ + (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \ +} + +#else /* H5C_COLLECT_CACHE_ENTRY_STATS */ + +#define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr) + +#define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) \ +{ \ + if ( (entry_ptr)->is_pinned ) \ + (((cache_ptr)->pinned_clears)[(entry_ptr)->type->id])++; \ + (((cache_ptr)->clears)[(entry_ptr)->type->id])++; \ +} + +#define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) \ +{ \ + (((cache_ptr)->flushes)[(entry_ptr)->type->id])++; \ + if ( (entry_ptr)->is_pinned ) \ + (((cache_ptr)->pinned_flushes)[(entry_ptr)->type->id])++; \ +} + +#define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr) \ + (((cache_ptr)->evictions)[(entry_ptr)->type->id])++; + +#define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) \ +{ \ + (((cache_ptr)->insertions)[(entry_ptr)->type->id])++; \ + if ( (entry_ptr)->is_pinned ) { \ + (((cache_ptr)->pinned_insertions)[(entry_ptr)->type->id])++; \ + ((cache_ptr)->pins)[(entry_ptr)->type->id]++; \ + if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \ + (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \ + if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \ + (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \ + } \ + if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \ + (cache_ptr)->max_index_len = (cache_ptr)->index_len; \ + H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ + if ( (cache_ptr)->slist_len > (cache_ptr)->max_slist_len ) \ + (cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \ + if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \ + (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \ +} + +#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \ +{ \ + if ( hit ) \ + ((cache_ptr)->hits)[(entry_ptr)->type->id]++; \ + else \ + ((cache_ptr)->misses)[(entry_ptr)->type->id]++; \ + if ( ! 
((entry_ptr)->is_read_only) ) \
+ ((cache_ptr)->write_protects)[(entry_ptr)->type->id]++; \
+ else { \
+ ((cache_ptr)->read_protects)[(entry_ptr)->type->id]++; \
+ if ( ((entry_ptr)->ro_ref_count) > \
+ ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] ) \
+ ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] = \
+ ((entry_ptr)->ro_ref_count); \
+ } \
+ if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \
+ (cache_ptr)->max_index_len = (cache_ptr)->index_len; \
+ H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \
+ if ( (cache_ptr)->pl_len > (cache_ptr)->max_pl_len ) \
+ (cache_ptr)->max_pl_len = (cache_ptr)->pl_len; \
+ if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size ) \
+ (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \
+}
+
+#define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) \
+{ \
+ ((cache_ptr)->pins)[(entry_ptr)->type->id]++; \
+ if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \
+ (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \
+ if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \
+ (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \
+}
+
+#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
+
+#else /* H5C_COLLECT_CACHE_STATS */
+
+#define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr)
+#define H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr)
+#define H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr)
+#define H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr)
+#define H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size)
+#define H5C__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr)
+#define H5C__UPDATE_STATS_FOR_HT_DELETION(cache_ptr)
+#define H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, success, depth)
+#define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr)
+#define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr)
+#define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr)
+#define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr)
+#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit)
+#define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr)
+#define H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr)
+
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+
+/***********************************************************************
 *
- * max_dirty_index_size: Largest value attained by the dirty_index_size field
- * in the current epoch.
+ * Hash table access and manipulation macros:
 *
- * max_slist_len: Largest value attained by the slist_len field in the
- * current epoch.
+ * The following macros handle searches, insertions, and deletions in
+ * the hash table.
 *
- * max_slist_size: Largest value attained by the slist_size field in the
- * current epoch.
+ * When modifying these macros, remember to modify the similar macros
+ * in test/cache.c
 *
- * max_pl_len: Largest value attained by the pl_len field in the
- * current epoch.
+ * Changes:
 *
- * max_pl_size: Largest value attained by the pl_size field in the
- * current epoch.
+ * - Updated existing index macros and sanity check macros to maintain
+ * the clean_index_size and dirty_index_size fields of H5C_t. Also
+ * added macros to allow us to track entry cleans and dirties.
 *
- * max_pel_len: Largest value attained by the pel_len field in the
- * current epoch.
+ * JRM -- 11/5/08
 *
- * max_pel_size: Largest value attained by the pel_size field in the
- * current epoch.
+ ***********************************************************************/
+
+/* H5C__HASH_TABLE_LEN is defined in H5Cpkg.h. It must be a power of two. 
*/ + +#define H5C__HASH_MASK ((size_t)(H5C__HASH_TABLE_LEN - 1) << 3) + +#define H5C__HASH_FCN(x) (int)(((x) & H5C__HASH_MASK) >> 3) + +#if H5C_DO_SANITY_CHECKS + +#define H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \ +if ( ( (cache_ptr) == NULL ) || \ + ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \ + ( (entry_ptr) == NULL ) || \ + ( ! H5F_addr_defined((entry_ptr)->addr) ) || \ + ( (entry_ptr)->ht_next != NULL ) || \ + ( (entry_ptr)->ht_prev != NULL ) || \ + ( (entry_ptr)->size <= 0 ) || \ + ( H5C__HASH_FCN((entry_ptr)->addr) < 0 ) || \ + ( H5C__HASH_FCN((entry_ptr)->addr) >= H5C__HASH_TABLE_LEN ) || \ + ( (cache_ptr)->index_size != \ + ((cache_ptr)->clean_index_size + \ + (cache_ptr)->dirty_index_size) ) ) { \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \ + "Pre HT insert SC failed") \ +} + +#define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr) \ +if ( ( (cache_ptr) == NULL ) || \ + ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \ + ( (cache_ptr)->index_len < 1 ) || \ + ( (entry_ptr) == NULL ) || \ + ( (cache_ptr)->index_size < (entry_ptr)->size ) || \ + ( ! H5F_addr_defined((entry_ptr)->addr) ) || \ + ( (entry_ptr)->size <= 0 ) || \ + ( H5C__HASH_FCN((entry_ptr)->addr) < 0 ) || \ + ( H5C__HASH_FCN((entry_ptr)->addr) >= H5C__HASH_TABLE_LEN ) || \ + ( ((cache_ptr)->index)[(H5C__HASH_FCN((entry_ptr)->addr))] \ + == NULL ) || \ + ( ( ((cache_ptr)->index)[(H5C__HASH_FCN((entry_ptr)->addr))] \ + != (entry_ptr) ) && \ + ( (entry_ptr)->ht_prev == NULL ) ) || \ + ( ( ((cache_ptr)->index)[(H5C__HASH_FCN((entry_ptr)->addr))] == \ + (entry_ptr) ) && \ + ( (entry_ptr)->ht_prev != NULL ) ) || \ + ( (cache_ptr)->index_size != \ + ((cache_ptr)->clean_index_size + \ + (cache_ptr)->dirty_index_size) ) ) { \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Pre HT remove SC failed") \ +} + +/* (Keep in sync w/H5C_TEST__PRE_HT_SEARCH_SC macro in test/cache_common.h -QAK) */ +#define H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \ +if ( ( (cache_ptr) == NULL ) || \ + ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \ + ( (cache_ptr)->index_size != \ + ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \ + ( ! 
H5F_addr_defined(Addr) ) || \ + ( H5C__HASH_FCN(Addr) < 0 ) || \ + ( H5C__HASH_FCN(Addr) >= H5C__HASH_TABLE_LEN ) ) { \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "Pre HT search SC failed") \ +} + +/* (Keep in sync w/H5C_TEST__POST_SUC_HT_SEARCH_SC macro in test/cache_common.h -QAK) */ +#define H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k, fail_val) \ +if ( ( (cache_ptr) == NULL ) || \ + ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \ + ( (cache_ptr)->index_len < 1 ) || \ + ( (entry_ptr) == NULL ) || \ + ( (cache_ptr)->index_size < (entry_ptr)->size ) || \ + ( (cache_ptr)->index_size != \ + ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \ + ( H5F_addr_ne((entry_ptr)->addr, (Addr)) ) || \ + ( (entry_ptr)->size <= 0 ) || \ + ( ((cache_ptr)->index)[k] == NULL ) || \ + ( ( ((cache_ptr)->index)[k] != (entry_ptr) ) && \ + ( (entry_ptr)->ht_prev == NULL ) ) || \ + ( ( ((cache_ptr)->index)[k] == (entry_ptr) ) && \ + ( (entry_ptr)->ht_prev != NULL ) ) || \ + ( ( (entry_ptr)->ht_prev != NULL ) && \ + ( (entry_ptr)->ht_prev->ht_next != (entry_ptr) ) ) || \ + ( ( (entry_ptr)->ht_next != NULL ) && \ + ( (entry_ptr)->ht_next->ht_prev != (entry_ptr) ) ) ) { \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \ + "Post successful HT search SC failed") \ +} + +/* (Keep in sync w/H5C_TEST__POST_HT_SHIFT_TO_FRONT macro in test/cache_common.h -QAK) */ +#define H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \ +if ( ( (cache_ptr) == NULL ) || \ + ( ((cache_ptr)->index)[k] != (entry_ptr) ) || \ + ( (entry_ptr)->ht_prev != NULL ) ) { \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \ + "Post HT shift to front SC failed") \ +} + +#define H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \ + entry_ptr, was_clean) \ +if ( ( (cache_ptr) == NULL ) || \ + ( (cache_ptr)->index_len <= 0 ) || \ + ( (cache_ptr)->index_size <= 0 ) || \ + ( (new_size) <= 0 ) || \ + ( (old_size) > (cache_ptr)->index_size ) || \ + ( (new_size) <= 0 ) || \ + ( ( (cache_ptr)->index_len == 1 ) && \ + ( (cache_ptr)->index_size != (old_size) ) ) || \ + ( (cache_ptr)->index_size != \ + ((cache_ptr)->clean_index_size + \ + (cache_ptr)->dirty_index_size) ) || \ + ( ( !( was_clean ) || \ + ( (cache_ptr)->clean_index_size < (old_size) ) ) && \ + ( ( (was_clean) ) || \ + ( (cache_ptr)->dirty_index_size < (old_size) ) ) ) || \ + ( (entry_ptr) == NULL ) ) { \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ + "Pre HT entry size change SC failed") \ +} + +#define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \ + entry_ptr) \ +if ( ( (cache_ptr) == NULL ) || \ + ( (cache_ptr)->index_len <= 0 ) || \ + ( (cache_ptr)->index_size <= 0 ) || \ + ( (new_size) > (cache_ptr)->index_size ) || \ + ( (cache_ptr)->index_size != \ + ((cache_ptr)->clean_index_size + \ + (cache_ptr)->dirty_index_size) ) || \ + ( ( !((entry_ptr)->is_dirty ) || \ + ( (cache_ptr)->dirty_index_size < (new_size) ) ) && \ + ( ( ((entry_ptr)->is_dirty) ) || \ + ( (cache_ptr)->clean_index_size < (new_size) ) ) ) || \ + ( ( (cache_ptr)->index_len == 1 ) && \ + ( (cache_ptr)->index_size != (new_size) ) ) ) { \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ + "Post HT entry size change SC failed") \ +} + +#define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) \ +if ( \ + ( (cache_ptr) == NULL ) || \ + ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \ + ( (cache_ptr)->index_len <= 0 ) || \ + ( (entry_ptr) == NULL ) || \ + ( (entry_ptr)->is_dirty != FALSE ) || \ + ( (cache_ptr)->index_size < (entry_ptr)->size ) || \ 
+ ( (cache_ptr)->dirty_index_size < (entry_ptr)->size ) || \ + ( (cache_ptr)->index_size != \ + ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) ) { \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ + "Pre HT update for entry clean SC failed") \ +} + +#define H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) \ +if ( \ + ( (cache_ptr) == NULL ) || \ + ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \ + ( (cache_ptr)->index_len <= 0 ) || \ + ( (entry_ptr) == NULL ) || \ + ( (entry_ptr)->is_dirty != TRUE ) || \ + ( (cache_ptr)->index_size < (entry_ptr)->size ) || \ + ( (cache_ptr)->clean_index_size < (entry_ptr)->size ) || \ + ( (cache_ptr)->index_size != \ + ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) ) { \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ + "Pre HT update for entry dirty SC failed") \ +} + +#define H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) \ +if ( (cache_ptr)->index_size != \ + ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) { \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ + "Post HT update for entry clean SC failed") \ +} + +#define H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) \ +if ( (cache_ptr)->index_size != \ + ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) { \ + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ + "Post HT update for entry dirty SC failed") \ +} + +#else /* H5C_DO_SANITY_CHECKS */ + +#define H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) +#define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr) +#define H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) +#define H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k, fail_val) +#define H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) +#define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) +#define H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) +#define H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \ + entry_ptr, was_clean) +#define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \ + entry_ptr) +#define H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) +#define H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) + +#endif /* H5C_DO_SANITY_CHECKS */ + + +#define H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, fail_val) \ +{ \ + int k; \ + H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \ + k = H5C__HASH_FCN((entry_ptr)->addr); \ + if ( ((cache_ptr)->index)[k] == NULL ) \ + ((cache_ptr)->index)[k] = (entry_ptr); \ + else { \ + (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \ + (entry_ptr)->ht_next->ht_prev = (entry_ptr); \ + ((cache_ptr)->index)[k] = (entry_ptr); \ + } \ + (cache_ptr)->index_len++; \ + (cache_ptr)->index_size += (entry_ptr)->size; \ + if ( (entry_ptr)->is_dirty ) \ + (cache_ptr)->dirty_index_size += (entry_ptr)->size; \ + else \ + (cache_ptr)->clean_index_size += (entry_ptr)->size; \ + if ((entry_ptr)->flush_me_last) { \ + (cache_ptr)->num_last_entries++; \ + HDassert((cache_ptr)->num_last_entries == 1); \ + } \ + H5C__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr) \ +} + +#define H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr) \ +{ \ + int k; \ + H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr) \ + k = H5C__HASH_FCN((entry_ptr)->addr); \ + if ( (entry_ptr)->ht_next ) \ + (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \ + if ( (entry_ptr)->ht_prev ) \ + (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \ + if ( ((cache_ptr)->index)[k] == (entry_ptr) ) \ + ((cache_ptr)->index)[k] = 
(entry_ptr)->ht_next; \ + (entry_ptr)->ht_next = NULL; \ + (entry_ptr)->ht_prev = NULL; \ + (cache_ptr)->index_len--; \ + (cache_ptr)->index_size -= (entry_ptr)->size; \ + if ( (entry_ptr)->is_dirty ) \ + (cache_ptr)->dirty_index_size -= (entry_ptr)->size; \ + else \ + (cache_ptr)->clean_index_size -= (entry_ptr)->size; \ + if ((entry_ptr)->flush_me_last) { \ + (cache_ptr)->num_last_entries--; \ + HDassert((cache_ptr)->num_last_entries == 0); \ + } \ + H5C__UPDATE_STATS_FOR_HT_DELETION(cache_ptr) \ +} + +#define H5C__SEARCH_INDEX(cache_ptr, Addr, entry_ptr, fail_val) \ +{ \ + int k; \ + int depth = 0; \ + H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \ + k = H5C__HASH_FCN(Addr); \ + entry_ptr = ((cache_ptr)->index)[k]; \ + while ( ( entry_ptr ) && ( H5F_addr_ne(Addr, (entry_ptr)->addr) ) ) { \ + (entry_ptr) = (entry_ptr)->ht_next; \ + (depth)++; \ + } \ + if ( entry_ptr ) { \ + H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k, fail_val) \ + if ( entry_ptr != ((cache_ptr)->index)[k] ) { \ + if ( (entry_ptr)->ht_next ) \ + (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \ + HDassert( (entry_ptr)->ht_prev != NULL ); \ + (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \ + ((cache_ptr)->index)[k]->ht_prev = (entry_ptr); \ + (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \ + (entry_ptr)->ht_prev = NULL; \ + ((cache_ptr)->index)[k] = (entry_ptr); \ + H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \ + } \ + } \ + H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, (entry_ptr != NULL), depth) \ +} + +#define H5C__SEARCH_INDEX_NO_STATS(cache_ptr, Addr, entry_ptr, fail_val) \ +{ \ + int k; \ + int depth = 0; \ + H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \ + k = H5C__HASH_FCN(Addr); \ + entry_ptr = ((cache_ptr)->index)[k]; \ + while ( ( entry_ptr ) && ( H5F_addr_ne(Addr, (entry_ptr)->addr) ) ) { \ + (entry_ptr) = (entry_ptr)->ht_next; \ + (depth)++; \ + } \ + if ( entry_ptr ) { \ + H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k, fail_val) \ + if ( entry_ptr != ((cache_ptr)->index)[k] ) { \ + if ( (entry_ptr)->ht_next ) \ + (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \ + HDassert( (entry_ptr)->ht_prev != NULL ); \ + (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \ + ((cache_ptr)->index)[k]->ht_prev = (entry_ptr); \ + (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \ + (entry_ptr)->ht_prev = NULL; \ + ((cache_ptr)->index)[k] = (entry_ptr); \ + H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \ + } \ + } \ +} + +#define H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr) \ +{ \ + H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr); \ + (cache_ptr)->dirty_index_size -= (entry_ptr)->size; \ + (cache_ptr)->clean_index_size += (entry_ptr)->size; \ + H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr); \ +} + +#define H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr) \ +{ \ + H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr); \ + (cache_ptr)->clean_index_size -= (entry_ptr)->size; \ + (cache_ptr)->dirty_index_size += (entry_ptr)->size; \ + H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr); \ +} + +#define H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size, \ + entry_ptr, was_clean) \ +{ \ + H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \ + entry_ptr, was_clean) \ + (cache_ptr)->index_size -= (old_size); \ + (cache_ptr)->index_size += (new_size); \ + if ( was_clean ) \ + (cache_ptr)->clean_index_size -= (old_size); \ + else \ + 
(cache_ptr)->dirty_index_size -= (old_size); \ + if ( (entry_ptr)->is_dirty ) \ + (cache_ptr)->dirty_index_size += (new_size); \ + else \ + (cache_ptr)->clean_index_size += (new_size); \ + H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \ + entry_ptr) \ +} + + +/************************************************************************** * - * calls_to_msic: Total number of calls to H5C_make_space_in_cache + * Skip list insertion and deletion macros: * - * total_entries_skipped_in_msic: Number of clean entries skipped while - * enforcing the min_clean_fraction in H5C_make_space_in_cache(). + * These used to be functions, but I converted them to macros to avoid some + * function call overhead. * - * total_entries_scanned_in_msic: Number of clean entries skipped while - * enforcing the min_clean_fraction in H5C_make_space_in_cache(). + **************************************************************************/ + +/*------------------------------------------------------------------------- * - * max_entries_skipped_in_msic: Maximum number of clean entries skipped - * in any one call to H5C_make_space_in_cache(). + * Macro: H5C__INSERT_ENTRY_IN_SLIST * - * max_entries_scanned_in_msic: Maximum number of entries scanned over - * in any one call to H5C_make_space_in_cache(). + * Purpose: Insert the specified instance of H5C_cache_entry_t into + * the skip list in the specified instance of H5C_t. Update + * the associated length and size fields. * - * entries_scanned_to_make_space: Number of entries scanned only when looking - * for entries to evict in order to make space in cache. - - * The remaining stats are collected only when both H5C_COLLECT_CACHE_STATS - * and H5C_COLLECT_CACHE_ENTRY_STATS are true. + * Return: N/A * - * max_accesses: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the maximum number of times any single - * entry with type id equal to the array index has been - * accessed in the current epoch. + * Programmer: John Mainzer, 5/10/04 * - * min_accesses: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the minimum number of times any single - * entry with type id equal to the array index has been - * accessed in the current epoch. + * Modifications: * - * max_clears: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the maximum number of times any single - * entry with type id equal to the array index has been cleared - * in the current epoch. + * JRM -- 7/21/04 + * Updated function to set the in_tree flag when inserting + * an entry into the tree. Also modified the function to + * update the tree size and len fields instead of the similar + * index fields. * - * max_flushes: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the maximum number of times any single - * entry with type id equal to the array index has been - * flushed in the current epoch. + * All of this is part of the modifications to support the + * hash table. * - * max_size: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. The cells - * are used to record the maximum size of any single entry - * with type id equal to the array index that has resided in - * the cache in the current epoch. + * JRM -- 7/27/04 + * Converted the function H5C_insert_entry_in_tree() into + * the macro H5C__INSERT_ENTRY_IN_TREE in the hopes of + * wringing a little more speed out of the cache. * - * max_pins: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. 
The cells - * are used to record the maximum number of times that any single - * entry with type id equal to the array index that has been - * marked as pinned in the cache in the current epoch. + * Note that we don't bother to check if the entry is already + * in the tree -- if it is, H5SL_insert() will fail. * + * QAK -- 11/27/04 + * Switched over to using skip list routines. * - * Fields supporting testing: + * JRM -- 6/27/06 + * Added fail_val parameter. * - * prefix Array of char used to prefix debugging output. The - * field is intended to allow marking of output of with - * the processes mpi rank. + * JRM -- 8/25/06 + * Added the H5C_DO_SANITY_CHECKS version of the macro. * - * get_entry_ptr_from_addr_counter: Counter used to track the number of - * times the H5C_get_entry_ptr_from_addr() function has been - * called successfully. This field is only defined when - * NDEBUG is not #defined. + * This version maintains the slist_len_increase and + * slist_size_increase fields that are used in sanity + * checks in the flush routines. * - ****************************************************************************/ - -#define H5C__HASH_TABLE_LEN (64 * 1024) /* must be a power of 2 */ - -#define H5C__H5C_T_MAGIC 0x005CAC0E -#define H5C__MAX_NUM_TYPE_IDS 27 -#define H5C__PREFIX_LEN 32 - -struct H5C_t -{ - uint32_t magic; - - hbool_t flush_in_progress; - - FILE * trace_file_ptr; - - void * aux_ptr; - - int32_t max_type_id; - const char * (* type_name_table_ptr); - - size_t max_cache_size; - size_t min_clean_size; - - H5C_write_permitted_func_t check_write_permitted; - hbool_t write_permitted; - - H5C_log_flush_func_t log_flush; - - hbool_t evictions_enabled; - - int32_t index_len; - size_t index_size; - size_t clean_index_size; - size_t dirty_index_size; - H5C_cache_entry_t * (index[H5C__HASH_TABLE_LEN]); - - hbool_t ignore_tags; + * All this is needed as the fractal heap needs to be + * able to dirty, resize and/or move entries during the + * flush. 
+ * + *------------------------------------------------------------------------- + */ - int32_t slist_len; - size_t slist_size; - H5SL_t * slist_ptr; - int32_t num_last_entries; #if H5C_DO_SANITY_CHECKS - int64_t slist_len_increase; - int64_t slist_size_increase; -#endif /* H5C_DO_SANITY_CHECKS */ - - int32_t pl_len; - size_t pl_size; - H5C_cache_entry_t * pl_head_ptr; - H5C_cache_entry_t * pl_tail_ptr; - - int32_t pel_len; - size_t pel_size; - H5C_cache_entry_t * pel_head_ptr; - H5C_cache_entry_t * pel_tail_ptr; - - int32_t LRU_list_len; - size_t LRU_list_size; - H5C_cache_entry_t * LRU_head_ptr; - H5C_cache_entry_t * LRU_tail_ptr; - - int32_t cLRU_list_len; - size_t cLRU_list_size; - H5C_cache_entry_t * cLRU_head_ptr; - H5C_cache_entry_t * cLRU_tail_ptr; - - int32_t dLRU_list_len; - size_t dLRU_list_size; - H5C_cache_entry_t * dLRU_head_ptr; - H5C_cache_entry_t * dLRU_tail_ptr; - - hbool_t size_increase_possible; - hbool_t flash_size_increase_possible; - size_t flash_size_increase_threshold; - hbool_t size_decrease_possible; - hbool_t resize_enabled; - hbool_t cache_full; - hbool_t size_decreased; - H5C_auto_size_ctl_t resize_ctl; - - int32_t epoch_markers_active; - hbool_t epoch_marker_active[H5C__MAX_EPOCH_MARKERS]; - int32_t epoch_marker_ringbuf[H5C__MAX_EPOCH_MARKERS+1]; - int32_t epoch_marker_ringbuf_first; - int32_t epoch_marker_ringbuf_last; - int32_t epoch_marker_ringbuf_size; - H5C_cache_entry_t epoch_markers[H5C__MAX_EPOCH_MARKERS]; - - int64_t cache_hits; - int64_t cache_accesses; - -#if H5C_COLLECT_CACHE_STATS - - /* stats fields */ - int64_t hits[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t misses[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t write_protects[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t read_protects[H5C__MAX_NUM_TYPE_IDS + 1]; - int32_t max_read_protects[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t insertions[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t pinned_insertions[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t clears[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t flushes[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t evictions[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t moves[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t entry_flush_moves[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t cache_flush_moves[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t pins[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t unpins[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t dirty_pins[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t pinned_flushes[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t pinned_clears[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t size_increases[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t size_decreases[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t entry_flush_size_changes[H5C__MAX_NUM_TYPE_IDS + 1]; - int64_t cache_flush_size_changes[H5C__MAX_NUM_TYPE_IDS + 1]; - - int64_t total_ht_insertions; - int64_t total_ht_deletions; - int64_t successful_ht_searches; - int64_t total_successful_ht_search_depth; - int64_t failed_ht_searches; - int64_t total_failed_ht_search_depth; - - int32_t max_index_len; - size_t max_index_size; - size_t max_clean_index_size; - size_t max_dirty_index_size; - - int32_t max_slist_len; - size_t max_slist_size; - - int32_t max_pl_len; - size_t max_pl_size; - - int32_t max_pel_len; - size_t max_pel_size; - - int64_t calls_to_msic; - int64_t total_entries_skipped_in_msic; - int64_t total_entries_scanned_in_msic; - int32_t max_entries_skipped_in_msic; - int32_t max_entries_scanned_in_msic; - int64_t entries_scanned_to_make_space; - -#if H5C_COLLECT_CACHE_ENTRY_STATS - - int32_t max_accesses[H5C__MAX_NUM_TYPE_IDS + 1]; - int32_t min_accesses[H5C__MAX_NUM_TYPE_IDS + 1]; - int32_t 
max_clears[H5C__MAX_NUM_TYPE_IDS + 1]; - int32_t max_flushes[H5C__MAX_NUM_TYPE_IDS + 1]; - size_t max_size[H5C__MAX_NUM_TYPE_IDS + 1]; - int32_t max_pins[H5C__MAX_NUM_TYPE_IDS + 1]; - -#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */ - -#endif /* H5C_COLLECT_CACHE_STATS */ - char prefix[H5C__PREFIX_LEN]; +#define H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, fail_val) \ +{ \ + HDassert( (cache_ptr) ); \ + HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ + HDassert( (entry_ptr) ); \ + HDassert( (entry_ptr)->size > 0 ); \ + HDassert( H5F_addr_defined((entry_ptr)->addr) ); \ + HDassert( !((entry_ptr)->in_slist) ); \ + \ + if(H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) < 0) \ + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), \ + "Can't insert entry in skip list") \ + \ + (entry_ptr)->in_slist = TRUE; \ + (cache_ptr)->slist_len++; \ + (cache_ptr)->slist_size += (entry_ptr)->size; \ + (cache_ptr)->slist_len_increase++; \ + (cache_ptr)->slist_size_increase += (int64_t)((entry_ptr)->size); \ + \ + HDassert( (cache_ptr)->slist_len > 0 ); \ + HDassert( (cache_ptr)->slist_size > 0 ); \ + \ +} /* H5C__INSERT_ENTRY_IN_SLIST */ -#ifndef NDEBUG +#else /* H5C_DO_SANITY_CHECKS */ - int64_t get_entry_ptr_from_addr_counter; +#define H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, fail_val) \ +{ \ + HDassert( (cache_ptr) ); \ + HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ + HDassert( (entry_ptr) ); \ + HDassert( (entry_ptr)->size > 0 ); \ + HDassert( H5F_addr_defined((entry_ptr)->addr) ); \ + HDassert( !((entry_ptr)->in_slist) ); \ + \ + if(H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) < 0) \ + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), \ + "Can't insert entry in skip list") \ + \ + (entry_ptr)->in_slist = TRUE; \ + (cache_ptr)->slist_len++; \ + (cache_ptr)->slist_size += (entry_ptr)->size; \ + \ + HDassert( (cache_ptr)->slist_len > 0 ); \ + HDassert( (cache_ptr)->slist_size > 0 ); \ + \ +} /* H5C__INSERT_ENTRY_IN_SLIST */ -#endif /* NDEBUG */ -}; +#endif /* H5C_DO_SANITY_CHECKS */ -/****************************************************************************/ -/***************************** Macro Definitions ****************************/ -/****************************************************************************/ - -/**************************************************************************** - * - * We maintain doubly linked lists of instances of H5C_cache_entry_t for a - * variety of reasons -- protected list, LRU list, and the clean and dirty - * LRU lists at present. The following macros support linking and unlinking - * of instances of H5C_cache_entry_t by both their regular and auxilary next - * and previous pointers. - * - * The size and length fields are also maintained. - * - * Note that the relevant pair of prev and next pointers are presumed to be - * NULL on entry in the insertion macros. - * - * Finally, observe that the sanity checking macros evaluate to the empty - * string when H5C_DO_SANITY_CHECKS is FALSE. They also contain calls - * to the HGOTO_ERROR macro, which may not be appropriate in all cases. - * If so, we will need versions of the insertion and deletion macros which - * do not reference the sanity checking macros. - * JRM - 5/5/04 - * - * Changes: - * - * - Removed the line: +/*------------------------------------------------------------------------- * - * ( ( (Size) == (entry_ptr)->size ) && ( (len) != 1 ) ) || + * Function: H5C__REMOVE_ENTRY_FROM_SLIST * - * from the H5C__DLL_PRE_REMOVE_SC macro. 
With the addition of the - * epoch markers used in the age out based cache size reduction algorithm, - * this invarient need not hold, as the epoch markers are of size 0. + * Purpose: Remove the specified instance of H5C_cache_entry_t from the + * index skip list in the specified instance of H5C_t. Update + * the associated length and size fields. * - * One could argue that I should have given the epoch markers a positive - * size, but this would break the index_size = LRU_list_size + pl_size - * + pel_size invarient. + * Return: N/A * - * Alternatively, I could pass the current decr_mode in to the macro, - * and just skip the check whenever epoch markers may be in use. + * Programmer: John Mainzer, 5/10/04 * - * However, any size errors should be caught when the cache is flushed - * and destroyed. Until we are tracking such an error, this should be - * good enough. - * JRM - 12/9/04 + * Modifications: * + * JRM -- 7/21/04 + * Updated function for the addition of the hash table. * - * - In the H5C__DLL_PRE_INSERT_SC macro, replaced the lines: + * JRM - 7/27/04 + * Converted from the function H5C_remove_entry_from_tree() + * to the macro H5C__REMOVE_ENTRY_FROM_TREE in the hopes of + * wringing a little more performance out of the cache. * - * ( ( (len) == 1 ) && - * ( ( (head_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || - * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) - * ) - * ) || + * QAK -- 11/27/04 + * Switched over to using skip list routines. * - * with: + * JRM -- 3/28/07 + * Updated sanity checks for the new is_read_only and + * ro_ref_count fields in H5C_cache_entry_t. * - * ( ( (len) == 1 ) && - * ( ( (head_ptr) != (tail_ptr) ) || - * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) - * ) - * ) || + *------------------------------------------------------------------------- + */ + +#define H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr) \ +{ \ + HDassert( (cache_ptr) ); \ + HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ + HDassert( (entry_ptr) ); \ + HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ + HDassert( (entry_ptr)->size > 0 ); \ + HDassert( (entry_ptr)->in_slist ); \ + HDassert( (cache_ptr)->slist_ptr ); \ + \ + if ( H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr) \ + != (entry_ptr) ) \ + HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, \ + "Can't delete entry from skip list.") \ + \ + HDassert( (cache_ptr)->slist_len > 0 ); \ + (cache_ptr)->slist_len--; \ + HDassert( (cache_ptr)->slist_size >= (entry_ptr)->size ); \ + (cache_ptr)->slist_size -= (entry_ptr)->size; \ + (entry_ptr)->in_slist = FALSE; \ +} /* H5C__REMOVE_ENTRY_FROM_SLIST */ + + +/*------------------------------------------------------------------------- * - * Epoch markers have size 0, so we can now have a non-empty list with - * zero size. Hence the "( (Size) <= 0 )" clause cause false failures - * in the sanity check. Since "Size" is typically a size_t, it can't - * take on negative values, and thus the revised clause "( (Size) < 0 )" - * caused compiler warnings. - * JRM - 12/22/04 + * Function: H5C__UPDATE_SLIST_FOR_SIZE_CHANGE * - * - In the H5C__DLL_SC macro, replaced the lines: + * Purpose: Update cache_ptr->slist_size for a change in the size of + * and entry in the slist. 
* - * ( ( (len) == 1 ) && - * ( ( (head_ptr) != (tail_ptr) ) || ( (cache_ptr)->size <= 0 ) || - * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) - * ) - * ) || + * Return: N/A * - * with + * Programmer: John Mainzer, 9/07/05 * - * ( ( (len) == 1 ) && - * ( ( (head_ptr) != (tail_ptr) ) || - * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) - * ) - * ) || + * Modifications: * - * Epoch markers have size 0, so we can now have a non-empty list with - * zero size. Hence the "( (Size) <= 0 )" clause cause false failures - * in the sanity check. Since "Size" is typically a size_t, it can't - * take on negative values, and thus the revised clause "( (Size) < 0 )" - * caused compiler warnings. - * JRM - 1/10/05 + * JRM -- 8/27/06 + * Added the H5C_DO_SANITY_CHECKS version of the macro. * - * - Added the H5C__DLL_UPDATE_FOR_SIZE_CHANGE macro and the associated - * sanity checking macros. These macro are used to update the size of - * a DLL when one of its entries changes size. + * This version maintains the slist_size_increase field + * that are used in sanity checks in the flush routines. * - * JRM - 9/8/05 + * All this is needed as the fractal heap needs to be + * able to dirty, resize and/or move entries during the + * flush. * - ****************************************************************************/ + *------------------------------------------------------------------------- + */ #if H5C_DO_SANITY_CHECKS -#define H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ -if ( ( (head_ptr) == NULL ) || \ - ( (tail_ptr) == NULL ) || \ - ( (entry_ptr) == NULL ) || \ - ( (len) <= 0 ) || \ - ( (Size) < (entry_ptr)->size ) || \ - ( ( (entry_ptr)->prev == NULL ) && ( (head_ptr) != (entry_ptr) ) ) || \ - ( ( (entry_ptr)->next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) || \ - ( ( (len) == 1 ) && \ - ( ! 
( ( (head_ptr) == (entry_ptr) ) && \ - ( (tail_ptr) == (entry_ptr) ) && \ - ( (entry_ptr)->next == NULL ) && \ - ( (entry_ptr)->prev == NULL ) && \ - ( (Size) == (entry_ptr)->size ) \ - ) \ - ) \ - ) \ - ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "DLL pre remove SC failed") \ -} - -#define H5C__DLL_SC(head_ptr, tail_ptr, len, Size, fv) \ -if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ - ( (head_ptr) != (tail_ptr) ) \ - ) || \ - ( (len) < 0 ) || \ - ( (Size) < 0 ) || \ - ( ( (len) == 1 ) && \ - ( ( (head_ptr) != (tail_ptr) ) || \ - ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \ - ) \ - ) || \ - ( ( (len) >= 1 ) && \ - ( ( (head_ptr) == NULL ) || ( (head_ptr)->prev != NULL ) || \ - ( (tail_ptr) == NULL ) || ( (tail_ptr)->next != NULL ) \ - ) \ - ) \ - ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "DLL sanity check failed") \ -} - -#define H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ -if ( ( (entry_ptr) == NULL ) || \ - ( (entry_ptr)->next != NULL ) || \ - ( (entry_ptr)->prev != NULL ) || \ - ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ - ( (head_ptr) != (tail_ptr) ) \ - ) || \ - ( (len) < 0 ) || \ - ( ( (len) == 1 ) && \ - ( ( (head_ptr) != (tail_ptr) ) || \ - ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \ - ) \ - ) || \ - ( ( (len) >= 1 ) && \ - ( ( (head_ptr) == NULL ) || ( (head_ptr)->prev != NULL ) || \ - ( (tail_ptr) == NULL ) || ( (tail_ptr)->next != NULL ) \ - ) \ - ) \ - ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "DLL pre insert SC failed") \ -} +#define H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size) \ +{ \ + HDassert( (cache_ptr) ); \ + HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ + HDassert( (old_size) > 0 ); \ + HDassert( (new_size) > 0 ); \ + HDassert( (old_size) <= (cache_ptr)->slist_size ); \ + HDassert( (cache_ptr)->slist_len > 0 ); \ + HDassert( ((cache_ptr)->slist_len > 1) || \ + ( (cache_ptr)->slist_size == (old_size) ) ); \ + \ + (cache_ptr)->slist_size -= (old_size); \ + (cache_ptr)->slist_size += (new_size); \ + \ + (cache_ptr)->slist_size_increase -= (int64_t)(old_size); \ + (cache_ptr)->slist_size_increase += (int64_t)(new_size); \ + \ + HDassert( (new_size) <= (cache_ptr)->slist_size ); \ + HDassert( ( (cache_ptr)->slist_len > 1 ) || \ + ( (cache_ptr)->slist_size == (new_size) ) ); \ +} /* H5C__UPDATE_SLIST_FOR_SIZE_CHANGE */ -#define H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) \ -if ( ( (dll_len) <= 0 ) || \ - ( (dll_size) <= 0 ) || \ - ( (old_size) <= 0 ) || \ - ( (old_size) > (dll_size) ) || \ - ( (new_size) <= 0 ) || \ - ( ( (dll_len) == 1 ) && ( (old_size) != (dll_size) ) ) ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "DLL pre size update SC failed") \ -} +#else /* H5C_DO_SANITY_CHECKS */ -#define H5C__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) \ -if ( ( (new_size) > (dll_size) ) || \ - ( ( (dll_len) == 1 ) && ( (new_size) != (dll_size) ) ) ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "DLL post size update SC failed") \ -} +#define H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size) \ +{ \ + HDassert( (cache_ptr) ); \ + HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ + HDassert( (old_size) > 0 ); \ + HDassert( (new_size) > 0 ); \ + HDassert( (old_size) <= (cache_ptr)->slist_size ); \ + HDassert( (cache_ptr)->slist_len > 0 ); \ + HDassert( ((cache_ptr)->slist_len > 1) || \ + ( (cache_ptr)->slist_size == (old_size) ) ); \ + \ + (cache_ptr)->slist_size -= (old_size); \ + 
(cache_ptr)->slist_size += (new_size); \ + \ + HDassert( (new_size) <= (cache_ptr)->slist_size ); \ + HDassert( ( (cache_ptr)->slist_len > 1 ) || \ + ( (cache_ptr)->slist_size == (new_size) ) ); \ +} /* H5C__UPDATE_SLIST_FOR_SIZE_CHANGE */ -#else /* H5C_DO_SANITY_CHECKS */ +#endif /* H5C_DO_SANITY_CHECKS */ -#define H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) -#define H5C__DLL_SC(head_ptr, tail_ptr, len, Size, fv) -#define H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) -#define H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) -#define H5C__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) + +/************************************************************************** + * + * Replacement policy update macros: + * + * These used to be functions, but I converted them to macros to avoid some + * function call overhead. + * + **************************************************************************/ -#endif /* H5C_DO_SANITY_CHECKS */ +/*------------------------------------------------------------------------- + * + * Macro: H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS + * + * Purpose: For efficiency, we sometimes change the order of flushes -- + * but doing so can confuse the replacement policy. This + * macro exists to allow us to specify an entry as the + * most recently touched so we can repair any such + * confusion. + * + * At present, we only support the modified LRU policy, so + * this function deals with that case unconditionally. If + * we ever support other replacement policies, the macro + * should switch on the current policy and act accordingly. + * + * Return: N/A + * + * Programmer: John Mainzer, 10/13/05 + * + * Modifications: + * + * JRM -- 3/20/06 + * Modified macro to ignore pinned entries. Pinned entries + * do not appear in the data structures maintained by the + * replacement policy code, and thus this macro has nothing + * to do if called for such an entry. + * + * JRM -- 3/28/07 + * Added sanity checks using the new is_read_only and + * ro_ref_count fields of struct H5C_cache_entry_t. + * + *------------------------------------------------------------------------- + */ +#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS -#define H5C__DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val) \ +#define H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS(cache_ptr, entry_ptr, fail_val) \ { \ - H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \ - fail_val) \ - if ( (head_ptr) == NULL ) \ - { \ - (head_ptr) = (entry_ptr); \ - (tail_ptr) = (entry_ptr); \ - } \ - else \ - { \ - (tail_ptr)->next = (entry_ptr); \ - (entry_ptr)->prev = (tail_ptr); \ - (tail_ptr) = (entry_ptr); \ + HDassert( (cache_ptr) ); \ + HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ + HDassert( (entry_ptr) ); \ + HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ + HDassert( (entry_ptr)->size > 0 ); \ + \ + if ( ! 
((entry_ptr)->is_pinned) ) { \ + \ + /* modified LRU specific code */ \ + \ + /* remove the entry from the LRU list, and re-insert it at the head.\ + */ \ + \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ + \ + H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ + \ + /* Use the dirty flag to infer whether the entry is on the clean or \ + * dirty LRU list, and remove it. Then insert it at the head of \ + * the same LRU list. \ + * \ + * At least initially, all entries should be clean. That may \ + * change, so we may as well deal with both cases now. \ + */ \ + \ + if ( (entry_ptr)->is_dirty ) { \ + H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ + (cache_ptr)->dLRU_tail_ptr, \ + (cache_ptr)->dLRU_list_len, \ + (cache_ptr)->dLRU_list_size, (fail_val)) \ + \ + H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ + (cache_ptr)->dLRU_tail_ptr, \ + (cache_ptr)->dLRU_list_len, \ + (cache_ptr)->dLRU_list_size, (fail_val)) \ + } else { \ + H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ + (cache_ptr)->cLRU_tail_ptr, \ + (cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, (fail_val)) \ + \ + H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ + (cache_ptr)->cLRU_tail_ptr, \ + (cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, (fail_val)) \ + } \ + \ + /* End modified LRU specific code. */ \ } \ - (len)++; \ - (Size) += (entry_ptr)->size; \ -} /* H5C__DLL_APPEND() */ +} /* H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS */ -#define H5C__DLL_PREPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val) \ -{ \ - H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \ - fail_val) \ - if ( (head_ptr) == NULL ) \ - { \ - (head_ptr) = (entry_ptr); \ - (tail_ptr) = (entry_ptr); \ - } \ - else \ - { \ - (head_ptr)->prev = (entry_ptr); \ - (entry_ptr)->next = (head_ptr); \ - (head_ptr) = (entry_ptr); \ - } \ - (len)++; \ - (Size) += entry_ptr->size; \ -} /* H5C__DLL_PREPEND() */ +#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ -#define H5C__DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val) \ +#define H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS(cache_ptr, entry_ptr, fail_val) \ { \ - H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \ - fail_val) \ - { \ - if ( (head_ptr) == (entry_ptr) ) \ - { \ - (head_ptr) = (entry_ptr)->next; \ - if ( (head_ptr) != NULL ) \ - (head_ptr)->prev = NULL; \ - } \ - else \ - (entry_ptr)->prev->next = (entry_ptr)->next; \ - if ( (tail_ptr) == (entry_ptr) ) \ - { \ - (tail_ptr) = (entry_ptr)->prev; \ - if ( (tail_ptr) != NULL ) \ - (tail_ptr)->next = NULL; \ - } \ - else \ - (entry_ptr)->next->prev = (entry_ptr)->prev; \ - entry_ptr->next = NULL; \ - entry_ptr->prev = NULL; \ - (len)--; \ - (Size) -= entry_ptr->size; \ + HDassert( (cache_ptr) ); \ + HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ + HDassert( (entry_ptr) ); \ + HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ + HDassert( (entry_ptr)->size > 0 ); \ + \ + if ( ! 
((entry_ptr)->is_pinned) ) { \ + \ + /* modified LRU specific code */ \ + \ + /* remove the entry from the LRU list, and re-insert it at the head \ + */ \ + \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ + \ + H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ + \ + /* End modified LRU specific code. */ \ } \ -} /* H5C__DLL_REMOVE() */ - -#define H5C__DLL_UPDATE_FOR_SIZE_CHANGE(dll_len, dll_size, old_size, new_size) \ -{ \ - H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) \ - (dll_size) -= (old_size); \ - (dll_size) += (new_size); \ - H5C__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size) \ -} /* H5C__DLL_UPDATE_FOR_SIZE_CHANGE() */ +} /* H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS */ -#if H5C_DO_SANITY_CHECKS +#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ -#define H5C__AUX_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \ -if ( ( (hd_ptr) == NULL ) || \ - ( (tail_ptr) == NULL ) || \ - ( (entry_ptr) == NULL ) || \ - ( (len) <= 0 ) || \ - ( (Size) < (entry_ptr)->size ) || \ - ( ( (Size) == (entry_ptr)->size ) && ( ! ( (len) == 1 ) ) ) || \ - ( ( (entry_ptr)->aux_prev == NULL ) && ( (hd_ptr) != (entry_ptr) ) ) || \ - ( ( (entry_ptr)->aux_next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) || \ - ( ( (len) == 1 ) && \ - ( ! ( ( (hd_ptr) == (entry_ptr) ) && ( (tail_ptr) == (entry_ptr) ) && \ - ( (entry_ptr)->aux_next == NULL ) && \ - ( (entry_ptr)->aux_prev == NULL ) && \ - ( (Size) == (entry_ptr)->size ) \ - ) \ - ) \ - ) \ - ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "aux DLL pre remove SC failed") \ -} + +/*------------------------------------------------------------------------- + * + * Macro: H5C__UPDATE_RP_FOR_EVICTION + * + * Purpose: Update the replacement policy data structures for an + * eviction of the specified cache entry. + * + * At present, we only support the modified LRU policy, so + * this function deals with that case unconditionally. If + * we ever support other replacement policies, the function + * should switch on the current policy and act accordingly. + * + * Return: Non-negative on success/Negative on failure. + * + * Programmer: John Mainzer, 5/10/04 + * + * Modifications: + * + * JRM - 7/27/04 + * Converted the function H5C_update_rp_for_eviction() to the + * macro H5C__UPDATE_RP_FOR_EVICTION in an effort to squeeze + * a bit more performance out of the cache. + * + * At least for the first cut, I am leaving the comments and + * white space in the macro. If they cause dificulties with + * the pre-processor, I'll have to remove them. + * + * JRM - 7/28/04 + * Split macro into two version, one supporting the clean and + * dirty LRU lists, and the other not. Yet another attempt + * at optimization. + * + * JRM - 3/20/06 + * Pinned entries can't be evicted, so this entry should never + * be called on a pinned entry. Added assert to verify this. + * + * JRM -- 3/28/07 + * Added sanity checks for the new is_read_only and + * ro_ref_count fields of struct H5C_cache_entry_t. 
+ * + *------------------------------------------------------------------------- + */ -#define H5C__AUX_DLL_SC(head_ptr, tail_ptr, len, Size, fv) \ -if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ - ( (head_ptr) != (tail_ptr) ) \ - ) || \ - ( (len) < 0 ) || \ - ( (Size) < 0 ) || \ - ( ( (len) == 1 ) && \ - ( ( (head_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \ - ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \ - ) \ - ) || \ - ( ( (len) >= 1 ) && \ - ( ( (head_ptr) == NULL ) || ( (head_ptr)->aux_prev != NULL ) || \ - ( (tail_ptr) == NULL ) || ( (tail_ptr)->aux_next != NULL ) \ - ) \ - ) \ - ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "AUX DLL sanity check failed") \ -} +#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS -#define H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \ -if ( ( (entry_ptr) == NULL ) || \ - ( (entry_ptr)->aux_next != NULL ) || \ - ( (entry_ptr)->aux_prev != NULL ) || \ - ( ( ( (hd_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \ - ( (hd_ptr) != (tail_ptr) ) \ - ) || \ - ( (len) < 0 ) || \ - ( ( (len) == 1 ) && \ - ( ( (hd_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \ - ( (hd_ptr) == NULL ) || ( (hd_ptr)->size != (Size) ) \ - ) \ - ) || \ - ( ( (len) >= 1 ) && \ - ( ( (hd_ptr) == NULL ) || ( (hd_ptr)->aux_prev != NULL ) || \ - ( (tail_ptr) == NULL ) || ( (tail_ptr)->aux_next != NULL ) \ - ) \ - ) \ - ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "AUX DLL pre insert SC failed") \ -} +#define H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, fail_val) \ +{ \ + HDassert( (cache_ptr) ); \ + HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ + HDassert( (entry_ptr) ); \ + HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ + HDassert( !((entry_ptr)->is_pinned) ); \ + HDassert( (entry_ptr)->size > 0 ); \ + \ + /* modified LRU specific code */ \ + \ + /* remove the entry from the LRU list. */ \ + \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ + \ + /* If the entry is clean when it is evicted, it should be on the \ + * clean LRU list, if it was dirty, it should be on the dirty LRU list. \ + * Remove it from the appropriate list according to the value of the \ + * dirty flag. 
\ + */ \ + \ + if ( (entry_ptr)->is_dirty ) { \ + \ + H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ + (cache_ptr)->dLRU_tail_ptr, \ + (cache_ptr)->dLRU_list_len, \ + (cache_ptr)->dLRU_list_size, (fail_val)) \ + } else { \ + H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ + (cache_ptr)->cLRU_tail_ptr, \ + (cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, (fail_val)) \ + } \ + \ +} /* H5C__UPDATE_RP_FOR_EVICTION */ -#else /* H5C_DO_SANITY_CHECKS */ +#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ -#define H5C__AUX_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) -#define H5C__AUX_DLL_SC(head_ptr, tail_ptr, len, Size, fv) -#define H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) +#define H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, fail_val) \ +{ \ + HDassert( (cache_ptr) ); \ + HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ + HDassert( (entry_ptr) ); \ + HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ + HDassert( !((entry_ptr)->is_pinned) ); \ + HDassert( (entry_ptr)->size > 0 ); \ + \ + /* modified LRU specific code */ \ + \ + /* remove the entry from the LRU list. */ \ + \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ + \ +} /* H5C__UPDATE_RP_FOR_EVICTION */ -#endif /* H5C_DO_SANITY_CHECKS */ +#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ + +/*------------------------------------------------------------------------- + * + * Macro: H5C__UPDATE_RP_FOR_FLUSH + * + * Purpose: Update the replacement policy data structures for a flush + * of the specified cache entry. + * + * At present, we only support the modified LRU policy, so + * this function deals with that case unconditionally. If + * we ever support other replacement policies, the function + * should switch on the current policy and act accordingly. + * + * Return: N/A + * + * Programmer: John Mainzer, 5/6/04 + * + * Modifications: + * + * JRM - 7/27/04 + * Converted the function H5C_update_rp_for_flush() to the + * macro H5C__UPDATE_RP_FOR_FLUSH in an effort to squeeze + * a bit more performance out of the cache. + * + * At least for the first cut, I am leaving the comments and + * white space in the macro. If they cause dificulties with + * pre-processor, I'll have to remove them. + * + * JRM - 7/28/04 + * Split macro into two versions, one supporting the clean and + * dirty LRU lists, and the other not. Yet another attempt + * at optimization. + * + * JRM - 3/20/06 + * While pinned entries can be flushed, they don't reside in + * the replacement policy data structures when unprotected. + * Thus I modified this macro to do nothing if the entry is + * pinned. + * + * JRM - 3/28/07 + * Added sanity checks based on the new is_read_only and + * ro_ref_count fields of struct H5C_cache_entry_t. 
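A flush is treated much like an access: the entry is unlinked from the LRU and re-inserted at its head, and in the clean/dirty configuration the macro that follows also moves it onto the clean LRU on the assumption that it is about to become clean. The prepend half of that move, sketched with the same kind of hypothetical simplified types as the unlink above:

#include <stddef.h>

struct lru_node {
    size_t           size;
    struct lru_node *prev;
    struct lru_node *next;
};

/* Insert a node at the head of the list while maintaining the length and
 * byte-size tallies, in the spirit of H5C__DLL_PREPEND.
 */
static void
lru_prepend(struct lru_node **head, struct lru_node **tail,
            int *len, size_t *list_size, struct lru_node *node)
{
    node->prev = NULL;
    node->next = *head;

    if (*head)
        (*head)->prev = node;
    else
        *tail = node;                    /* list was empty */

    *head = node;
    (*len)++;
    *list_size += node->size;
}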
+ * + *------------------------------------------------------------------------- + */ -#define H5C__AUX_DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val)\ -{ \ - H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \ - fail_val) \ - if ( (head_ptr) == NULL ) \ - { \ - (head_ptr) = (entry_ptr); \ - (tail_ptr) = (entry_ptr); \ - } \ - else \ - { \ - (tail_ptr)->aux_next = (entry_ptr); \ - (entry_ptr)->aux_prev = (tail_ptr); \ - (tail_ptr) = (entry_ptr); \ - } \ - (len)++; \ - (Size) += entry_ptr->size; \ -} /* H5C__AUX_DLL_APPEND() */ +#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS -#define H5C__AUX_DLL_PREPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ -{ \ - H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ - if ( (head_ptr) == NULL ) \ - { \ - (head_ptr) = (entry_ptr); \ - (tail_ptr) = (entry_ptr); \ - } \ - else \ - { \ - (head_ptr)->aux_prev = (entry_ptr); \ - (entry_ptr)->aux_next = (head_ptr); \ - (head_ptr) = (entry_ptr); \ - } \ - (len)++; \ - (Size) += entry_ptr->size; \ -} /* H5C__AUX_DLL_PREPEND() */ +#define H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, fail_val) \ +{ \ + HDassert( (cache_ptr) ); \ + HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ + HDassert( (entry_ptr) ); \ + HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ + HDassert( (entry_ptr)->size > 0 ); \ + \ + if ( ! ((entry_ptr)->is_pinned) ) { \ + \ + /* modified LRU specific code */ \ + \ + /* remove the entry from the LRU list, and re-insert it at the \ + * head. \ + */ \ + \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ + \ + H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ + \ + /* since the entry is being flushed or cleared, one would think \ + * that it must be dirty -- but that need not be the case. Use the \ + * dirty flag to infer whether the entry is on the clean or dirty \ + * LRU list, and remove it. Then insert it at the head of the \ + * clean LRU list. \ + * \ + * The function presumes that a dirty entry will be either cleared \ + * or flushed shortly, so it is OK if we put a dirty entry on the \ + * clean LRU list. \ + */ \ + \ + if ( (entry_ptr)->is_dirty ) { \ + H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ + (cache_ptr)->dLRU_tail_ptr, \ + (cache_ptr)->dLRU_list_len, \ + (cache_ptr)->dLRU_list_size, (fail_val)) \ + } else { \ + H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ + (cache_ptr)->cLRU_tail_ptr, \ + (cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, (fail_val)) \ + } \ + \ + H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ + (cache_ptr)->cLRU_tail_ptr, \ + (cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, (fail_val)) \ + \ + /* End modified LRU specific code. 
*/ \ + } \ +} /* H5C__UPDATE_RP_FOR_FLUSH */ -#define H5C__AUX_DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ -{ \ - H5C__AUX_DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \ - { \ - if ( (head_ptr) == (entry_ptr) ) \ - { \ - (head_ptr) = (entry_ptr)->aux_next; \ - if ( (head_ptr) != NULL ) \ - (head_ptr)->aux_prev = NULL; \ - } \ - else \ - (entry_ptr)->aux_prev->aux_next = (entry_ptr)->aux_next; \ - if ( (tail_ptr) == (entry_ptr) ) \ - { \ - (tail_ptr) = (entry_ptr)->aux_prev; \ - if ( (tail_ptr) != NULL ) \ - (tail_ptr)->aux_next = NULL; \ - } \ - else \ - (entry_ptr)->aux_next->aux_prev = (entry_ptr)->aux_prev; \ - entry_ptr->aux_next = NULL; \ - entry_ptr->aux_prev = NULL; \ - (len)--; \ - (Size) -= entry_ptr->size; \ - } \ -} /* H5C__AUX_DLL_REMOVE() */ +#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ + +#define H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, fail_val) \ +{ \ + HDassert( (cache_ptr) ); \ + HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ + HDassert( (entry_ptr) ); \ + HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ + HDassert( (entry_ptr)->size > 0 ); \ + \ + if ( ! ((entry_ptr)->is_pinned) ) { \ + \ + /* modified LRU specific code */ \ + \ + /* remove the entry from the LRU list, and re-insert it at the \ + * head. \ + */ \ + \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ + \ + H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ + \ + /* End modified LRU specific code. */ \ + } \ +} /* H5C__UPDATE_RP_FOR_FLUSH */ + +#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ -/*********************************************************************** +/*------------------------------------------------------------------------- * - * Stats collection macros + * Macro: H5C__UPDATE_RP_FOR_INSERTION * - * The following macros must handle stats collection when this collection - * is enabled, and evaluate to the empty string when it is not. + * Purpose: Update the replacement policy data structures for an + * insertion of the specified cache entry. * - * The sole exception to this rule is - * H5C__UPDATE_CACHE_HIT_RATE_STATS(), which is always active as - * the cache hit rate stats are always collected and available. + * At present, we only support the modified LRU policy, so + * this function deals with that case unconditionally. If + * we ever support other replacement policies, the function + * should switch on the current policy and act accordingly. * - * Changes: + * Return: N/A * - * JRM -- 3/21/06 - * Added / updated macros for pinned entry related stats. + * Programmer: John Mainzer, 5/17/04 * - * JRM -- 8/9/06 - * More pinned entry stats related updates. + * Modifications: * - * JRM -- 3/31/07 - * Updated H5C__UPDATE_STATS_FOR_PROTECT() to keep stats on - * read and write protects. + * JRM - 7/27/04 + * Converted the function H5C_update_rp_for_insertion() to the + * macro H5C__UPDATE_RP_FOR_INSERTION in an effort to squeeze + * a bit more performance out of the cache. * - * MAM -- 1/15/09 - * Created H5C__UPDATE_MAX_INDEX_SIZE_STATS to contain - * common code within macros that update the maximum - * index, clean_index, and dirty_index statistics fields. 
+ * At least for the first cut, I am leaving the comments and + * white space in the macro. If they cause dificulties with + * pre-processor, I'll have to remove them. * - ***********************************************************************/ - -#define H5C__UPDATE_CACHE_HIT_RATE_STATS(cache_ptr, hit) \ - (cache_ptr->cache_accesses)++; \ - if ( hit ) { \ - (cache_ptr->cache_hits)++; \ - } \ - -#if H5C_COLLECT_CACHE_STATS - -#define H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ - if ( (cache_ptr)->index_size > (cache_ptr)->max_index_size ) \ - (cache_ptr)->max_index_size = (cache_ptr)->index_size; \ - if ( (cache_ptr)->clean_index_size > \ - (cache_ptr)->max_clean_index_size ) \ - (cache_ptr)->max_clean_index_size = \ - (cache_ptr)->clean_index_size; \ - if ( (cache_ptr)->dirty_index_size > \ - (cache_ptr)->max_dirty_index_size ) \ - (cache_ptr)->max_dirty_index_size = \ - (cache_ptr)->dirty_index_size; - -#define H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr) \ - (((cache_ptr)->dirty_pins)[(entry_ptr)->type->id])++; - -#define H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr) \ - if ( (cache_ptr)->slist_len > (cache_ptr)->max_slist_len ) \ - (cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \ - if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \ - (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \ - if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \ - (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \ - if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \ - (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; - -#define H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr) \ - if ( cache_ptr->flush_in_progress ) { \ - ((cache_ptr)->cache_flush_moves[(entry_ptr)->type->id])++; \ - } \ - if ( entry_ptr->flush_in_progress ) { \ - ((cache_ptr)->entry_flush_moves[(entry_ptr)->type->id])++; \ - } \ - (((cache_ptr)->moves)[(entry_ptr)->type->id])++; - -#define H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size)\ - if ( cache_ptr->flush_in_progress ) { \ - ((cache_ptr)->cache_flush_size_changes[(entry_ptr)->type->id])++; \ - } \ - if ( entry_ptr->flush_in_progress ) { \ - ((cache_ptr)->entry_flush_size_changes[(entry_ptr)->type->id])++; \ - } \ - if ( (entry_ptr)->size < (new_size) ) { \ - ((cache_ptr)->size_increases[(entry_ptr)->type->id])++; \ - H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ - if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \ - (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \ - if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size ) \ - (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \ - } else if ( (entry_ptr)->size > (new_size) ) { \ - ((cache_ptr)->size_decreases[(entry_ptr)->type->id])++; \ - } - -#define H5C__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr) \ - (cache_ptr)->total_ht_insertions++; - -#define H5C__UPDATE_STATS_FOR_HT_DELETION(cache_ptr) \ - (cache_ptr)->total_ht_deletions++; - -#define H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, success, depth) \ - if ( success ) { \ - (cache_ptr)->successful_ht_searches++; \ - (cache_ptr)->total_successful_ht_search_depth += depth; \ - } else { \ - (cache_ptr)->failed_ht_searches++; \ - (cache_ptr)->total_failed_ht_search_depth += depth; \ - } - -#define H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr) \ - ((cache_ptr)->unpins)[(entry_ptr)->type->id]++; - -#if H5C_COLLECT_CACHE_ENTRY_STATS - -#define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr) \ -{ \ - (entry_ptr)->accesses = 0; \ - (entry_ptr)->clears = 0; \ - (entry_ptr)->flushes = 0; \ - (entry_ptr)->pins = 0; \ -} - 
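The hit-rate macro in the removed block above does nothing more than bump an access counter and, on a hit, a hit counter; the hit rate itself is the ratio of the two. A minimal sketch of that bookkeeping, with a hypothetical struct standing in for the relevant H5C_t fields:

#include <stdint.h>

struct hit_stats {
    int64_t accesses;   /* total cache accesses               */
    int64_t hits;       /* accesses satisfied from the cache  */
};

static void
record_access(struct hit_stats *s, int hit)
{
    s->accesses++;
    if (hit)
        s->hits++;
}

static double
hit_rate(const struct hit_stats *s)
{
    /* Guard against division by zero before any accesses occur. */
    return (s->accesses > 0) ? (double)s->hits / (double)s->accesses : 0.0;
}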
-#define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) \ -{ \ - (((cache_ptr)->clears)[(entry_ptr)->type->id])++; \ - if ( (entry_ptr)->is_pinned ) \ - (((cache_ptr)->pinned_clears)[(entry_ptr)->type->id])++; \ - ((entry_ptr)->clears)++; \ -} - -#define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) \ -{ \ - (((cache_ptr)->flushes)[(entry_ptr)->type->id])++; \ - if ( (entry_ptr)->is_pinned ) \ - (((cache_ptr)->pinned_flushes)[(entry_ptr)->type->id])++; \ - ((entry_ptr)->flushes)++; \ -} - -#define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr) \ -{ \ - (((cache_ptr)->evictions)[(entry_ptr)->type->id])++; \ - if ( (entry_ptr)->accesses > \ - ((cache_ptr)->max_accesses)[(entry_ptr)->type->id] ) \ - ((cache_ptr)->max_accesses)[(entry_ptr)->type->id] = \ - (entry_ptr)->accesses; \ - if ( (entry_ptr)->accesses < \ - ((cache_ptr)->min_accesses)[(entry_ptr)->type->id] ) \ - ((cache_ptr)->min_accesses)[(entry_ptr)->type->id] = \ - (entry_ptr)->accesses; \ - if ( (entry_ptr)->clears > \ - ((cache_ptr)->max_clears)[(entry_ptr)->type->id] ) \ - ((cache_ptr)->max_clears)[(entry_ptr)->type->id] \ - = (entry_ptr)->clears; \ - if ( (entry_ptr)->flushes > \ - ((cache_ptr)->max_flushes)[(entry_ptr)->type->id] ) \ - ((cache_ptr)->max_flushes)[(entry_ptr)->type->id] \ - = (entry_ptr)->flushes; \ - if ( (entry_ptr)->size > \ - ((cache_ptr)->max_size)[(entry_ptr)->type->id] ) \ - ((cache_ptr)->max_size)[(entry_ptr)->type->id] \ - = (entry_ptr)->size; \ - if ( (entry_ptr)->pins > \ - ((cache_ptr)->max_pins)[(entry_ptr)->type->id] ) \ - ((cache_ptr)->max_pins)[(entry_ptr)->type->id] \ - = (entry_ptr)->pins; \ -} + * JRM - 7/28/04 + * Split macro into two version, one supporting the clean and + * dirty LRU lists, and the other not. Yet another attempt + * at optimization. + * + * JRM - 3/10/06 + * This macro should never be called on a pinned entry. + * Inserted an assert to verify this. + * + * JRM - 8/9/06 + * Not any more. We must now allow insertion of pinned + * entries. Updated macro to support this. + * + * JRM - 3/28/07 + * Added sanity checks using the new is_read_only and + * ro_ref_count fields of struct H5C_cache_entry_t. 
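As the modification log above notes, pinned entries may now be inserted directly; they go on the pinned entry list, while unpinned entries start at the head of the LRU, exactly as in the macro that follows. A hypothetical plain-function gloss on that decision (singly linked lists are enough for the sketch; the real macros also maintain the clean and dirty auxiliary lists in one configuration):

#include <stddef.h>

struct entry {
    int           is_pinned;
    size_t        size;
    struct entry *next;              /* singly linked is enough here    */
};

struct cache_lists {
    struct entry *pel_head;          /* pinned entry list               */
    struct entry *lru_head;          /* main LRU, head == most recent   */
};

static void
rp_insert(struct cache_lists *c, struct entry *e)
{
    if (e->is_pinned) {
        /* Pinned entries are tracked on the pinned entry list and are
         * never candidates for eviction.
         */
        e->next = c->pel_head;
        c->pel_head = e;
    } else {
        /* Everything else becomes the most recently used entry. */
        e->next = c->lru_head;
        c->lru_head = e;
    }
}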
+ * + *------------------------------------------------------------------------- + */ -#define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) \ -{ \ - (((cache_ptr)->insertions)[(entry_ptr)->type->id])++; \ - if ( (entry_ptr)->is_pinned ) { \ - (((cache_ptr)->pinned_insertions)[(entry_ptr)->type->id])++; \ - ((cache_ptr)->pins)[(entry_ptr)->type->id]++; \ - (entry_ptr)->pins++; \ - if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \ - (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \ - if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \ - (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \ - } \ - if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \ - (cache_ptr)->max_index_len = (cache_ptr)->index_len; \ - H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ - if ( (cache_ptr)->slist_len > (cache_ptr)->max_slist_len ) \ - (cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \ - if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \ - (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \ - if ( (entry_ptr)->size > \ - ((cache_ptr)->max_size)[(entry_ptr)->type->id] ) \ - ((cache_ptr)->max_size)[(entry_ptr)->type->id] \ - = (entry_ptr)->size; \ - } \ -} +#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS -#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \ -{ \ - if ( hit ) \ - ((cache_ptr)->hits)[(entry_ptr)->type->id]++; \ - else \ - ((cache_ptr)->misses)[(entry_ptr)->type->id]++; \ - if ( ! ((entry_ptr)->is_read_only) ) { \ - ((cache_ptr)->write_protects)[(entry_ptr)->type->id]++; \ - } else { \ - ((cache_ptr)->read_protects)[(entry_ptr)->type->id]++; \ - if ( ((entry_ptr)->ro_ref_count) > \ - ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] ) \ - ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] = \ - ((entry_ptr)->ro_ref_count); \ - } \ - if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \ - (cache_ptr)->max_index_len = (cache_ptr)->index_len; \ - H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ - if ( (cache_ptr)->pl_len > (cache_ptr)->max_pl_len ) \ - (cache_ptr)->max_pl_len = (cache_ptr)->pl_len; \ - if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size ) \ - (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \ - if ( (entry_ptr)->size > \ - ((cache_ptr)->max_size)[(entry_ptr)->type->id] ) \ - ((cache_ptr)->max_size)[(entry_ptr)->type->id] = (entry_ptr)->size; \ - ((entry_ptr)->accesses)++; \ +#define H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, fail_val) \ +{ \ + HDassert( (cache_ptr) ); \ + HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ + HDassert( (entry_ptr) ); \ + HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ + HDassert( (entry_ptr)->size > 0 ); \ + \ + if ( (entry_ptr)->is_pinned ) { \ + \ + H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \ + (cache_ptr)->pel_tail_ptr, \ + (cache_ptr)->pel_len, \ + (cache_ptr)->pel_size, (fail_val)) \ + \ + } else { \ + \ + /* modified LRU specific code */ \ + \ + /* insert the entry at the head of the LRU list. */ \ + \ + H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ + \ + /* insert the entry at the head of the clean or dirty LRU list as \ + * appropriate. 
\ + */ \ + \ + if ( entry_ptr->is_dirty ) { \ + H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ + (cache_ptr)->dLRU_tail_ptr, \ + (cache_ptr)->dLRU_list_len, \ + (cache_ptr)->dLRU_list_size, (fail_val)) \ + } else { \ + H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ + (cache_ptr)->cLRU_tail_ptr, \ + (cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, (fail_val)) \ + } \ + \ + /* End modified LRU specific code. */ \ + } \ } -#define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) \ -{ \ - ((cache_ptr)->pins)[(entry_ptr)->type->id]++; \ - (entry_ptr)->pins++; \ - if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \ - (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \ - if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \ - (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \ +#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ + +#define H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, fail_val) \ +{ \ + HDassert( (cache_ptr) ); \ + HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ + HDassert( (entry_ptr) ); \ + HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ + HDassert( (entry_ptr)->size > 0 ); \ + \ + if ( (entry_ptr)->is_pinned ) { \ + \ + H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \ + (cache_ptr)->pel_tail_ptr, \ + (cache_ptr)->pel_len, \ + (cache_ptr)->pel_size, (fail_val)) \ + \ + } else { \ + \ + /* modified LRU specific code */ \ + \ + /* insert the entry at the head of the LRU list. */ \ + \ + H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ + \ + /* End modified LRU specific code. */ \ + } \ } -#else /* H5C_COLLECT_CACHE_ENTRY_STATS */ +#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ -#define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr) + +/*------------------------------------------------------------------------- + * + * Macro: H5C__UPDATE_RP_FOR_PROTECT + * + * Purpose: Update the replacement policy data structures for a + * protect of the specified cache entry. + * + * To do this, unlink the specified entry from any data + * structures used by the replacement policy, and add the + * entry to the protected list. + * + * At present, we only support the modified LRU policy, so + * this function deals with that case unconditionally. If + * we ever support other replacement policies, the function + * should switch on the current policy and act accordingly. + * + * Return: N/A + * + * Programmer: John Mainzer, 5/17/04 + * + * Modifications: + * + * JRM - 7/27/04 + * Converted the function H5C_update_rp_for_protect() to the + * macro H5C__UPDATE_RP_FOR_PROTECT in an effort to squeeze + * a bit more performance out of the cache. + * + * At least for the first cut, I am leaving the comments and + * white space in the macro. If they cause dificulties with + * pre-processor, I'll have to remove them. + * + * JRM - 7/28/04 + * Split macro into two version, one supporting the clean and + * dirty LRU lists, and the other not. Yet another attempt + * at optimization. + * + * JRM - 3/17/06 + * Modified macro to attempt to remove pinned entriese from + * the pinned entry list instead of from the data structures + * maintained by the replacement policy. + * + * JRM - 3/28/07 + * Added sanity checks based on the new is_read_only and + * ro_ref_count fields of struct H5C_cache_entry_t. 
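The is_read_only / ro_ref_count pair referenced in these sanity checks supports multiple concurrent read-only protects of the same entry: the count tracks outstanding read-only protects, and the entry only becomes unprotected again when it drops back to zero. A hypothetical sketch of that counting (the flag names come from the entry structure, but the helper functions here are illustrative, not the actual H5C protect/unprotect logic):

#include <assert.h>

struct entry_prot {
    int is_protected;    /* protected for read or write access  */
    int is_read_only;    /* protected for read-only access      */
    int ro_ref_count;    /* outstanding read-only protects      */
};

static void
protect_read_only(struct entry_prot *e)
{
    /* A read-only protect may stack on other read-only protects, but
     * never on a write protect.
     */
    assert(!e->is_protected || e->is_read_only);
    e->is_protected = 1;
    e->is_read_only = 1;
    e->ro_ref_count++;
}

static void
unprotect_read_only(struct entry_prot *e)
{
    assert(e->is_read_only && e->ro_ref_count > 0);
    if (--e->ro_ref_count == 0) {
        e->is_read_only = 0;
        e->is_protected = 0;
    }
}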
+ * + *------------------------------------------------------------------------- + */ -#define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) \ -{ \ - if ( (entry_ptr)->is_pinned ) \ - (((cache_ptr)->pinned_clears)[(entry_ptr)->type->id])++; \ - (((cache_ptr)->clears)[(entry_ptr)->type->id])++; \ -} +#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS + +#define H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, fail_val) \ +{ \ + HDassert( (cache_ptr) ); \ + HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ + HDassert( (entry_ptr) ); \ + HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ + HDassert( (entry_ptr)->size > 0 ); \ + \ + if ( (entry_ptr)->is_pinned ) { \ + \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \ + (cache_ptr)->pel_tail_ptr, \ + (cache_ptr)->pel_len, \ + (cache_ptr)->pel_size, (fail_val)) \ + HDassert( (cache_ptr)->pel_len >= 0 ); \ + \ + } else { \ + \ + /* modified LRU specific code */ \ + \ + /* remove the entry from the LRU list. */ \ + \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ + \ + /* Similarly, remove the entry from the clean or dirty LRU list \ + * as appropriate. \ + */ \ + \ + if ( (entry_ptr)->is_dirty ) { \ + \ + H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ + (cache_ptr)->dLRU_tail_ptr, \ + (cache_ptr)->dLRU_list_len, \ + (cache_ptr)->dLRU_list_size, (fail_val)) \ + \ + } else { \ + \ + H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ + (cache_ptr)->cLRU_tail_ptr, \ + (cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, (fail_val)) \ + } \ + \ + /* End modified LRU specific code. */ \ + } \ + \ + /* Regardless of the replacement policy, or whether the entry is \ + * pinned, now add the entry to the protected list. \ + */ \ + \ + H5C__DLL_APPEND((entry_ptr), (cache_ptr)->pl_head_ptr, \ + (cache_ptr)->pl_tail_ptr, \ + (cache_ptr)->pl_len, \ + (cache_ptr)->pl_size, (fail_val)) \ +} /* H5C__UPDATE_RP_FOR_PROTECT */ -#define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) \ -{ \ - (((cache_ptr)->flushes)[(entry_ptr)->type->id])++; \ - if ( (entry_ptr)->is_pinned ) \ - (((cache_ptr)->pinned_flushes)[(entry_ptr)->type->id])++; \ -} +#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ -#define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr) \ - (((cache_ptr)->evictions)[(entry_ptr)->type->id])++; +#define H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, fail_val) \ +{ \ + HDassert( (cache_ptr) ); \ + HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ + HDassert( (entry_ptr) ); \ + HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ + HDassert( (entry_ptr)->size > 0 ); \ + \ + if ( (entry_ptr)->is_pinned ) { \ + \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \ + (cache_ptr)->pel_tail_ptr, \ + (cache_ptr)->pel_len, \ + (cache_ptr)->pel_size, (fail_val)) \ + HDassert( (cache_ptr)->pel_len >= 0 ); \ + \ + } else { \ + \ + /* modified LRU specific code */ \ + \ + /* remove the entry from the LRU list. */ \ + \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ + \ + /* End modified LRU specific code. 
*/ \ + } \ + \ + /* Regardless of the replacement policy, or whether the entry is \ + * pinned, now add the entry to the protected list. \ + */ \ + \ + H5C__DLL_APPEND((entry_ptr), (cache_ptr)->pl_head_ptr, \ + (cache_ptr)->pl_tail_ptr, \ + (cache_ptr)->pl_len, \ + (cache_ptr)->pl_size, (fail_val)) \ +} /* H5C__UPDATE_RP_FOR_PROTECT */ -#define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) \ -{ \ - (((cache_ptr)->insertions)[(entry_ptr)->type->id])++; \ - if ( (entry_ptr)->is_pinned ) { \ - (((cache_ptr)->pinned_insertions)[(entry_ptr)->type->id])++; \ - ((cache_ptr)->pins)[(entry_ptr)->type->id]++; \ - if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \ - (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \ - if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \ - (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \ - } \ - if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \ - (cache_ptr)->max_index_len = (cache_ptr)->index_len; \ - H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ - if ( (cache_ptr)->slist_len > (cache_ptr)->max_slist_len ) \ - (cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \ - if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \ - (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \ -} +#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ -#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \ -{ \ - if ( hit ) \ - ((cache_ptr)->hits)[(entry_ptr)->type->id]++; \ - else \ - ((cache_ptr)->misses)[(entry_ptr)->type->id]++; \ - if ( ! ((entry_ptr)->is_read_only) ) \ - ((cache_ptr)->write_protects)[(entry_ptr)->type->id]++; \ - else { \ - ((cache_ptr)->read_protects)[(entry_ptr)->type->id]++; \ - if ( ((entry_ptr)->ro_ref_count) > \ - ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] ) \ - ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] = \ - ((entry_ptr)->ro_ref_count); \ - } \ - if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \ - (cache_ptr)->max_index_len = (cache_ptr)->index_len; \ - H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \ - if ( (cache_ptr)->pl_len > (cache_ptr)->max_pl_len ) \ - (cache_ptr)->max_pl_len = (cache_ptr)->pl_len; \ - if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size ) \ - (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \ -} + +/*------------------------------------------------------------------------- + * + * Macro: H5C__UPDATE_RP_FOR_MOVE + * + * Purpose: Update the replacement policy data structures for a + * move of the specified cache entry. + * + * At present, we only support the modified LRU policy, so + * this function deals with that case unconditionally. If + * we ever support other replacement policies, the function + * should switch on the current policy and act accordingly. 
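One wrinkle in the move case is that the entry's dirty state may have changed by the time the move is recorded, so the macro below takes the old state as a was_dirty parameter: bookkeeping is removed under the old state and re-added under the current is_dirty flag. The pattern in isolation, with plain counters standing in for the clean and dirty LRU lists:

#include <stddef.h>

struct aux_tallies {
    size_t clean_size;   /* bytes on the clean LRU */
    size_t dirty_size;   /* bytes on the dirty LRU */
};

static void
move_update(struct aux_tallies *t, size_t entry_size,
            int was_dirty, int is_dirty)
{
    /* Remove the entry's size from the list it used to be on ... */
    if (was_dirty)
        t->dirty_size -= entry_size;
    else
        t->clean_size -= entry_size;

    /* ... and add it to the list it belongs on now. */
    if (is_dirty)
        t->dirty_size += entry_size;
    else
        t->clean_size += entry_size;
}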
+ * + * Return: N/A + * + * Programmer: John Mainzer, 5/17/04 + * + *------------------------------------------------------------------------- + */ -#define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) \ -{ \ - ((cache_ptr)->pins)[(entry_ptr)->type->id]++; \ - if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \ - (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \ - if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \ - (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \ -} +#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS -#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */ +#define H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, fail_val) \ +{ \ + HDassert( (cache_ptr) ); \ + HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ + HDassert( (entry_ptr) ); \ + HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ + HDassert( (entry_ptr)->size > 0 ); \ + \ + if ( ! ( (entry_ptr)->is_pinned ) ) { \ + \ + /* modified LRU specific code */ \ + \ + /* remove the entry from the LRU list, and re-insert it at the head. \ + */ \ + \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ + \ + H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ + \ + /* remove the entry from either the clean or dirty LUR list as \ + * indicated by the was_dirty parameter \ + */ \ + if ( was_dirty ) { \ + \ + H5C__AUX_DLL_REMOVE((entry_ptr), \ + (cache_ptr)->dLRU_head_ptr, \ + (cache_ptr)->dLRU_tail_ptr, \ + (cache_ptr)->dLRU_list_len, \ + (cache_ptr)->dLRU_list_size, \ + (fail_val)) \ + \ + } else { \ + \ + H5C__AUX_DLL_REMOVE((entry_ptr), \ + (cache_ptr)->cLRU_head_ptr, \ + (cache_ptr)->cLRU_tail_ptr, \ + (cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, \ + (fail_val)) \ + } \ + \ + /* insert the entry at the head of either the clean or dirty \ + * LRU list as appropriate. \ + */ \ + \ + if ( (entry_ptr)->is_dirty ) { \ + \ + H5C__AUX_DLL_PREPEND((entry_ptr), \ + (cache_ptr)->dLRU_head_ptr, \ + (cache_ptr)->dLRU_tail_ptr, \ + (cache_ptr)->dLRU_list_len, \ + (cache_ptr)->dLRU_list_size, \ + (fail_val)) \ + \ + } else { \ + \ + H5C__AUX_DLL_PREPEND((entry_ptr), \ + (cache_ptr)->cLRU_head_ptr, \ + (cache_ptr)->cLRU_tail_ptr, \ + (cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, \ + (fail_val)) \ + } \ + \ + /* End modified LRU specific code. 
*/ \ + } \ +} /* H5C__UPDATE_RP_FOR_MOVE */ -#else /* H5C_COLLECT_CACHE_STATS */ +#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ -#define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr) -#define H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr) -#define H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr) -#define H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr) -#define H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size) -#define H5C__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr) -#define H5C__UPDATE_STATS_FOR_HT_DELETION(cache_ptr) -#define H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, success, depth) -#define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) -#define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) -#define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) -#define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr) -#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) -#define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) -#define H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr) +#define H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, fail_val) \ +{ \ + HDassert( (cache_ptr) ); \ + HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ + HDassert( (entry_ptr) ); \ + HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ + HDassert( (entry_ptr)->size > 0 ); \ + \ + if ( ! ( (entry_ptr)->is_pinned ) ) { \ + \ + /* modified LRU specific code */ \ + \ + /* remove the entry from the LRU list, and re-insert it at the head. \ + */ \ + \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ + \ + H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ + \ + /* End modified LRU specific code. */ \ + } \ +} /* H5C__UPDATE_RP_FOR_MOVE */ -#endif /* H5C_COLLECT_CACHE_STATS */ +#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ -/*********************************************************************** +/*------------------------------------------------------------------------- + * + * Macro: H5C__UPDATE_RP_FOR_SIZE_CHANGE + * + * Purpose: Update the replacement policy data structures for a + * size change of the specified cache entry. * - * Hash table access and manipulation macros: + * To do this, determine if the entry is pinned. If it is, + * update the size of the pinned entry list. * - * The following macros handle searches, insertions, and deletion in - * the hash table. + * If it isn't pinned, the entry must handled by the + * replacement policy. Update the appropriate replacement + * policy data structures. * - * When modifying these macros, remember to modify the similar macros - * in tst/cache.c + * At present, we only support the modified LRU policy, so + * this function deals with that case unconditionally. If + * we ever support other replacement policies, the function + * should switch on the current policy and act accordingly. * - * Changes: + * Return: N/A * - * - Updated existing index macros and sanity check macros to maintain - * the clean_index_size and dirty_index_size fields of H5C_t. Also - * added macros to allow us to track entry cleans and dirties. 
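The sanity checks in the removed index macros repeatedly assert the invariant index_size == clean_index_size + dirty_index_size. A minimal sketch of preserving that invariant when an entry's dirty state flips, with simplified stand-in fields rather than the real H5C_t members:

#include <assert.h>
#include <stddef.h>

struct index_sizes {
    size_t total;    /* index_size        */
    size_t clean;    /* clean_index_size  */
    size_t dirty;    /* dirty_index_size  */
};

static void
mark_entry_dirty(struct index_sizes *ix, size_t entry_size)
{
    assert(ix->clean >= entry_size);
    ix->clean -= entry_size;
    ix->dirty += entry_size;
    assert(ix->total == ix->clean + ix->dirty);   /* the invariant */
}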
+ * Programmer: John Mainzer, 8/23/06 * - * JRM -- 11/5/08 + * Modifications: * - ***********************************************************************/ - -/* H5C__HASH_TABLE_LEN is defined in H5Cpkg.h. It mut be a power of two. */ - -#define H5C__HASH_MASK ((size_t)(H5C__HASH_TABLE_LEN - 1) << 3) - -#define H5C__HASH_FCN(x) (int)(((x) & H5C__HASH_MASK) >> 3) - -#if H5C_DO_SANITY_CHECKS - -#define H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \ -if ( ( (cache_ptr) == NULL ) || \ - ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \ - ( (entry_ptr) == NULL ) || \ - ( ! H5F_addr_defined((entry_ptr)->addr) ) || \ - ( (entry_ptr)->ht_next != NULL ) || \ - ( (entry_ptr)->ht_prev != NULL ) || \ - ( (entry_ptr)->size <= 0 ) || \ - ( (k = H5C__HASH_FCN((entry_ptr)->addr)) < 0 ) || \ - ( k >= H5C__HASH_TABLE_LEN ) || \ - ( (cache_ptr)->index_size != \ - ((cache_ptr)->clean_index_size + \ - (cache_ptr)->dirty_index_size) ) ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \ - "Pre HT insert SC failed") \ -} - -#define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr) \ -if ( ( (cache_ptr) == NULL ) || \ - ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \ - ( (cache_ptr)->index_len < 1 ) || \ - ( (entry_ptr) == NULL ) || \ - ( (cache_ptr)->index_size < (entry_ptr)->size ) || \ - ( ! H5F_addr_defined((entry_ptr)->addr) ) || \ - ( (entry_ptr)->size <= 0 ) || \ - ( H5C__HASH_FCN((entry_ptr)->addr) < 0 ) || \ - ( H5C__HASH_FCN((entry_ptr)->addr) >= H5C__HASH_TABLE_LEN ) || \ - ( ((cache_ptr)->index)[(H5C__HASH_FCN((entry_ptr)->addr))] \ - == NULL ) || \ - ( ( ((cache_ptr)->index)[(H5C__HASH_FCN((entry_ptr)->addr))] \ - != (entry_ptr) ) && \ - ( (entry_ptr)->ht_prev == NULL ) ) || \ - ( ( ((cache_ptr)->index)[(H5C__HASH_FCN((entry_ptr)->addr))] == \ - (entry_ptr) ) && \ - ( (entry_ptr)->ht_prev != NULL ) ) || \ - ( (cache_ptr)->index_size != \ - ((cache_ptr)->clean_index_size + \ - (cache_ptr)->dirty_index_size) ) ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Pre HT remove SC failed") \ -} - -/* (Keep in sync w/H5C_TEST__PRE_HT_SEARCH_SC macro in test/cache_common.h -QAK) */ -#define H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \ -if ( ( (cache_ptr) == NULL ) || \ - ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \ - ( (cache_ptr)->index_size != \ - ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \ - ( ! 
H5F_addr_defined(Addr) ) || \ - ( H5C__HASH_FCN(Addr) < 0 ) || \ - ( H5C__HASH_FCN(Addr) >= H5C__HASH_TABLE_LEN ) ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "Pre HT search SC failed") \ -} - -/* (Keep in sync w/H5C_TEST__POST_SUC_HT_SEARCH_SC macro in test/cache_common.h -QAK) */ -#define H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k, fail_val) \ -if ( ( (cache_ptr) == NULL ) || \ - ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \ - ( (cache_ptr)->index_len < 1 ) || \ - ( (entry_ptr) == NULL ) || \ - ( (cache_ptr)->index_size < (entry_ptr)->size ) || \ - ( (cache_ptr)->index_size != \ - ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \ - ( H5F_addr_ne((entry_ptr)->addr, (Addr)) ) || \ - ( (entry_ptr)->size <= 0 ) || \ - ( ((cache_ptr)->index)[k] == NULL ) || \ - ( ( ((cache_ptr)->index)[k] != (entry_ptr) ) && \ - ( (entry_ptr)->ht_prev == NULL ) ) || \ - ( ( ((cache_ptr)->index)[k] == (entry_ptr) ) && \ - ( (entry_ptr)->ht_prev != NULL ) ) || \ - ( ( (entry_ptr)->ht_prev != NULL ) && \ - ( (entry_ptr)->ht_prev->ht_next != (entry_ptr) ) ) || \ - ( ( (entry_ptr)->ht_next != NULL ) && \ - ( (entry_ptr)->ht_next->ht_prev != (entry_ptr) ) ) ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \ - "Post successful HT search SC failed") \ -} - -/* (Keep in sync w/H5C_TEST__POST_HT_SHIFT_TO_FRONT macro in test/cache_common.h -QAK) */ -#define H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \ -if ( ( (cache_ptr) == NULL ) || \ - ( ((cache_ptr)->index)[k] != (entry_ptr) ) || \ - ( (entry_ptr)->ht_prev != NULL ) ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \ - "Post HT shift to front SC failed") \ -} + * JRM -- 3/28/07 + * Added sanity checks based on the new is_read_only and + * ro_ref_count fields of struct H5C_cache_entry_t. 
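The hash-table sanity checks above all route through H5C__HASH_FCN, which masks the entry's file address and shifts away the low three bits to pick a bucket; H5C__HASH_TABLE_LEN must be a power of two for the mask to work. A standalone sketch of that bucket selection with hypothetical names and a hypothetical table length:

#include <stdint.h>

#define TABLE_LEN  1024u                           /* must be a power of two */
#define HASH_MASK  ((uint64_t)(TABLE_LEN - 1) << 3)

/* Mirror of the H5C__HASH_FCN pattern: mask, then shift off the low bits. */
static unsigned
bucket_of(uint64_t addr)
{
    return (unsigned)((addr & HASH_MASK) >> 3);    /* 0 .. TABLE_LEN-1 */
}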
+ * + *------------------------------------------------------------------------- + */ -#define H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \ - entry_ptr, was_clean) \ -if ( ( (cache_ptr) == NULL ) || \ - ( (cache_ptr)->index_len <= 0 ) || \ - ( (cache_ptr)->index_size <= 0 ) || \ - ( (new_size) <= 0 ) || \ - ( (old_size) > (cache_ptr)->index_size ) || \ - ( (new_size) <= 0 ) || \ - ( ( (cache_ptr)->index_len == 1 ) && \ - ( (cache_ptr)->index_size != (old_size) ) ) || \ - ( (cache_ptr)->index_size != \ - ((cache_ptr)->clean_index_size + \ - (cache_ptr)->dirty_index_size) ) || \ - ( (entry_ptr == NULL) ) || \ - ( ( !( was_clean ) || \ - ( (cache_ptr)->clean_index_size < (old_size) ) ) && \ - ( ( (was_clean) ) || \ - ( (cache_ptr)->dirty_index_size < (old_size) ) ) ) || \ - ( (entry_ptr) == NULL ) ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ - "Pre HT entry size change SC failed") \ -} +#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS -#define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \ - entry_ptr) \ -if ( ( (cache_ptr) == NULL ) || \ - ( (cache_ptr)->index_len <= 0 ) || \ - ( (cache_ptr)->index_size <= 0 ) || \ - ( (new_size) > (cache_ptr)->index_size ) || \ - ( (cache_ptr)->index_size != \ - ((cache_ptr)->clean_index_size + \ - (cache_ptr)->dirty_index_size) ) || \ - ( ( !((entry_ptr)->is_dirty ) || \ - ( (cache_ptr)->dirty_index_size < (new_size) ) ) && \ - ( ( ((entry_ptr)->is_dirty) ) || \ - ( (cache_ptr)->clean_index_size < (new_size) ) ) ) || \ - ( ( (cache_ptr)->index_len == 1 ) && \ - ( (cache_ptr)->index_size != (new_size) ) ) ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ - "Post HT entry size change SC failed") \ -} +#define H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_size) \ +{ \ + HDassert( (cache_ptr) ); \ + HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ + HDassert( (entry_ptr) ); \ + HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ + HDassert( (entry_ptr)->size > 0 ); \ + HDassert( new_size > 0 ); \ + \ + if ( (entry_ptr)->is_pinned ) { \ + \ + H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->pel_len, \ + (cache_ptr)->pel_size, \ + (entry_ptr)->size, \ + (new_size)); \ + \ + } else { \ + \ + /* modified LRU specific code */ \ + \ + /* Update the size of the LRU list */ \ + \ + H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, \ + (entry_ptr)->size, \ + (new_size)); \ + \ + /* Similarly, update the size of the clean or dirty LRU list as \ + * appropriate. At present, the entry must be clean, but that \ + * could change. \ + */ \ + \ + if ( (entry_ptr)->is_dirty ) { \ + \ + H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->dLRU_list_len, \ + (cache_ptr)->dLRU_list_size, \ + (entry_ptr)->size, \ + (new_size)); \ + \ + } else { \ + \ + H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, \ + (entry_ptr)->size, \ + (new_size)); \ + } \ + \ + /* End modified LRU specific code. 
*/ \ + } \ + \ +} /* H5C__UPDATE_RP_FOR_SIZE_CHANGE */ -#define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) \ -if ( \ - ( (cache_ptr) == NULL ) || \ - ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \ - ( (cache_ptr)->index_len <= 0 ) || \ - ( (entry_ptr) == NULL ) || \ - ( (entry_ptr)->is_dirty != FALSE ) || \ - ( (cache_ptr)->index_size < (entry_ptr)->size ) || \ - ( (cache_ptr)->dirty_index_size < (entry_ptr)->size ) || \ - ( (cache_ptr)->index_size != \ - ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ - "Pre HT update for entry clean SC failed") \ -} +#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ -#define H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) \ -if ( \ - ( (cache_ptr) == NULL ) || \ - ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \ - ( (cache_ptr)->index_len <= 0 ) || \ - ( (entry_ptr) == NULL ) || \ - ( (entry_ptr)->is_dirty != TRUE ) || \ - ( (cache_ptr)->index_size < (entry_ptr)->size ) || \ - ( (cache_ptr)->clean_index_size < (entry_ptr)->size ) || \ - ( (cache_ptr)->index_size != \ - ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ - "Pre HT update for entry dirty SC failed") \ -} +#define H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_size) \ +{ \ + HDassert( (cache_ptr) ); \ + HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ + HDassert( (entry_ptr) ); \ + HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ + HDassert( (entry_ptr)->size > 0 ); \ + HDassert( new_size > 0 ); \ + \ + if ( (entry_ptr)->is_pinned ) { \ + \ + H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->pel_len, \ + (cache_ptr)->pel_size, \ + (entry_ptr)->size, \ + (new_size)); \ + \ + } else { \ + \ + /* modified LRU specific code */ \ + \ + /* Update the size of the LRU list */ \ + \ + H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, \ + (entry_ptr)->size, \ + (new_size)); \ + \ + /* End modified LRU specific code. */ \ + } \ + \ +} /* H5C__UPDATE_RP_FOR_SIZE_CHANGE */ -#define H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) \ -if ( (cache_ptr)->index_size != \ - ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ - "Post HT update for entry clean SC failed") \ -} +#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ -#define H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) \ -if ( (cache_ptr)->index_size != \ - ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) { \ - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ - "Post HT update for entry dirty SC failed") \ -} + +/*------------------------------------------------------------------------- + * + * Macro: H5C__UPDATE_RP_FOR_UNPIN + * + * Purpose: Update the replacement policy data structures for an + * unpin of the specified cache entry. + * + * To do this, unlink the specified entry from the protected + * entry list, and re-insert it in the data structures used + * by the current replacement policy. + * + * At present, we only support the modified LRU policy, so + * this function deals with that case unconditionally. If + * we ever support other replacement policies, the macro + * should switch on the current policy and act accordingly. 
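Each of these macros carries the same caveat about future replacement policies. Purely as a hypothetical illustration of what "switch on the current policy" could look like (nothing of the sort exists in the library today):

/* Hypothetical sketch only: the library currently hard-codes the modified
 * LRU policy; the enum and function below are not part of H5C.
 */
enum repl_policy { RP_MODIFIED_LRU /* , RP_CLOCK, RP_ARC, ... */ };

static int
update_rp_for_unpin(enum repl_policy policy)
{
    switch (policy) {
        case RP_MODIFIED_LRU:
            /* re-insert the unpinned entry at the head of the LRU list */
            return 0;
        default:
            /* unknown policy: report an error to the caller */
            return -1;
    }
}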
+ * + * Return: N/A + * + * Programmer: John Mainzer, 3/22/06 + * + * Modifications: + * + * JRM -- 3/28/07 + * Added sanity checks based on the new is_read_only and + * ro_ref_count fields of struct H5C_cache_entry_t. + * + *------------------------------------------------------------------------- + */ -#else /* H5C_DO_SANITY_CHECKS */ +#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS -#define H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) -#define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr) -#define H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) -#define H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k, fail_val) -#define H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) -#define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) -#define H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) -#define H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \ - entry_ptr, was_clean) -#define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \ - entry_ptr) -#define H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) -#define H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) +#define H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, fail_val) \ +{ \ + HDassert( (cache_ptr) ); \ + HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ + HDassert( (entry_ptr) ); \ + HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ + HDassert( (entry_ptr)->is_pinned); \ + HDassert( (entry_ptr)->size > 0 ); \ + \ + /* Regardless of the replacement policy, remove the entry from the \ + * pinned entry list. \ + */ \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \ + (cache_ptr)->pel_tail_ptr, (cache_ptr)->pel_len, \ + (cache_ptr)->pel_size, (fail_val)) \ + HDassert( (cache_ptr)->pel_len >= 0 ); \ + \ + /* modified LRU specific code */ \ + \ + /* insert the entry at the head of the LRU list. */ \ + \ + H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ + \ + /* Similarly, insert the entry at the head of either the clean \ + * or dirty LRU list as appropriate. \ + */ \ + \ + if ( (entry_ptr)->is_dirty ) { \ + \ + H5C__AUX_DLL_PREPEND((entry_ptr), \ + (cache_ptr)->dLRU_head_ptr, \ + (cache_ptr)->dLRU_tail_ptr, \ + (cache_ptr)->dLRU_list_len, \ + (cache_ptr)->dLRU_list_size, \ + (fail_val)) \ + \ + } else { \ + \ + H5C__AUX_DLL_PREPEND((entry_ptr), \ + (cache_ptr)->cLRU_head_ptr, \ + (cache_ptr)->cLRU_tail_ptr, \ + (cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, \ + (fail_val)) \ + } \ + \ + /* End modified LRU specific code. */ \ + \ +} /* H5C__UPDATE_RP_FOR_UNPIN */ -#endif /* H5C_DO_SANITY_CHECKS */ +#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ + +#define H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, fail_val) \ +{ \ + HDassert( (cache_ptr) ); \ + HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ + HDassert( (entry_ptr) ); \ + HDassert( !((entry_ptr)->is_protected) ); \ + HDassert( !((entry_ptr)->is_read_only) ); \ + HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ + HDassert( (entry_ptr)->is_pinned); \ + HDassert( (entry_ptr)->size > 0 ); \ + \ + /* Regardless of the replacement policy, remove the entry from the \ + * pinned entry list. 
\ + */ \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \ + (cache_ptr)->pel_tail_ptr, (cache_ptr)->pel_len, \ + (cache_ptr)->pel_size, (fail_val)) \ + HDassert( (cache_ptr)->pel_len >= 0 ); \ + \ + /* modified LRU specific code */ \ + \ + /* insert the entry at the head of the LRU list. */ \ + \ + H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ + \ + /* End modified LRU specific code. */ \ + \ +} /* H5C__UPDATE_RP_FOR_UNPIN */ +#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ -#define H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, fail_val) \ -{ \ - int k; \ - H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \ - k = H5C__HASH_FCN((entry_ptr)->addr); \ - if ( ((cache_ptr)->index)[k] == NULL ) \ - { \ - ((cache_ptr)->index)[k] = (entry_ptr); \ - } \ - else \ - { \ - (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \ - (entry_ptr)->ht_next->ht_prev = (entry_ptr); \ - ((cache_ptr)->index)[k] = (entry_ptr); \ - } \ - (cache_ptr)->index_len++; \ - (cache_ptr)->index_size += (entry_ptr)->size; \ - if ( (entry_ptr)->is_dirty ) { \ - (cache_ptr)->dirty_index_size += (entry_ptr)->size; \ - } else { \ - (cache_ptr)->clean_index_size += (entry_ptr)->size; \ - } \ - if ((entry_ptr)->flush_me_last) { \ - (cache_ptr)->num_last_entries++; \ - HDassert((cache_ptr)->num_last_entries == 1); \ - } \ - H5C__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr) \ -} + +/*------------------------------------------------------------------------- + * + * Macro: H5C__UPDATE_RP_FOR_UNPROTECT + * + * Purpose: Update the replacement policy data structures for an + * unprotect of the specified cache entry. + * + * To do this, unlink the specified entry from the protected + * list, and re-insert it in the data structures used by the + * current replacement policy. + * + * At present, we only support the modified LRU policy, so + * this function deals with that case unconditionally. If + * we ever support other replacement policies, the function + * should switch on the current policy and act accordingly. + * + * Return: N/A + * + * Programmer: John Mainzer, 5/19/04 + * + * Modifications: + * + * JRM - 7/27/04 + * Converted the function H5C_update_rp_for_unprotect() to + * the macro H5C__UPDATE_RP_FOR_UNPROTECT in an effort to + * squeeze a bit more performance out of the cache. + * + * At least for the first cut, I am leaving the comments and + * white space in the macro. If they cause dificulties with + * pre-processor, I'll have to remove them. + * + * JRM - 7/28/04 + * Split macro into two version, one supporting the clean and + * dirty LRU lists, and the other not. Yet another attempt + * at optimization. + * + * JRM - 3/17/06 + * Modified macro to put pinned entries on the pinned entry + * list instead of inserting them in the data structures + * maintained by the replacement policy. 
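Taken together, the macros in this section move an entry between a small set of list "homes": the LRU (eviction candidates), the pinned entry list, and the protected list. A hypothetical summary of the unprotect transition, which is the case the macro below handles:

/* Hypothetical sketch only: the list "homes" implied by the replacement
 * policy macros.  This enum is an editorial summary, not part of H5C.
 */
enum entry_home {
    HOME_LRU,        /* unprotected, unpinned: a candidate for eviction  */
    HOME_PINNED,     /* pinned: exempt from eviction, still flushable    */
    HOME_PROTECTED   /* protected: in active use, exempt from eviction   */
};

static enum entry_home
on_unprotect(int is_pinned)
{
    /* Leaving the protected list, an entry returns to the pinned entry
     * list if it is pinned, otherwise to the head of the LRU.
     */
    return is_pinned ? HOME_PINNED : HOME_LRU;
}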
+ * + *------------------------------------------------------------------------- + */ -#define H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr) \ -{ \ - int k; \ - H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr) \ - k = H5C__HASH_FCN((entry_ptr)->addr); \ - if ( (entry_ptr)->ht_next ) \ - { \ - (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \ - } \ - if ( (entry_ptr)->ht_prev ) \ - { \ - (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \ - } \ - if ( ((cache_ptr)->index)[k] == (entry_ptr) ) \ - { \ - ((cache_ptr)->index)[k] = (entry_ptr)->ht_next; \ - } \ - (entry_ptr)->ht_next = NULL; \ - (entry_ptr)->ht_prev = NULL; \ - (cache_ptr)->index_len--; \ - (cache_ptr)->index_size -= (entry_ptr)->size; \ - if ( (entry_ptr)->is_dirty ) { \ - (cache_ptr)->dirty_index_size -= (entry_ptr)->size; \ - } else { \ - (cache_ptr)->clean_index_size -= (entry_ptr)->size; \ - } \ - if ((entry_ptr)->flush_me_last) { \ - (cache_ptr)->num_last_entries--; \ - HDassert((cache_ptr)->num_last_entries == 0); \ - } \ - H5C__UPDATE_STATS_FOR_HT_DELETION(cache_ptr) \ -} +#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS + +#define H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, fail_val) \ +{ \ + HDassert( (cache_ptr) ); \ + HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ + HDassert( (entry_ptr) ); \ + HDassert( (entry_ptr)->is_protected); \ + HDassert( (entry_ptr)->size > 0 ); \ + \ + /* Regardless of the replacement policy, remove the entry from the \ + * protected list. \ + */ \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pl_head_ptr, \ + (cache_ptr)->pl_tail_ptr, (cache_ptr)->pl_len, \ + (cache_ptr)->pl_size, (fail_val)) \ + \ + if ( (entry_ptr)->is_pinned ) { \ + \ + H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \ + (cache_ptr)->pel_tail_ptr, \ + (cache_ptr)->pel_len, \ + (cache_ptr)->pel_size, (fail_val)) \ + \ + } else { \ + \ + /* modified LRU specific code */ \ + \ + /* insert the entry at the head of the LRU list. */ \ + \ + H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ + \ + /* Similarly, insert the entry at the head of either the clean or \ + * dirty LRU list as appropriate. \ + */ \ + \ + if ( (entry_ptr)->is_dirty ) { \ + \ + H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ + (cache_ptr)->dLRU_tail_ptr, \ + (cache_ptr)->dLRU_list_len, \ + (cache_ptr)->dLRU_list_size, (fail_val)) \ + \ + } else { \ + \ + H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ + (cache_ptr)->cLRU_tail_ptr, \ + (cache_ptr)->cLRU_list_len, \ + (cache_ptr)->cLRU_list_size, (fail_val)) \ + } \ + \ + /* End modified LRU specific code. 
*/ \ + } \ + \ +} /* H5C__UPDATE_RP_FOR_UNPROTECT */ -#define H5C__SEARCH_INDEX(cache_ptr, Addr, entry_ptr, fail_val) \ -{ \ - int k; \ - int depth = 0; \ - H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \ - k = H5C__HASH_FCN(Addr); \ - entry_ptr = ((cache_ptr)->index)[k]; \ - while ( ( entry_ptr ) && ( H5F_addr_ne(Addr, (entry_ptr)->addr) ) ) \ - { \ - (entry_ptr) = (entry_ptr)->ht_next; \ - (depth)++; \ - } \ - if ( entry_ptr ) \ - { \ - H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k, fail_val) \ - if ( entry_ptr != ((cache_ptr)->index)[k] ) \ - { \ - if ( (entry_ptr)->ht_next ) \ - { \ - (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \ - } \ - HDassert( (entry_ptr)->ht_prev != NULL ); \ - (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \ - ((cache_ptr)->index)[k]->ht_prev = (entry_ptr); \ - (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \ - (entry_ptr)->ht_prev = NULL; \ - ((cache_ptr)->index)[k] = (entry_ptr); \ - H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \ - } \ - } \ - H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, (entry_ptr != NULL), depth) \ -} +#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ -#define H5C__SEARCH_INDEX_NO_STATS(cache_ptr, Addr, entry_ptr, fail_val) \ -{ \ - int k; \ - int depth = 0; \ - H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \ - k = H5C__HASH_FCN(Addr); \ - entry_ptr = ((cache_ptr)->index)[k]; \ - while ( ( entry_ptr ) && ( H5F_addr_ne(Addr, (entry_ptr)->addr) ) ) \ - { \ - (entry_ptr) = (entry_ptr)->ht_next; \ - (depth)++; \ - } \ - if ( entry_ptr ) \ - { \ - H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k, fail_val) \ - if ( entry_ptr != ((cache_ptr)->index)[k] ) \ - { \ - if ( (entry_ptr)->ht_next ) \ - { \ - (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \ - } \ - HDassert( (entry_ptr)->ht_prev != NULL ); \ - (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \ - ((cache_ptr)->index)[k]->ht_prev = (entry_ptr); \ - (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \ - (entry_ptr)->ht_prev = NULL; \ - ((cache_ptr)->index)[k] = (entry_ptr); \ - H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \ - } \ - } \ -} +#define H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, fail_val) \ +{ \ + HDassert( (cache_ptr) ); \ + HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ + HDassert( (entry_ptr) ); \ + HDassert( (entry_ptr)->is_protected); \ + HDassert( (entry_ptr)->size > 0 ); \ + \ + /* Regardless of the replacement policy, remove the entry from the \ + * protected list. \ + */ \ + H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pl_head_ptr, \ + (cache_ptr)->pl_tail_ptr, (cache_ptr)->pl_len, \ + (cache_ptr)->pl_size, (fail_val)) \ + \ + if ( (entry_ptr)->is_pinned ) { \ + \ + H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \ + (cache_ptr)->pel_tail_ptr, \ + (cache_ptr)->pel_len, \ + (cache_ptr)->pel_size, (fail_val)) \ + \ + } else { \ + \ + /* modified LRU specific code */ \ + \ + /* insert the entry at the head of the LRU list. */ \ + \ + H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ + (cache_ptr)->LRU_tail_ptr, \ + (cache_ptr)->LRU_list_len, \ + (cache_ptr)->LRU_list_size, (fail_val)) \ + \ + /* End modified LRU specific code. 
*/ \ + } \ +} /* H5C__UPDATE_RP_FOR_UNPROTECT */ -#define H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr) \ -{ \ - H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr); \ - (cache_ptr)->dirty_index_size -= (entry_ptr)->size; \ - (cache_ptr)->clean_index_size += (entry_ptr)->size; \ - H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr); \ -} +#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ -#define H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr) \ -{ \ - H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr); \ - (cache_ptr)->clean_index_size -= (entry_ptr)->size; \ - (cache_ptr)->dirty_index_size += (entry_ptr)->size; \ - H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr); \ -} -#define H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size, \ - entry_ptr, was_clean) \ -{ \ - H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \ - entry_ptr, was_clean) \ - (cache_ptr)->index_size -= (old_size); \ - (cache_ptr)->index_size += (new_size); \ - if ( was_clean ) { \ - (cache_ptr)->clean_index_size -= (old_size); \ - } else { \ - (cache_ptr)->dirty_index_size -= (old_size); \ - } \ - if ( (entry_ptr)->is_dirty ) { \ - (cache_ptr)->dirty_index_size += (new_size); \ - } else { \ - (cache_ptr)->clean_index_size += (new_size); \ - } \ - H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \ - entry_ptr) \ -} +/****************************/ +/* Package Private Typedefs */ +/****************************/ - -/************************************************************************** +/**************************************************************************** + * + * structure H5C_t + * + * Catchall structure for all variables specific to an instance of the cache. + * + * While the individual fields of the structure are discussed below, the + * following overview may be helpful. + * + * Entries in the cache are stored in an instance of H5TB_TREE, indexed on + * the entry's disk address. While the H5TB_TREE is less efficient than + * hash table, it keeps the entries in address sorted order. As flushes + * in parallel mode are more efficient if they are issued in increasing + * address order, this is a significant benefit. Also the H5TB_TREE code + * was readily available, which reduced development time. + * + * While the cache was designed with multiple replacement policies in mind, + * at present only a modified form of LRU is supported. + * + * JRM - 4/26/04 + * + * Profiling has indicated that searches in the instance of H5TB_TREE are + * too expensive. To deal with this issue, I have augmented the cache + * with a hash table in which all entries will be stored. Given the + * advantages of flushing entries in increasing address order, the TBBT + * is retained, but only dirty entries are stored in it. At least for + * now, we will leave entries in the TBBT after they are flushed. + * + * Note that index_size and index_len now refer to the total size of + * and number of entries in the hash table. + * + * JRM - 7/19/04 + * + * The TBBT has since been replaced with a skip list. This change + * greatly predates this note. + * + * JRM - 9/26/05 + * + * magic: Unsigned 32 bit integer always set to H5C__H5C_T_MAGIC. + * This field is used to validate pointers to instances of + * H5C_t. + * + * flush_in_progress: Boolean flag indicating whether a flush is in + * progress. 
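+ *
+ * As a purely illustrative aside (the real flush logic lives in H5C.c and
+ * is considerably more involved), flushing dirty entries in increasing
+ * address order amounts to walking the skip list of dirty entries that is
+ * described further below, roughly:
+ *
+ *     node_ptr = H5SL_first(cache_ptr->slist_ptr);
+ *     while ( node_ptr != NULL ) {
+ *         entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ *         /* write entry_ptr to disk at entry_ptr->addr */
+ *         node_ptr = H5SL_next(node_ptr);
+ *     }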
+ * + * trace_file_ptr: File pointer pointing to the trace file, which is used + * to record cache operations for use in simulations and design + * studies. This field will usually be NULL, indicating that + * no trace file should be recorded. + * + * Since much of the code supporting the parallel metadata + * cache is in H5AC, we don't write the trace file from + * H5C. Instead, H5AC reads the trace_file_ptr as needed. + * + * When we get to using H5C in other places, we may add + * code to write trace file data at the H5C level as well. + * + * aux_ptr: Pointer to void used to allow wrapper code to associate + * its data with an instance of H5C_t. The H5C cache code + * sets this field to NULL, and otherwise leaves it alone. + * + * max_type_id: Integer field containing the maximum type id number assigned + * to a type of entry in the cache. All type ids from 0 to + * max_type_id inclusive must be defined. The names of the + * types are stored in the type_name_table discussed below, and + * indexed by the ids. + * + * type_name_table_ptr: Pointer to an array of pointer to char of length + * max_type_id + 1. The strings pointed to by the entries + * in the array are the names of the entry types associated + * with the indexing type IDs. + * + * max_cache_size: Nominal maximum number of bytes that may be stored in the + * cache. This value should be viewed as a soft limit, as the + * cache can exceed this value under the following circumstances: + * + * a) All entries in the cache are protected, and the cache is + * asked to insert a new entry. In this case the new entry + * will be created. If this causes the cache to exceed + * max_cache_size, it will do so. The cache will attempt + * to reduce its size as entries are unprotected. + * + * b) When running in parallel mode, the cache may not be + * permitted to flush a dirty entry in response to a read. + * If there are no clean entries available to evict, the + * cache will exceed its maximum size. Again the cache + * will attempt to reduce its size to the max_cache_size + * limit on the next cache write. * - * Skip list insertion and deletion macros: + * c) When an entry increases in size, the cache may exceed + * the max_cache_size limit until the next time the cache + * attempts to load or insert an entry. * - * These used to be functions, but I converted them to macros to avoid some - * function call overhead. + * min_clean_size: Nominal minimum number of clean bytes in the cache. + * The cache attempts to maintain this number of bytes of + * clean data so as to avoid case b) above. Again, this is + * a soft limit. * - **************************************************************************/ - -/*------------------------------------------------------------------------- * - * Macro: H5C__INSERT_ENTRY_IN_SLIST + * In addition to the call back functions required for each entry, the + * cache requires the following call back functions for this instance of + * the cache as a whole: * - * Purpose: Insert the specified instance of H5C_cache_entry_t into - * the skip list in the specified instance of H5C_t. Update - * the associated length and size fields. + * check_write_permitted: In certain applications, the cache may not + * be allowed to write to disk at certain time. If specified, + * the check_write_permitted function is used to determine if + * a write is permissible at any given point in time. * - * Return: N/A + * If no such function is specified (i.e. 
this field is NULL), + * the cache uses the following write_permitted field to + * determine whether writes are permitted. * - * Programmer: John Mainzer, 5/10/04 + * write_permitted: If check_write_permitted is NULL, this boolean flag + * indicates whether writes are permitted. * - * Modifications: + * log_flush: If provided, this function is called whenever a dirty + * entry is flushed to disk. * - * JRM -- 7/21/04 - * Updated function to set the in_tree flag when inserting - * an entry into the tree. Also modified the function to - * update the tree size and len fields instead of the similar - * index fields. * - * All of this is part of the modifications to support the - * hash table. + * In cases where memory is plentiful, and performance is an issue, it may + * be useful to disable all cache evictions, and thereby postpone metadata + * writes. The following field is used to implement this. * - * JRM -- 7/27/04 - * Converted the function H5C_insert_entry_in_tree() into - * the macro H5C__INSERT_ENTRY_IN_TREE in the hopes of - * wringing a little more speed out of the cache. + * evictions_enabled: Boolean flag that is initialized to TRUE. When + * this flag is set to FALSE, the metadata cache will not + * attempt to evict entries to make space for newly protected + * entries, and instead the will grow without limit. + * + * Needless to say, this feature must be used with care. * - * Note that we don't bother to check if the entry is already - * in the tree -- if it is, H5SL_insert() will fail. * - * QAK -- 11/27/04 - * Switched over to using skip list routines. + * The cache requires an index to facilitate searching for entries. The + * following fields support that index. * - * JRM -- 6/27/06 - * Added fail_val parameter. + * index_len: Number of entries currently in the hash table used to index + * the cache. * - * JRM -- 8/25/06 - * Added the H5C_DO_SANITY_CHECKS version of the macro. + * index_size: Number of bytes of cache entries currently stored in the + * hash table used to index the cache. * - * This version maintains the slist_len_increase and - * slist_size_increase fields that are used in sanity - * checks in the flush routines. + * This value should not be mistaken for footprint of the + * cache in memory. The average cache entry is small, and + * the cache has a considerable overhead. Multiplying the + * index_size by two should yield a conservative estimate + * of the cache's memory footprint. * - * All this is needed as the fractal heap needs to be - * able to dirty, resize and/or move entries during the - * flush. + * clean_index_size: Number of bytes of clean entries currently stored in + * the hash table. Note that the index_size field (above) + * is also the sum of the sizes of all entries in the cache. + * Thus we should have the invariant that clean_index_size + + * dirty_index_size == index_size. 
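+ *
+ *		As an illustration only (the real checks live in the
+ *		H5C_DO_SANITY_CHECKS code, not in this comment), the
+ *		invariant can be written as:
+ *
+ *		    HDassert( (cache_ptr)->clean_index_size +
+ *		              (cache_ptr)->dirty_index_size ==
+ *		              (cache_ptr)->index_size );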
* - *------------------------------------------------------------------------- - */ - -#if H5C_DO_SANITY_CHECKS - -#define H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( (entry_ptr)->size > 0 ); \ - HDassert( H5F_addr_defined((entry_ptr)->addr) ); \ - HDassert( !((entry_ptr)->in_slist) ); \ - \ - if ( H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) \ - < 0 ) \ - HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), \ - "Can't insert entry in skip list") \ - \ - (entry_ptr)->in_slist = TRUE; \ - (cache_ptr)->slist_len++; \ - (cache_ptr)->slist_size += (entry_ptr)->size; \ - (cache_ptr)->slist_len_increase++; \ - (cache_ptr)->slist_size_increase += (int64_t)((entry_ptr)->size); \ - \ - HDassert( (cache_ptr)->slist_len > 0 ); \ - HDassert( (cache_ptr)->slist_size > 0 ); \ - \ -} /* H5C__INSERT_ENTRY_IN_SLIST */ - -#else /* H5C_DO_SANITY_CHECKS */ - -#define H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( (entry_ptr)->size > 0 ); \ - HDassert( H5F_addr_defined((entry_ptr)->addr) ); \ - HDassert( !((entry_ptr)->in_slist) ); \ - \ - if ( H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) \ - < 0 ) \ - HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), \ - "Can't insert entry in skip list") \ - \ - (entry_ptr)->in_slist = TRUE; \ - (cache_ptr)->slist_len++; \ - (cache_ptr)->slist_size += (entry_ptr)->size; \ - \ - HDassert( (cache_ptr)->slist_len > 0 ); \ - HDassert( (cache_ptr)->slist_size > 0 ); \ - \ -} /* H5C__INSERT_ENTRY_IN_SLIST */ - -#endif /* H5C_DO_SANITY_CHECKS */ - - -/*------------------------------------------------------------------------- + * WARNING: * - * Function: H5C__REMOVE_ENTRY_FROM_SLIST + * 1) The clean_index_size field is not maintained by the + * index macros, as the hash table doesn't care whether + * the entry is clean or dirty. Instead the field is + * maintained in the H5C__UPDATE_RP macros. * - * Purpose: Remove the specified instance of H5C_cache_entry_t from the - * index skip list in the specified instance of H5C_t. Update - * the associated length and size fields. + * 2) The value of the clean_index_size must not be mistaken + * for the current clean size of the cache. Rather, the + * clean size of the cache is the current value of + * clean_index_size plus the amount of empty space (if any) + * in the cache. * - * Return: N/A + * dirty_index_size: Number of bytes of dirty entries currently stored in + * the hash table. Note that the index_size field (above) + * is also the sum of the sizes of all entries in the cache. + * Thus we should have the invariant that clean_index_size + + * dirty_index_size == index_size. * - * Programmer: John Mainzer, 5/10/04 + * WARNING: * - * Modifications: + * 1) The dirty_index_size field is not maintained by the + * index macros, as the hash table doesn't care whether + * the entry is clean or dirty. Instead the field is + * maintained in the H5C__UPDATE_RP macros. * - * JRM -- 7/21/04 - * Updated function for the addition of the hash table. + * index: Array of pointer to H5C_cache_entry_t of size + * H5C__HASH_TABLE_LEN. At present, this value is a power + * of two, not the usual prime number. 
* - * JRM - 7/27/04 - * Converted from the function H5C_remove_entry_from_tree() - * to the macro H5C__REMOVE_ENTRY_FROM_TREE in the hopes of - * wringing a little more performance out of the cache. + * I hope that the variable size of cache elements, the large + * hash table size, and the way in which HDF5 allocates space + * will combine to avoid problems with periodicity. If so, we + * can use a trivial hash function (a bit-and and a 3 bit left + * shift) with some small savings. * - * QAK -- 11/27/04 - * Switched over to using skip list routines. + * If not, it will become evident in the statistics. Changing + * to the usual prime number length hash table will require + * changing the H5C__HASH_FCN macro and the deletion of the + * H5C__HASH_MASK #define. No other changes should be required. * - * JRM -- 3/28/07 - * Updated sanity checks for the new is_read_only and - * ro_ref_count fields in H5C_cache_entry_t. * - *------------------------------------------------------------------------- - */ - -#define H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->size > 0 ); \ - HDassert( (entry_ptr)->in_slist ); \ - HDassert( (cache_ptr)->slist_ptr ); \ - \ - if ( H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr) \ - != (entry_ptr) ) \ - \ - HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, \ - "Can't delete entry from skip list.") \ - \ - HDassert( (cache_ptr)->slist_len > 0 ); \ - (cache_ptr)->slist_len--; \ - HDassert( (cache_ptr)->slist_size >= (entry_ptr)->size ); \ - (cache_ptr)->slist_size -= (entry_ptr)->size; \ - (entry_ptr)->in_slist = FALSE; \ -} /* H5C__REMOVE_ENTRY_FROM_SLIST */ - - -/*------------------------------------------------------------------------- + * With the addition of cache entry tagging, it is possible that + * an entry may be inserted into the cache without a tag during testing + * and the tag's validity shouldn't be checked. * - * Function: H5C__UPDATE_SLIST_FOR_SIZE_CHANGE + * The following field is maintained to facilitate this. * - * Purpose: Update cache_ptr->slist_size for a change in the size of - * and entry in the slist. + * ignore_tags: Boolean flag to disable tag validation during entry insertion. * - * Return: N/A + * When we flush the cache, we need to write entries out in increasing + * address order. An instance of a skip list is used to store dirty entries in + * sorted order. Whether it is cheaper to sort the dirty entries as needed, + * or to maintain the list is an open question. At a guess, it depends + * on how frequently the cache is flushed. We will see how it goes. * - * Programmer: John Mainzer, 9/07/05 + * For now at least, I will not remove dirty entries from the list as they + * are flushed. (this has been changed -- dirty entries are now removed from + * the skip list as they are flushed. JRM - 10/25/05) * - * Modifications: + * slist_len: Number of entries currently in the skip list + * used to maintain a sorted list of dirty entries in the + * cache. + * + * slist_size: Number of bytes of cache entries currently stored in the + * skip list used to maintain a sorted list of + * dirty entries in the cache. + * + * slist_ptr: pointer to the instance of H5SL_t used maintain a sorted + * list of dirty entries in the cache. 
This sorted list has + * two uses: + * + * a) It allows us to flush dirty entries in increasing address + * order, which results in significant savings. + * + * b) It facilitates checking for adjacent dirty entries when + * attempting to evict entries from the cache. While we + * don't use this at present, I hope that this will allow + * some optimizations when I get to it. + * + * num_last_entries: The number of entries in the cache that can only be + * flushed after all other entries in the cache have + * been flushed. At this time, this will only ever be + * one entry (the superblock), and the code has been + * protected with HDasserts to enforce this. This restraint + * can certainly be relaxed in the future if the need for + * multiple entries being flushed last arises, though + * explicit tests for that case should be added when said + * HDasserts are removed. * - * JRM -- 8/27/06 - * Added the H5C_DO_SANITY_CHECKS version of the macro. + * With the addition of the fractal heap, the cache must now deal with + * the case in which entries may be dirtied, moved, or have their sizes + * changed during a flush. To allow sanity checks in this situation, the + * following two fields have been added. They are only compiled in when + * H5C_DO_SANITY_CHECKS is TRUE. * - * This version maintains the slist_size_increase field - * that are used in sanity checks in the flush routines. + * slist_len_increase: Number of entries that have been added to the + * slist since the last time this field was set to zero. * - * All this is needed as the fractal heap needs to be - * able to dirty, resize and/or move entries during the - * flush. + * slist_size_increase: Total size of all entries that have been added + * to the slist since the last time this field was set to + * zero. 
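+ *
+ * For illustration only -- the real checks are in the flush code in H5C.c
+ * and are more involved -- the intended use of these two fields is roughly
+ * as follows (start_len and flushed_count are hypothetical locals, not
+ * members of this structure):
+ *
+ *     start_len = cache_ptr->slist_len;
+ *     cache_ptr->slist_len_increase  = 0;
+ *     cache_ptr->slist_size_increase = 0;
+ *
+ *     ... flush entries, counting them in flushed_count; flush callbacks
+ *         may dirty, resize, or move entries, which bumps the two
+ *         increase fields ...
+ *
+ *     HDassert( (int64_t)(cache_ptr->slist_len) ==
+ *               (int64_t)start_len + cache_ptr->slist_len_increase -
+ *               (int64_t)flushed_count );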
* - *------------------------------------------------------------------------- - */ - -#if H5C_DO_SANITY_CHECKS - -#define H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (old_size) > 0 ); \ - HDassert( (new_size) > 0 ); \ - HDassert( (old_size) <= (cache_ptr)->slist_size ); \ - HDassert( (cache_ptr)->slist_len > 0 ); \ - HDassert( ((cache_ptr)->slist_len > 1) || \ - ( (cache_ptr)->slist_size == (old_size) ) ); \ - \ - (cache_ptr)->slist_size -= (old_size); \ - (cache_ptr)->slist_size += (new_size); \ - \ - (cache_ptr)->slist_size_increase -= (int64_t)(old_size); \ - (cache_ptr)->slist_size_increase += (int64_t)(new_size); \ - \ - HDassert( (new_size) <= (cache_ptr)->slist_size ); \ - HDassert( ( (cache_ptr)->slist_len > 1 ) || \ - ( (cache_ptr)->slist_size == (new_size) ) ); \ -} /* H5C__UPDATE_SLIST_FOR_SIZE_CHANGE */ - -#else /* H5C_DO_SANITY_CHECKS */ - -#define H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (old_size) > 0 ); \ - HDassert( (new_size) > 0 ); \ - HDassert( (old_size) <= (cache_ptr)->slist_size ); \ - HDassert( (cache_ptr)->slist_len > 0 ); \ - HDassert( ((cache_ptr)->slist_len > 1) || \ - ( (cache_ptr)->slist_size == (old_size) ) ); \ - \ - (cache_ptr)->slist_size -= (old_size); \ - (cache_ptr)->slist_size += (new_size); \ - \ - HDassert( (new_size) <= (cache_ptr)->slist_size ); \ - HDassert( ( (cache_ptr)->slist_len > 1 ) || \ - ( (cache_ptr)->slist_size == (new_size) ) ); \ -} /* H5C__UPDATE_SLIST_FOR_SIZE_CHANGE */ - -#endif /* H5C_DO_SANITY_CHECKS */ - - -/************************************************************************** * - * Replacement policy update macros: + * When a cache entry is protected, it must be removed from the LRU + * list(s) as it cannot be either flushed or evicted until it is unprotected. + * The following fields are used to implement the protected list (pl). * - * These used to be functions, but I converted them to macros to avoid some - * function call overhead. + * pl_len: Number of entries currently residing on the protected list. * - **************************************************************************/ - -/*------------------------------------------------------------------------- + * pl_size: Number of bytes of cache entries currently residing on the + * protected list. * - * Macro: H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS + * pl_head_ptr: Pointer to the head of the doubly linked list of protected + * entries. Note that cache entries on this list are linked + * by their next and prev fields. * - * Purpose: For efficiency, we sometimes change the order of flushes -- - * but doing so can confuse the replacement policy. This - * macro exists to allow us to specify an entry as the - * most recently touched so we can repair any such - * confusion. + * This field is NULL if the list is empty. * - * At present, we only support the modified LRU policy, so - * this function deals with that case unconditionally. If - * we ever support other replacement policies, the macro - * should switch on the current policy and act accordingly. + * pl_tail_ptr: Pointer to the tail of the doubly linked list of protected + * entries. Note that cache entries on this list are linked + * by their next and prev fields. * - * Return: N/A + * This field is NULL if the list is empty. 
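+ *
+ * To sketch how the protected list interacts with the replacement policy
+ * (this is only a simplified restatement of the replacement policy update
+ * macros -- H5C__UPDATE_RP_FOR_PROTECT and H5C__UPDATE_RP_FOR_UNPROTECT --
+ * with their arguments abbreviated, not additional logic):
+ *
+ *     on protect:    H5C__DLL_REMOVE(entry, LRU list(s), or the pinned
+ *                        entry list (see below) if the entry is pinned)
+ *                    H5C__DLL_APPEND(entry, pl_head_ptr, pl_tail_ptr,
+ *                        pl_len, pl_size)
+ *
+ *     on unprotect:  H5C__DLL_REMOVE(entry, pl_head_ptr, pl_tail_ptr,
+ *                        pl_len, pl_size)
+ *                    H5C__DLL_PREPEND(entry, pinned entry list if pinned,
+ *                        else the head of the LRU list(s))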
* - * Programmer: John Mainzer, 10/13/05 * - * Modifications: + * For very frequently used entries, the protect/unprotect overhead can + * become burdensome. To avoid this overhead, I have modified the cache + * to allow entries to be "pinned". A pinned entry is similar to a + * protected entry, in the sense that it cannot be evicted, and that + * the entry can be modified at any time. * - * JRM -- 3/20/06 - * Modified macro to ignore pinned entries. Pinned entries - * do not appear in the data structures maintained by the - * replacement policy code, and thus this macro has nothing - * to do if called for such an entry. + * Pinning an entry has the following implications: * - * JRM -- 3/28/07 - * Added sanity checks using the new is_read_only and - * ro_ref_count fields of struct H5C_cache_entry_t. + * 1) A pinned entry cannot be evicted. Thus unprotected + * pinned entries reside in the pinned entry list, instead + * of the LRU list(s) (or other lists maintained by the current + * replacement policy code). * - *------------------------------------------------------------------------- - */ - -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS - -#define H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS(cache_ptr, entry_ptr, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - if ( ! ((entry_ptr)->is_pinned) ) { \ - \ - /* modified LRU specific code */ \ - \ - /* remove the entry from the LRU list, and re-insert it at the head.\ - */ \ - \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - /* Use the dirty flag to infer whether the entry is on the clean or \ - * dirty LRU list, and remove it. Then insert it at the head of \ - * the same LRU list. \ - * \ - * At least initially, all entries should be clean. That may \ - * change, so we may as well deal with both cases now. \ - */ \ - \ - if ( (entry_ptr)->is_dirty ) { \ - H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ - (cache_ptr)->dLRU_tail_ptr, \ - (cache_ptr)->dLRU_list_len, \ - (cache_ptr)->dLRU_list_size, (fail_val)) \ - \ - H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ - (cache_ptr)->dLRU_tail_ptr, \ - (cache_ptr)->dLRU_list_len, \ - (cache_ptr)->dLRU_list_size, (fail_val)) \ - } else { \ - H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ - (cache_ptr)->cLRU_tail_ptr, \ - (cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, (fail_val)) \ - \ - H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ - (cache_ptr)->cLRU_tail_ptr, \ - (cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, (fail_val)) \ - } \ - \ - /* End modified LRU specific code. 
*/ \ - } \ -} /* H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS */ - -#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - -#define H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS(cache_ptr, entry_ptr, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - if ( ! ((entry_ptr)->is_pinned) ) { \ - \ - /* modified LRU specific code */ \ - \ - /* remove the entry from the LRU list, and re-insert it at the head \ - */ \ - \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - /* End modified LRU specific code. */ \ - } \ -} /* H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS */ - -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - - -/*------------------------------------------------------------------------- + * 2) A pinned entry can be accessed or modified at any time. + * Therefore, the cache must check with the entry owner + * before flushing it. If permission is denied, the + * cache just skips the entry in the flush. * - * Macro: H5C__UPDATE_RP_FOR_EVICTION + * 3) A pinned entry can be marked as dirty (and possibly + * change size) while it is unprotected. * - * Purpose: Update the replacement policy data structures for an - * eviction of the specified cache entry. + * 4) The flush-destroy code must allow pinned entries to + * be unpinned (and possibly unprotected) during the + * flush. * - * At present, we only support the modified LRU policy, so - * this function deals with that case unconditionally. If - * we ever support other replacement policies, the function - * should switch on the current policy and act accordingly. + * Since pinned entries cannot be evicted, they must be kept on a pinned + * entry list (pel), instead of being entrusted to the replacement policy + * code. * - * Return: Non-negative on success/Negative on failure. + * Maintaining the pinned entry list requires the following fields: * - * Programmer: John Mainzer, 5/10/04 + * pel_len: Number of entries currently residing on the pinned + * entry list. * - * Modifications: + * pel_size: Number of bytes of cache entries currently residing on + * the pinned entry list. * - * JRM - 7/27/04 - * Converted the function H5C_update_rp_for_eviction() to the - * macro H5C__UPDATE_RP_FOR_EVICTION in an effort to squeeze - * a bit more performance out of the cache. + * pel_head_ptr: Pointer to the head of the doubly linked list of pinned + * but not protected entries. Note that cache entries on + * this list are linked by their next and prev fields. * - * At least for the first cut, I am leaving the comments and - * white space in the macro. If they cause dificulties with - * the pre-processor, I'll have to remove them. + * This field is NULL if the list is empty. * - * JRM - 7/28/04 - * Split macro into two version, one supporting the clean and - * dirty LRU lists, and the other not. Yet another attempt - * at optimization. + * pel_tail_ptr: Pointer to the tail of the doubly linked list of pinned + * but not protected entries. Note that cache entries on + * this list are linked by their next and prev fields. 
* - * JRM - 3/20/06 - * Pinned entries can't be evicted, so this entry should never - * be called on a pinned entry. Added assert to verify this. + * This field is NULL if the list is empty. * - * JRM -- 3/28/07 - * Added sanity checks for the new is_read_only and - * ro_ref_count fields of struct H5C_cache_entry_t. * - *------------------------------------------------------------------------- - */ - -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS - -#define H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( !((entry_ptr)->is_pinned) ); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - /* modified LRU specific code */ \ - \ - /* remove the entry from the LRU list. */ \ - \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - /* If the entry is clean when it is evicted, it should be on the \ - * clean LRU list, if it was dirty, it should be on the dirty LRU list. \ - * Remove it from the appropriate list according to the value of the \ - * dirty flag. \ - */ \ - \ - if ( (entry_ptr)->is_dirty ) { \ - \ - H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ - (cache_ptr)->dLRU_tail_ptr, \ - (cache_ptr)->dLRU_list_len, \ - (cache_ptr)->dLRU_list_size, (fail_val)) \ - } else { \ - H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ - (cache_ptr)->cLRU_tail_ptr, \ - (cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, (fail_val)) \ - } \ - \ -} /* H5C__UPDATE_RP_FOR_EVICTION */ - -#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - -#define H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( !((entry_ptr)->is_pinned) ); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - /* modified LRU specific code */ \ - \ - /* remove the entry from the LRU list. */ \ - \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ -} /* H5C__UPDATE_RP_FOR_EVICTION */ - -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - - -/*------------------------------------------------------------------------- + * The cache must have a replacement policy, and the fields supporting this + * policy must be accessible from this structure. * - * Macro: H5C__UPDATE_RP_FOR_FLUSH + * While there has been interest in several replacement policies for + * this cache, the initial development schedule is tight. Thus I have + * elected to support only a modified LRU (least recently used) policy + * for the first cut. * - * Purpose: Update the replacement policy data structures for a flush - * of the specified cache entry. + * To further simplify matters, I have simply included the fields needed + * by the modified LRU in this structure. 
When and if we add support for + * other policies, it will probably be easiest to just add the necessary + * fields to this structure as well -- we only create one instance of this + * structure per file, so the overhead is not excessive. * - * At present, we only support the modified LRU policy, so - * this function deals with that case unconditionally. If - * we ever support other replacement policies, the function - * should switch on the current policy and act accordingly. * - * Return: N/A + * Fields supporting the modified LRU policy: * - * Programmer: John Mainzer, 5/6/04 + * See most any OS text for a discussion of the LRU replacement policy. * - * Modifications: + * When operating in parallel mode, we must ensure that a read does not + * cause a write. If it does, the process will hang, as the write will + * be collective and the other processes will not know to participate. * - * JRM - 7/27/04 - * Converted the function H5C_update_rp_for_flush() to the - * macro H5C__UPDATE_RP_FOR_FLUSH in an effort to squeeze - * a bit more performance out of the cache. + * To deal with this issue, I have modified the usual LRU policy by adding + * clean and dirty LRU lists to the usual LRU list. * - * At least for the first cut, I am leaving the comments and - * white space in the macro. If they cause dificulties with - * pre-processor, I'll have to remove them. + * The clean LRU list is simply the regular LRU list with all dirty cache + * entries removed. * - * JRM - 7/28/04 - * Split macro into two versions, one supporting the clean and - * dirty LRU lists, and the other not. Yet another attempt - * at optimization. + * Similarly, the dirty LRU list is the regular LRU list with all the clean + * cache entries removed. * - * JRM - 3/20/06 - * While pinned entries can be flushed, they don't reside in - * the replacement policy data structures when unprotected. - * Thus I modified this macro to do nothing if the entry is - * pinned. + * When reading in parallel mode, we evict from the clean LRU list only. + * This implies that we must try to ensure that the clean LRU list is + * reasonably well stocked at all times. * - * JRM - 3/28/07 - * Added sanity checks based on the new is_read_only and - * ro_ref_count fields of struct H5C_cache_entry_t. + * We attempt to do this by trying to flush enough entries on each write + * to keep the cLRU_list_size >= min_clean_size. * - *------------------------------------------------------------------------- - */ - -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS - -#define H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - if ( ! ((entry_ptr)->is_pinned) ) { \ - \ - /* modified LRU specific code */ \ - \ - /* remove the entry from the LRU list, and re-insert it at the \ - * head. \ - */ \ - \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - /* since the entry is being flushed or cleared, one would think \ - * that it must be dirty -- but that need not be the case. 
Use the \ - * dirty flag to infer whether the entry is on the clean or dirty \ - * LRU list, and remove it. Then insert it at the head of the \ - * clean LRU list. \ - * \ - * The function presumes that a dirty entry will be either cleared \ - * or flushed shortly, so it is OK if we put a dirty entry on the \ - * clean LRU list. \ - */ \ - \ - if ( (entry_ptr)->is_dirty ) { \ - H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ - (cache_ptr)->dLRU_tail_ptr, \ - (cache_ptr)->dLRU_list_len, \ - (cache_ptr)->dLRU_list_size, (fail_val)) \ - } else { \ - H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ - (cache_ptr)->cLRU_tail_ptr, \ - (cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, (fail_val)) \ - } \ - \ - H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ - (cache_ptr)->cLRU_tail_ptr, \ - (cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, (fail_val)) \ - \ - /* End modified LRU specific code. */ \ - } \ -} /* H5C__UPDATE_RP_FOR_FLUSH */ - -#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - -#define H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - if ( ! ((entry_ptr)->is_pinned) ) { \ - \ - /* modified LRU specific code */ \ - \ - /* remove the entry from the LRU list, and re-insert it at the \ - * head. \ - */ \ - \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - /* End modified LRU specific code. */ \ - } \ -} /* H5C__UPDATE_RP_FOR_FLUSH */ - -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - - -/*------------------------------------------------------------------------- + * Even if we start with a completely clean cache, a sequence of protects + * without unprotects can empty the clean LRU list. In this case, the + * cache must grow temporarily. At the next write, we will attempt to + * evict enough entries to reduce index_size to less than max_cache_size. + * While this will usually be possible, all bets are off if enough entries + * are protected. * - * Macro: H5C__UPDATE_RP_FOR_INSERTION + * Discussions of the individual fields used by the modified LRU replacement + * policy follow: * - * Purpose: Update the replacement policy data structures for an - * insertion of the specified cache entry. + * LRU_list_len: Number of cache entries currently on the LRU list. * - * At present, we only support the modified LRU policy, so - * this function deals with that case unconditionally. If - * we ever support other replacement policies, the function - * should switch on the current policy and act accordingly. + * Observe that LRU_list_len + pl_len must always equal + * index_len. * - * Return: N/A + * LRU_list_size: Number of bytes of cache entries currently residing on the + * LRU list. * - * Programmer: John Mainzer, 5/17/04 + * Observe that LRU_list_size + pl_size must always equal + * index_size. * - * Modifications: + * LRU_head_ptr: Pointer to the head of the doubly linked LRU list. 
Cache + * entries on this list are linked by their next and prev fields. * - * JRM - 7/27/04 - * Converted the function H5C_update_rp_for_insertion() to the - * macro H5C__UPDATE_RP_FOR_INSERTION in an effort to squeeze - * a bit more performance out of the cache. + * This field is NULL if the list is empty. * - * At least for the first cut, I am leaving the comments and - * white space in the macro. If they cause dificulties with - * pre-processor, I'll have to remove them. + * LRU_tail_ptr: Pointer to the tail of the doubly linked LRU list. Cache + * entries on this list are linked by their next and prev fields. * - * JRM - 7/28/04 - * Split macro into two version, one supporting the clean and - * dirty LRU lists, and the other not. Yet another attempt - * at optimization. + * This field is NULL if the list is empty. * - * JRM - 3/10/06 - * This macro should never be called on a pinned entry. - * Inserted an assert to verify this. + * cLRU_list_len: Number of cache entries currently on the clean LRU list. * - * JRM - 8/9/06 - * Not any more. We must now allow insertion of pinned - * entries. Updated macro to support this. + * Observe that cLRU_list_len + dLRU_list_len must always + * equal LRU_list_len. * - * JRM - 3/28/07 - * Added sanity checks using the new is_read_only and - * ro_ref_count fields of struct H5C_cache_entry_t. + * cLRU_list_size: Number of bytes of cache entries currently residing on + * the clean LRU list. * - *------------------------------------------------------------------------- - */ - -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS - -#define H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - if ( (entry_ptr)->is_pinned ) { \ - \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \ - (cache_ptr)->pel_tail_ptr, \ - (cache_ptr)->pel_len, \ - (cache_ptr)->pel_size, (fail_val)) \ - \ - } else { \ - \ - /* modified LRU specific code */ \ - \ - /* insert the entry at the head of the LRU list. */ \ - \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - /* insert the entry at the head of the clean or dirty LRU list as \ - * appropriate. \ - */ \ - \ - if ( entry_ptr->is_dirty ) { \ - H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ - (cache_ptr)->dLRU_tail_ptr, \ - (cache_ptr)->dLRU_list_len, \ - (cache_ptr)->dLRU_list_size, (fail_val)) \ - } else { \ - H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ - (cache_ptr)->cLRU_tail_ptr, \ - (cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, (fail_val)) \ - } \ - \ - /* End modified LRU specific code. 
*/ \ - } \ -} - -#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - -#define H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - if ( (entry_ptr)->is_pinned ) { \ - \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \ - (cache_ptr)->pel_tail_ptr, \ - (cache_ptr)->pel_len, \ - (cache_ptr)->pel_size, (fail_val)) \ - \ - } else { \ - \ - /* modified LRU specific code */ \ - \ - /* insert the entry at the head of the LRU list. */ \ - \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - /* End modified LRU specific code. */ \ - } \ -} - -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - - -/*------------------------------------------------------------------------- + * Observe that cLRU_list_size + dLRU_list_size must always + * equal LRU_list_size. + * + * cLRU_head_ptr: Pointer to the head of the doubly linked clean LRU list. + * Cache entries on this list are linked by their aux_next and + * aux_prev fields. + * + * This field is NULL if the list is empty. + * + * cLRU_tail_ptr: Pointer to the tail of the doubly linked clean LRU list. + * Cache entries on this list are linked by their aux_next and + * aux_prev fields. + * + * This field is NULL if the list is empty. + * + * dLRU_list_len: Number of cache entries currently on the dirty LRU list. + * + * Observe that cLRU_list_len + dLRU_list_len must always + * equal LRU_list_len. + * + * dLRU_list_size: Number of cache entries currently on the dirty LRU list. + * + * Observe that cLRU_list_len + dLRU_list_len must always + * equal LRU_list_len. + * + * dLRU_head_ptr: Pointer to the head of the doubly linked dirty LRU list. + * Cache entries on this list are linked by their aux_next and + * aux_prev fields. + * + * This field is NULL if the list is empty. + * + * dLRU_tail_ptr: Pointer to the tail of the doubly linked dirty LRU list. + * Cache entries on this list are linked by their aux_next and + * aux_prev fields. + * + * This field is NULL if the list is empty. + * + * + * Automatic cache size adjustment: + * + * While the default cache size is adequate for most cases, we can run into + * cases where the default is too small. Ideally, we will let the user + * adjust the cache size as required. However, this is not possible in all + * cases. Thus I have added automatic cache size adjustment code. + * + * The configuration for the automatic cache size adjustment is stored in + * the structure described below: + * + * size_increase_possible: Depending on the configuration data given + * in the resize_ctl field, it may or may not be possible + * to increase the size of the cache. Rather than test for + * all the ways this can happen, we simply set this flag when + * we receive a new configuration. + * + * flash_size_increase_possible: Depending on the configuration data given + * in the resize_ctl field, it may or may not be possible + * for a flash size increase to occur. We set this flag + * whenever we receive a new configuration so as to avoid + * repeated calculations. 
+ * + * flash_size_increase_threshold: If a flash cache size increase is possible, + * this field is used to store the minimum size of a new entry + * or size increase needed to trigger a flash cache size + * increase. Note that this field must be updated whenever + * the size of the cache is changed. + * + * size_decrease_possible: Depending on the configuration data given + * in the resize_ctl field, it may or may not be possible + * to decrease the size of the cache. Rather than test for + * all the ways this can happen, we simply set this flag when + * we receive a new configuration. + * + * cache_full: Boolean flag used to keep track of whether the cache is + * full, so we can refrain from increasing the size of a + * cache which hasn't used up the space allotted to it. + * + * The field is initialized to FALSE, and then set to TRUE + * whenever we attempt to make space in the cache. + * + * resize_enabled: This is another convenience flag which is set whenever + * a new set of values for resize_ctl are provided. Very + * simply, + * + * resize_enabled = size_increase_possible || + * size_decrease_possible; + * + * size_decreased: Boolean flag set to TRUE whenever the maximum cache + * size is decreased. The flag triggers a call to + * H5C_make_space_in_cache() on the next call to H5C_protect(). + * + * resize_ctl: Instance of H5C_auto_size_ctl_t containing configuration + * data for automatic cache resizing. + * + * epoch_markers_active: Integer field containing the number of epoch + * markers currently in use in the LRU list. This value + * must be in the range [0, H5C__MAX_EPOCH_MARKERS - 1]. + * + * epoch_marker_active: Array of boolean of length H5C__MAX_EPOCH_MARKERS. + * This array is used to track which epoch markers are currently + * in use. + * + * epoch_marker_ringbuf: Array of int of length H5C__MAX_EPOCH_MARKERS + 1. + * + * To manage the epoch marker cache entries, it is necessary + * to track their order in the LRU list. This is done with + * epoch_marker_ringbuf. When markers are inserted at the + * head of the LRU list, the index of the marker in the + * epoch_markers array is inserted at the tail of the ring + * buffer. When it becomes the epoch_marker_active'th marker + * in the LRU list, it will have worked its way to the head + * of the ring buffer as well. This allows us to remove it + * without scanning the LRU list if such is required. * - * Macro: H5C__UPDATE_RP_FOR_PROTECT + * epoch_marker_ringbuf_first: Integer field containing the index of the + * first entry in the ring buffer. * - * Purpose: Update the replacement policy data structures for a - * protect of the specified cache entry. + * epoch_marker_ringbuf_last: Integer field containing the index of the + * last entry in the ring buffer. * - * To do this, unlink the specified entry from any data - * structures used by the replacement policy, and add the - * entry to the protected list. + * epoch_marker_ringbuf_size: Integer field containing the number of entries + * in the ring buffer. * - * At present, we only support the modified LRU policy, so - * this function deals with that case unconditionally. If - * we ever support other replacement policies, the function - * should switch on the current policy and act accordingly. + * epoch_markers: Array of instances of H5C_cache_entry_t of length + * H5C__MAX_EPOCH_MARKERS. The entries are used as markers + * in the LRU list to identify cache entries that haven't + * been accessed for some (small) specified number of + * epochs. 
These entries (if any) can then be evicted and + * the cache size reduced -- ideally without evicting any + * of the current working set. Needless to say, the epoch + * length and the number of epochs before an unused entry is + * evicted must be chosen so that all, or almost all, the working + * set will be accessed before the limit. * - * Return: N/A + * Epoch markers only appear in the LRU list, never in + * the index or slist. While they are of type + * H5C__EPOCH_MARKER_TYPE, and have associated class + * functions, these functions should never be called. * - * Programmer: John Mainzer, 5/17/04 + * The addr fields of these instances of H5C_cache_entry_t + * are set to the index of the instance in the epoch_markers + * array, the size is set to 0, and the type field points + * to the constant structure epoch_marker_class defined + * in H5C.c. The next and prev fields are used as usual + * to link the entry into the LRU list. * - * Modifications: + * All other fields are unused. * - * JRM - 7/27/04 - * Converted the function H5C_update_rp_for_protect() to the - * macro H5C__UPDATE_RP_FOR_PROTECT in an effort to squeeze - * a bit more performance out of the cache. * + * Cache hit rate collection fields: * - * At least for the first cut, I am leaving the comments and - * white space in the macro. If they cause dificulties with - * pre-processor, I'll have to remove them. + * We supply the current cache hit rate on request, so we must keep a + * simple cache hit rate computation regardless of whether statistics + * collection is enabled. The following fields support this capability. * - * JRM - 7/28/04 - * Split macro into two version, one supporting the clean and - * dirty LRU lists, and the other not. Yet another attempt - * at optimization. + * cache_hits: Number of cache hits since the last time the cache hit + * rate statistics were reset. Note that when automatic cache + * re-sizing is enabled, this field will be reset every automatic + * resize epoch. * - * JRM - 3/17/06 - * Modified macro to attempt to remove pinned entriese from - * the pinned entry list instead of from the data structures - * maintained by the replacement policy. + * cache_accesses: Number of times the cache has been accessed since + * the last time the cache hit rate statistics + * were reset. Note that when automatic cache re-sizing is enabled, + * this field will be reset every automatic resize epoch. * - * JRM - 3/28/07 - * Added sanity checks based on the new is_read_only and - * ro_ref_count fields of struct H5C_cache_entry_t. * - *------------------------------------------------------------------------- - */ - -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS - -#define H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - if ( (entry_ptr)->is_pinned ) { \ - \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \ - (cache_ptr)->pel_tail_ptr, \ - (cache_ptr)->pel_len, \ - (cache_ptr)->pel_size, (fail_val)) \ - HDassert( (cache_ptr)->pel_len >= 0 ); \ - \ - } else { \ - \ - /* modified LRU specific code */ \ - \ - /* remove the entry from the LRU list. 
*/ \ - \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - /* Similarly, remove the entry from the clean or dirty LRU list \ - * as appropriate. \ - */ \ - \ - if ( (entry_ptr)->is_dirty ) { \ - \ - H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ - (cache_ptr)->dLRU_tail_ptr, \ - (cache_ptr)->dLRU_list_len, \ - (cache_ptr)->dLRU_list_size, (fail_val)) \ - \ - } else { \ - \ - H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ - (cache_ptr)->cLRU_tail_ptr, \ - (cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, (fail_val)) \ - } \ - \ - /* End modified LRU specific code. */ \ - } \ - \ - /* Regardless of the replacement policy, or whether the entry is \ - * pinned, now add the entry to the protected list. \ - */ \ - \ - H5C__DLL_APPEND((entry_ptr), (cache_ptr)->pl_head_ptr, \ - (cache_ptr)->pl_tail_ptr, \ - (cache_ptr)->pl_len, \ - (cache_ptr)->pl_size, (fail_val)) \ -} /* H5C__UPDATE_RP_FOR_PROTECT */ - -#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - -#define H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - if ( (entry_ptr)->is_pinned ) { \ - \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \ - (cache_ptr)->pel_tail_ptr, \ - (cache_ptr)->pel_len, \ - (cache_ptr)->pel_size, (fail_val)) \ - HDassert( (cache_ptr)->pel_len >= 0 ); \ - \ - } else { \ - \ - /* modified LRU specific code */ \ - \ - /* remove the entry from the LRU list. */ \ - \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - /* End modified LRU specific code. */ \ - } \ - \ - /* Regardless of the replacement policy, or whether the entry is \ - * pinned, now add the entry to the protected list. \ - */ \ - \ - H5C__DLL_APPEND((entry_ptr), (cache_ptr)->pl_head_ptr, \ - (cache_ptr)->pl_tail_ptr, \ - (cache_ptr)->pl_len, \ - (cache_ptr)->pl_size, (fail_val)) \ -} /* H5C__UPDATE_RP_FOR_PROTECT */ - -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - - -/*------------------------------------------------------------------------- * - * Macro: H5C__UPDATE_RP_FOR_MOVE + * Statistics collection fields: + * + * When enabled, these fields are used to collect statistics as described + * below. The first set are collected only when H5C_COLLECT_CACHE_STATS + * is true. + * + * hits: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the number of times an entry with type id + * equal to the array index has been in cache when requested in + * the current epoch. + * + * misses: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the number of times an entry with type id + * equal to the array index has not been in cache when + * requested in the current epoch. + * + * write_protects: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The + * cells are used to record the number of times an entry with + * type id equal to the array index has been write protected + * in the current epoch. + * + * Observe that (hits + misses) = (write_protects + read_protects). 
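+ *
+ * As a purely illustrative sketch (the real bookkeeping is done by the
+ * statistics macros, and the exact way the type id is obtained from the
+ * entry may differ), a hit or miss on an entry whose class reports type
+ * id type_id would be tallied as:
+ *
+ *     if ( hit )
+ *         ((cache_ptr)->hits[type_id])++;
+ *     else
+ *         ((cache_ptr)->misses[type_id])++;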
+ * + * read_protects: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The + * cells are used to record the number of times an entry with + * type id equal to the array index has been read protected in + * the current epoch. + * + * Observe that (hits + misses) = (write_protects + read_protects). + * + * max_read_protects: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. + * The cells are used to record the maximum number of simultaneous + * read protects on any entry with type id equal to the array index + * in the current epoch. + * + * insertions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the number of times an entry with type + * id equal to the array index has been inserted into the + * cache in the current epoch. + * + * pinned_insertions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. + * The cells are used to record the number of times an entry + * with type id equal to the array index has been inserted + * pinned into the cache in the current epoch. + * + * clears: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the number of times a dirty entry with type + * id equal to the array index has been cleared in the current + * epoch. + * + * flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the number of times an entry with type id + * equal to the array index has been written to disk in the + * current epoch. + * + * evictions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the number of times an entry with type id + * equal to the array index has been evicted from the cache in + * the current epoch. + * + * moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the number of times an entry with type + * id equal to the array index has been moved in the current + * epoch. + * + * entry_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. + * The cells are used to record the number of times an entry + * with type id equal to the array index has been moved + * during its flush callback in the current epoch. + * + * cache_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. + * The cells are used to record the number of times an entry + * with type id equal to the array index has been moved + * during a cache flush in the current epoch. * - * Purpose: Update the replacement policy data structures for a - * move of the specified cache entry. + * pins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the number of times an entry with type + * id equal to the array index has been pinned in the current + * epoch. * - * At present, we only support the modified LRU policy, so - * this function deals with that case unconditionally. If - * we ever support other replacement policies, the function - * should switch on the current policy and act accordingly. + * unpins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the number of times an entry with type + * id equal to the array index has been unpinned in the current + * epoch. * - * Return: N/A + * dirty_pins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the number of times an entry with type + * id equal to the array index has been marked dirty while pinned + * in the current epoch. * - * Programmer: John Mainzer, 5/17/04 + * pinned_flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. 
The + * cells are used to record the number of times an entry + * with type id equal to the array index has been flushed while + * pinned in the current epoch. * - *------------------------------------------------------------------------- - */ - -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS - -#define H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - if ( ! ( (entry_ptr)->is_pinned ) ) { \ - \ - /* modified LRU specific code */ \ - \ - /* remove the entry from the LRU list, and re-insert it at the head. \ - */ \ - \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - /* remove the entry from either the clean or dirty LUR list as \ - * indicated by the was_dirty parameter \ - */ \ - if ( was_dirty ) { \ - \ - H5C__AUX_DLL_REMOVE((entry_ptr), \ - (cache_ptr)->dLRU_head_ptr, \ - (cache_ptr)->dLRU_tail_ptr, \ - (cache_ptr)->dLRU_list_len, \ - (cache_ptr)->dLRU_list_size, \ - (fail_val)) \ - \ - } else { \ - \ - H5C__AUX_DLL_REMOVE((entry_ptr), \ - (cache_ptr)->cLRU_head_ptr, \ - (cache_ptr)->cLRU_tail_ptr, \ - (cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, \ - (fail_val)) \ - } \ - \ - /* insert the entry at the head of either the clean or dirty \ - * LRU list as appropriate. \ - */ \ - \ - if ( (entry_ptr)->is_dirty ) { \ - \ - H5C__AUX_DLL_PREPEND((entry_ptr), \ - (cache_ptr)->dLRU_head_ptr, \ - (cache_ptr)->dLRU_tail_ptr, \ - (cache_ptr)->dLRU_list_len, \ - (cache_ptr)->dLRU_list_size, \ - (fail_val)) \ - \ - } else { \ - \ - H5C__AUX_DLL_PREPEND((entry_ptr), \ - (cache_ptr)->cLRU_head_ptr, \ - (cache_ptr)->cLRU_tail_ptr, \ - (cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, \ - (fail_val)) \ - } \ - \ - /* End modified LRU specific code. */ \ - } \ -} /* H5C__UPDATE_RP_FOR_MOVE */ - -#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - -#define H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - if ( ! ( (entry_ptr)->is_pinned ) ) { \ - \ - /* modified LRU specific code */ \ - \ - /* remove the entry from the LRU list, and re-insert it at the head. \ - */ \ - \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - /* End modified LRU specific code. */ \ - } \ -} /* H5C__UPDATE_RP_FOR_MOVE */ - -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ + * pinned_clears: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. 
The
+ * cells are used to record the number of times an entry
+ * with type id equal to the array index has been cleared while
+ * pinned in the current epoch.
+ *
+ * size_increases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
+ * The cells are used to record the number of times an entry
+ * with type id equal to the array index has increased in
+ * size in the current epoch.
+ *
+ * size_decreases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
+ * The cells are used to record the number of times an entry
+ * with type id equal to the array index has decreased in
+ * size in the current epoch.
+ *
+ * entry_flush_size_changes: Array of int64 of length
+ * H5C__MAX_NUM_TYPE_IDS + 1. The cells are used to record
+ * the number of times an entry with type id equal to the
+ * array index has changed size while in its flush callback.
+ *
+ * cache_flush_size_changes: Array of int64 of length
+ * H5C__MAX_NUM_TYPE_IDS + 1. The cells are used to record
+ * the number of times an entry with type id equal to the
+ * array index has changed size during a cache flush.
+ *
+ * total_ht_insertions: Number of times entries have been inserted into the
+ * hash table in the current epoch.
+ *
+ * total_ht_deletions: Number of times entries have been deleted from the
+ * hash table in the current epoch.
+ *
+ * successful_ht_searches: int64 containing the total number of successful
+ * searches of the hash table in the current epoch.
+ *
+ * total_successful_ht_search_depth: int64 containing the total number of
+ * entries other than the targets examined in successful
+ * searches of the hash table in the current epoch.
+ *
+ * failed_ht_searches: int64 containing the total number of unsuccessful
+ * searches of the hash table in the current epoch.
+ *
+ * total_failed_ht_search_depth: int64 containing the total number of
+ * entries examined in unsuccessful searches of the hash
+ * table in the current epoch.
+ *
+ * max_index_len: Largest value attained by the index_len field in the
+ * current epoch.
+ *
+ * max_index_size: Largest value attained by the index_size field in the
+ * current epoch.
+ *
+ * max_clean_index_size: Largest value attained by the clean_index_size field
+ * in the current epoch.
+ *
+ * max_dirty_index_size: Largest value attained by the dirty_index_size field
+ * in the current epoch.
+ *
+ * max_slist_len: Largest value attained by the slist_len field in the
+ * current epoch.
+ *
+ * max_slist_size: Largest value attained by the slist_size field in the
+ * current epoch.
+ *
+ * max_pl_len: Largest value attained by the pl_len field in the
+ * current epoch.
+ *
+ * max_pl_size: Largest value attained by the pl_size field in the
+ * current epoch.
+ *
+ * max_pel_len: Largest value attained by the pel_len field in the
+ * current epoch.
+ *
+ * max_pel_size: Largest value attained by the pel_size field in the
+ * current epoch.
+ *
+ * calls_to_msic: Total number of calls to H5C_make_space_in_cache
+ *
+ * total_entries_skipped_in_msic: Number of clean entries skipped while
+ * enforcing the min_clean_fraction in H5C_make_space_in_cache().
+ *
+ * total_entries_scanned_in_msic: Number of entries scanned over while
+ * enforcing the min_clean_fraction in H5C_make_space_in_cache().
+ *
+ * max_entries_skipped_in_msic: Maximum number of clean entries skipped
+ * in any one call to H5C_make_space_in_cache().
+ *
+ * max_entries_scanned_in_msic: Maximum number of entries scanned over
+ * in any one call to H5C_make_space_in_cache().
+ * + * entries_scanned_to_make_space: Number of entries scanned only when looking + * for entries to evict in order to make space in cache. - -/*------------------------------------------------------------------------- + * The remaining stats are collected only when both H5C_COLLECT_CACHE_STATS + * and H5C_COLLECT_CACHE_ENTRY_STATS are true. * - * Macro: H5C__UPDATE_RP_FOR_SIZE_CHANGE + * max_accesses: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the maximum number of times any single + * entry with type id equal to the array index has been + * accessed in the current epoch. * - * Purpose: Update the replacement policy data structures for a - * size change of the specified cache entry. + * min_accesses: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the minimum number of times any single + * entry with type id equal to the array index has been + * accessed in the current epoch. + * + * max_clears: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the maximum number of times any single + * entry with type id equal to the array index has been cleared + * in the current epoch. * - * To do this, determine if the entry is pinned. If it is, - * update the size of the pinned entry list. + * max_flushes: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the maximum number of times any single + * entry with type id equal to the array index has been + * flushed in the current epoch. * - * If it isn't pinned, the entry must handled by the - * replacement policy. Update the appropriate replacement - * policy data structures. + * max_size: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the maximum size of any single entry + * with type id equal to the array index that has resided in + * the cache in the current epoch. * - * At present, we only support the modified LRU policy, so - * this function deals with that case unconditionally. If - * we ever support other replacement policies, the function - * should switch on the current policy and act accordingly. + * max_pins: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * are used to record the maximum number of times that any single + * entry with type id equal to the array index that has been + * marked as pinned in the cache in the current epoch. * - * Return: N/A * - * Programmer: John Mainzer, 8/23/06 + * Fields supporting testing: * - * Modifications: + * prefix Array of char used to prefix debugging output. The + * field is intended to allow marking of output of with + * the processes mpi rank. * - * JRM -- 3/28/07 - * Added sanity checks based on the new is_read_only and - * ro_ref_count fields of struct H5C_cache_entry_t. + * get_entry_ptr_from_addr_counter: Counter used to track the number of + * times the H5C_get_entry_ptr_from_addr() function has been + * called successfully. This field is only defined when + * NDEBUG is not #defined. 
* - *------------------------------------------------------------------------- - */ + ****************************************************************************/ +struct H5C_t { + uint32_t magic; + hbool_t flush_in_progress; + FILE * trace_file_ptr; + void * aux_ptr; + int32_t max_type_id; + const char * (* type_name_table_ptr); + size_t max_cache_size; + size_t min_clean_size; + H5C_write_permitted_func_t check_write_permitted; + hbool_t write_permitted; + H5C_log_flush_func_t log_flush; + hbool_t evictions_enabled; -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS + /* Fields for maintaining [hash table] index of entries */ + int32_t index_len; + size_t index_size; + size_t clean_index_size; + size_t dirty_index_size; + H5C_cache_entry_t * (index[H5C__HASH_TABLE_LEN]); -#define H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_size) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->size > 0 ); \ - HDassert( new_size > 0 ); \ - \ - if ( (entry_ptr)->is_pinned ) { \ - \ - H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->pel_len, \ - (cache_ptr)->pel_size, \ - (entry_ptr)->size, \ - (new_size)); \ - \ - } else { \ - \ - /* modified LRU specific code */ \ - \ - /* Update the size of the LRU list */ \ - \ - H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, \ - (entry_ptr)->size, \ - (new_size)); \ - \ - /* Similarly, update the size of the clean or dirty LRU list as \ - * appropriate. At present, the entry must be clean, but that \ - * could change. \ - */ \ - \ - if ( (entry_ptr)->is_dirty ) { \ - \ - H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->dLRU_list_len, \ - (cache_ptr)->dLRU_list_size, \ - (entry_ptr)->size, \ - (new_size)); \ - \ - } else { \ - \ - H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, \ - (entry_ptr)->size, \ - (new_size)); \ - } \ - \ - /* End modified LRU specific code. */ \ - } \ - \ -} /* H5C__UPDATE_RP_FOR_SIZE_CHANGE */ + /* Field to disable tag validation */ + hbool_t ignore_tags; -#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ + int32_t slist_len; + size_t slist_size; + H5SL_t * slist_ptr; + int32_t num_last_entries; +#if H5C_DO_SANITY_CHECKS + int64_t slist_len_increase; + int64_t slist_size_increase; +#endif /* H5C_DO_SANITY_CHECKS */ -#define H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_size) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->size > 0 ); \ - HDassert( new_size > 0 ); \ - \ - if ( (entry_ptr)->is_pinned ) { \ - \ - H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->pel_len, \ - (cache_ptr)->pel_size, \ - (entry_ptr)->size, \ - (new_size)); \ - \ - } else { \ - \ - /* modified LRU specific code */ \ - \ - /* Update the size of the LRU list */ \ - \ - H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, \ - (entry_ptr)->size, \ - (new_size)); \ - \ - /* End modified LRU specific code. 
*/ \ - } \ - \ -} /* H5C__UPDATE_RP_FOR_SIZE_CHANGE */ + /* Fields for tracking protected entries */ + int32_t pl_len; + size_t pl_size; + H5C_cache_entry_t * pl_head_ptr; + H5C_cache_entry_t * pl_tail_ptr; -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ + /* Fields for tracking pinned entries */ + int32_t pel_len; + size_t pel_size; + H5C_cache_entry_t * pel_head_ptr; + H5C_cache_entry_t * pel_tail_ptr; - -/*------------------------------------------------------------------------- - * - * Macro: H5C__UPDATE_RP_FOR_UNPIN - * - * Purpose: Update the replacement policy data structures for an - * unpin of the specified cache entry. - * - * To do this, unlink the specified entry from the protected - * entry list, and re-insert it in the data structures used - * by the current replacement policy. - * - * At present, we only support the modified LRU policy, so - * this function deals with that case unconditionally. If - * we ever support other replacement policies, the macro - * should switch on the current policy and act accordingly. - * - * Return: N/A - * - * Programmer: John Mainzer, 3/22/06 - * - * Modifications: - * - * JRM -- 3/28/07 - * Added sanity checks based on the new is_read_only and - * ro_ref_count fields of struct H5C_cache_entry_t. - * - *------------------------------------------------------------------------- - */ + /* Fields for complete LRU list of entries */ + int32_t LRU_list_len; + size_t LRU_list_size; + H5C_cache_entry_t * LRU_head_ptr; + H5C_cache_entry_t * LRU_tail_ptr; -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS + /* Fields for clean LRU list of entries */ + int32_t cLRU_list_len; + size_t cLRU_list_size; + H5C_cache_entry_t * cLRU_head_ptr; + H5C_cache_entry_t * cLRU_tail_ptr; + + /* Fields for dirty LRU list of entries */ + int32_t dLRU_list_len; + size_t dLRU_list_size; + H5C_cache_entry_t * dLRU_head_ptr; + H5C_cache_entry_t * dLRU_tail_ptr; + + /* Fields for automatic cache size adjustment */ + hbool_t size_increase_possible; + hbool_t flash_size_increase_possible; + size_t flash_size_increase_threshold; + hbool_t size_decrease_possible; + hbool_t resize_enabled; + hbool_t cache_full; + hbool_t size_decreased; + H5C_auto_size_ctl_t resize_ctl; + + /* Fields for epoch markers used in automatic cache size adjustment */ + int32_t epoch_markers_active; + hbool_t epoch_marker_active[H5C__MAX_EPOCH_MARKERS]; + int32_t epoch_marker_ringbuf[H5C__MAX_EPOCH_MARKERS+1]; + int32_t epoch_marker_ringbuf_first; + int32_t epoch_marker_ringbuf_last; + int32_t epoch_marker_ringbuf_size; + H5C_cache_entry_t epoch_markers[H5C__MAX_EPOCH_MARKERS]; + + /* Fields for cache hit rate collection */ + int64_t cache_hits; + int64_t cache_accesses; + +#if H5C_COLLECT_CACHE_STATS + /* stats fields */ + int64_t hits[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t misses[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t write_protects[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t read_protects[H5C__MAX_NUM_TYPE_IDS + 1]; + int32_t max_read_protects[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t insertions[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t pinned_insertions[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t clears[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t flushes[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t evictions[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t moves[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t entry_flush_moves[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t cache_flush_moves[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t pins[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t unpins[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t dirty_pins[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t 
pinned_flushes[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t pinned_clears[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t size_increases[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t size_decreases[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t entry_flush_size_changes[H5C__MAX_NUM_TYPE_IDS + 1]; + int64_t cache_flush_size_changes[H5C__MAX_NUM_TYPE_IDS + 1]; + + /* Fields for hash table operations */ + int64_t total_ht_insertions; + int64_t total_ht_deletions; + int64_t successful_ht_searches; + int64_t total_successful_ht_search_depth; + int64_t failed_ht_searches; + int64_t total_failed_ht_search_depth; + int32_t max_index_len; + size_t max_index_size; + size_t max_clean_index_size; + size_t max_dirty_index_size; -#define H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->is_pinned); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - /* Regardless of the replacement policy, remove the entry from the \ - * pinned entry list. \ - */ \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \ - (cache_ptr)->pel_tail_ptr, (cache_ptr)->pel_len, \ - (cache_ptr)->pel_size, (fail_val)) \ - HDassert( (cache_ptr)->pel_len >= 0 ); \ - \ - /* modified LRU specific code */ \ - \ - /* insert the entry at the head of the LRU list. */ \ - \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - /* Similarly, insert the entry at the head of either the clean \ - * or dirty LRU list as appropriate. \ - */ \ - \ - if ( (entry_ptr)->is_dirty ) { \ - \ - H5C__AUX_DLL_PREPEND((entry_ptr), \ - (cache_ptr)->dLRU_head_ptr, \ - (cache_ptr)->dLRU_tail_ptr, \ - (cache_ptr)->dLRU_list_len, \ - (cache_ptr)->dLRU_list_size, \ - (fail_val)) \ - \ - } else { \ - \ - H5C__AUX_DLL_PREPEND((entry_ptr), \ - (cache_ptr)->cLRU_head_ptr, \ - (cache_ptr)->cLRU_tail_ptr, \ - (cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, \ - (fail_val)) \ - } \ - \ - /* End modified LRU specific code. */ \ - \ -} /* H5C__UPDATE_RP_FOR_UNPIN */ + /* Fields for in-order skip list */ + int32_t max_slist_len; + size_t max_slist_size; -#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ + /* Fields for protected entry list */ + int32_t max_pl_len; + size_t max_pl_size; -#define H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( !((entry_ptr)->is_protected) ); \ - HDassert( !((entry_ptr)->is_read_only) ); \ - HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \ - HDassert( (entry_ptr)->is_pinned); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - /* Regardless of the replacement policy, remove the entry from the \ - * pinned entry list. \ - */ \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \ - (cache_ptr)->pel_tail_ptr, (cache_ptr)->pel_len, \ - (cache_ptr)->pel_size, (fail_val)) \ - HDassert( (cache_ptr)->pel_len >= 0 ); \ - \ - /* modified LRU specific code */ \ - \ - /* insert the entry at the head of the LRU list. 
*/ \ - \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - /* End modified LRU specific code. */ \ - \ -} /* H5C__UPDATE_RP_FOR_UNPIN */ + /* Fields for pinned entry list */ + int32_t max_pel_len; + size_t max_pel_size; -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ + /* Fields for tacking 'make space in cache' (msic) operations */ + int64_t calls_to_msic; + int64_t total_entries_skipped_in_msic; + int64_t total_entries_scanned_in_msic; + int32_t max_entries_skipped_in_msic; + int32_t max_entries_scanned_in_msic; + int64_t entries_scanned_to_make_space; - -/*------------------------------------------------------------------------- - * - * Macro: H5C__UPDATE_RP_FOR_UNPROTECT - * - * Purpose: Update the replacement policy data structures for an - * unprotect of the specified cache entry. - * - * To do this, unlink the specified entry from the protected - * list, and re-insert it in the data structures used by the - * current replacement policy. - * - * At present, we only support the modified LRU policy, so - * this function deals with that case unconditionally. If - * we ever support other replacement policies, the function - * should switch on the current policy and act accordingly. - * - * Return: N/A - * - * Programmer: John Mainzer, 5/19/04 - * - * Modifications: - * - * JRM - 7/27/04 - * Converted the function H5C_update_rp_for_unprotect() to - * the macro H5C__UPDATE_RP_FOR_UNPROTECT in an effort to - * squeeze a bit more performance out of the cache. - * - * At least for the first cut, I am leaving the comments and - * white space in the macro. If they cause dificulties with - * pre-processor, I'll have to remove them. - * - * JRM - 7/28/04 - * Split macro into two version, one supporting the clean and - * dirty LRU lists, and the other not. Yet another attempt - * at optimization. - * - * JRM - 3/17/06 - * Modified macro to put pinned entries on the pinned entry - * list instead of inserting them in the data structures - * maintained by the replacement policy. - * - *------------------------------------------------------------------------- - */ +#if H5C_COLLECT_CACHE_ENTRY_STATS + int32_t max_accesses[H5C__MAX_NUM_TYPE_IDS + 1]; + int32_t min_accesses[H5C__MAX_NUM_TYPE_IDS + 1]; + int32_t max_clears[H5C__MAX_NUM_TYPE_IDS + 1]; + int32_t max_flushes[H5C__MAX_NUM_TYPE_IDS + 1]; + size_t max_size[H5C__MAX_NUM_TYPE_IDS + 1]; + int32_t max_pins[H5C__MAX_NUM_TYPE_IDS + 1]; +#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */ +#endif /* H5C_COLLECT_CACHE_STATS */ -#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS + char prefix[H5C__PREFIX_LEN]; -#define H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( (entry_ptr)->is_protected); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - /* Regardless of the replacement policy, remove the entry from the \ - * protected list. \ - */ \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pl_head_ptr, \ - (cache_ptr)->pl_tail_ptr, (cache_ptr)->pl_len, \ - (cache_ptr)->pl_size, (fail_val)) \ - \ - if ( (entry_ptr)->is_pinned ) { \ - \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \ - (cache_ptr)->pel_tail_ptr, \ - (cache_ptr)->pel_len, \ - (cache_ptr)->pel_size, (fail_val)) \ - \ - } else { \ - \ - /* modified LRU specific code */ \ - \ - /* insert the entry at the head of the LRU list. 
*/ \ - \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - /* Similarly, insert the entry at the head of either the clean or \ - * dirty LRU list as appropriate. \ - */ \ - \ - if ( (entry_ptr)->is_dirty ) { \ - \ - H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \ - (cache_ptr)->dLRU_tail_ptr, \ - (cache_ptr)->dLRU_list_len, \ - (cache_ptr)->dLRU_list_size, (fail_val)) \ - \ - } else { \ - \ - H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \ - (cache_ptr)->cLRU_tail_ptr, \ - (cache_ptr)->cLRU_list_len, \ - (cache_ptr)->cLRU_list_size, (fail_val)) \ - } \ - \ - /* End modified LRU specific code. */ \ - } \ - \ -} /* H5C__UPDATE_RP_FOR_UNPROTECT */ +#ifndef NDEBUG + int64_t get_entry_ptr_from_addr_counter; +#endif /* NDEBUG */ +}; -#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ +/*****************************/ +/* Package Private Variables */ +/*****************************/ -#define H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, fail_val) \ -{ \ - HDassert( (cache_ptr) ); \ - HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \ - HDassert( (entry_ptr) ); \ - HDassert( (entry_ptr)->is_protected); \ - HDassert( (entry_ptr)->size > 0 ); \ - \ - /* Regardless of the replacement policy, remove the entry from the \ - * protected list. \ - */ \ - H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pl_head_ptr, \ - (cache_ptr)->pl_tail_ptr, (cache_ptr)->pl_len, \ - (cache_ptr)->pl_size, (fail_val)) \ - \ - if ( (entry_ptr)->is_pinned ) { \ - \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \ - (cache_ptr)->pel_tail_ptr, \ - (cache_ptr)->pel_len, \ - (cache_ptr)->pel_size, (fail_val)) \ - \ - } else { \ - \ - /* modified LRU specific code */ \ - \ - /* insert the entry at the head of the LRU list. */ \ - \ - H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \ - (cache_ptr)->LRU_tail_ptr, \ - (cache_ptr)->LRU_list_len, \ - (cache_ptr)->LRU_list_size, (fail_val)) \ - \ - /* End modified LRU specific code. */ \ - } \ -} /* H5C__UPDATE_RP_FOR_UNPROTECT */ -#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ +/******************************/ +/* Package Private Prototypes */ +/******************************/ #endif /* _H5Cpkg_H */ diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h index 301dd19..3413381 100644 --- a/src/H5Cprivate.h +++ b/src/H5Cprivate.h @@ -36,6 +36,9 @@ #include "H5private.h" /* Generic Functions */ #include "H5Fprivate.h" /* File access */ +/**************************/ +/* Library Private Macros */ +/**************************/ #define H5C_DO_SANITY_CHECKS 0 #define H5C_DO_TAGGING_SANITY_CHECKS 1 @@ -61,36 +64,162 @@ * H5C_COLLECT_CACHE_STATS is also defined to true. */ #if H5C_COLLECT_CACHE_STATS - #define H5C_COLLECT_CACHE_ENTRY_STATS 1 - #else - #define H5C_COLLECT_CACHE_ENTRY_STATS 0 - #endif /* H5C_COLLECT_CACHE_STATS */ - #ifdef H5_HAVE_PARALLEL - /* we must maintain the clean and dirty LRU lists when we are compiled * with parallel support. */ #define H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS 1 - #else /* H5_HAVE_PARALLEL */ - /* The clean and dirty LRU lists don't buy us anything here -- we may * want them on for testing on occasion, but in general they should be * off. 
*/ #define H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS 0 - #endif /* H5_HAVE_PARALLEL */ +/* Flags returned from the 'flush' callback */ +#define H5C_CALLBACK__NO_FLAGS_SET 0x0 +#define H5C_CALLBACK__SIZE_CHANGED_FLAG 0x1 +#define H5C_CALLBACK__MOVED_FLAG 0x2 -/* Typedef for the main structure for the cache (defined in H5Cpkg.h) */ +/* Upper and lower limits on cache size. These limits are picked + * out of a hat -- you should be able to change them as necessary. + * + * However, if you need a very big cache, you should also increase the + * size of the hash table (H5C__HASH_TABLE_LEN in H5Cpkg.h). The current + * upper bound on cache size is rather large for the current hash table + * size. + */ +#define H5C__MAX_MAX_CACHE_SIZE ((size_t)(128 * 1024 * 1024)) +#define H5C__MIN_MAX_CACHE_SIZE ((size_t)(1024)) + +/* Default max cache size and min clean size are give here to make + * them generally accessable. + */ +#define H5C__DEFAULT_MAX_CACHE_SIZE ((size_t)(4 * 1024 * 1024)) +#define H5C__DEFAULT_MIN_CLEAN_SIZE ((size_t)(2 * 1024 * 1024)) + +/* Maximum height of flush dependency relationships between entries. This is + * currently tuned to the extensible array (H5EA) data structure, which only + * requires 6 levels of dependency (i.e. heights 0-6) (actually, the extensible + * array needs 4 levels, plus another 2 levels are needed: one for the layer + * under the extensible array and one for the layer above it). + */ +#define H5C__NUM_FLUSH_DEP_HEIGHTS 6 + +#ifndef NDEBUG +/* Values for cache entry magic field */ +#define H5C__H5C_CACHE_ENTRY_T_MAGIC 0x005CAC0A +#define H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC 0xDeadBeef +#endif /* NDEBUG */ + +/* Cache configuration validation definitions */ +#define H5C_RESIZE_CFG__VALIDATE_GENERAL 0x1 +#define H5C_RESIZE_CFG__VALIDATE_INCREMENT 0x2 +#define H5C_RESIZE_CFG__VALIDATE_DECREMENT 0x4 +#define H5C_RESIZE_CFG__VALIDATE_INTERACTIONS 0x8 +#define H5C_RESIZE_CFG__VALIDATE_ALL \ +( \ + H5C_RESIZE_CFG__VALIDATE_GENERAL | \ + H5C_RESIZE_CFG__VALIDATE_INCREMENT | \ + H5C_RESIZE_CFG__VALIDATE_DECREMENT | \ + H5C_RESIZE_CFG__VALIDATE_INTERACTIONS \ +) + +/* Cache configuration versions */ +#define H5C__CURR_AUTO_SIZE_CTL_VER 1 +#define H5C__CURR_AUTO_RESIZE_RPT_FCN_VER 1 + +/* Number of epoch markers active */ +#define H5C__MAX_EPOCH_MARKERS 10 + +/* Default configuration settings */ +#define H5C__DEF_AR_UPPER_THRESHHOLD 0.9999f +#define H5C__DEF_AR_LOWER_THRESHHOLD 0.9f +#define H5C__DEF_AR_MAX_SIZE ((size_t)(16 * 1024 * 1024)) +#define H5C__DEF_AR_INIT_SIZE ((size_t)( 1 * 1024 * 1024)) +#define H5C__DEF_AR_MIN_SIZE ((size_t)( 1 * 1024 * 1024)) +#define H5C__DEF_AR_MIN_CLEAN_FRAC 0.5f +#define H5C__DEF_AR_INCREMENT 2.0f +#define H5C__DEF_AR_MAX_INCREMENT ((size_t)( 2 * 1024 * 1024)) +#define H5C__DEF_AR_FLASH_MULTIPLE 1.0f +#define H5C__DEV_AR_FLASH_THRESHOLD 0.25f +#define H5C__DEF_AR_DECREMENT 0.9f +#define H5C__DEF_AR_MAX_DECREMENT ((size_t)( 1 * 1024 * 1024)) +#define H5C__DEF_AR_EPCHS_B4_EVICT 3 +#define H5C__DEF_AR_EMPTY_RESERVE 0.05f +#define H5C__MIN_AR_EPOCH_LENGTH 100 +#define H5C__DEF_AR_EPOCH_LENGTH 50000 +#define H5C__MAX_AR_EPOCH_LENGTH 1000000 + +/* #defines of flags used in the flags parameters in some of the + * cache calls. Note that not all flags are applicable + * to all function calls. Flags that don't apply to a particular + * function are ignored in that function. 
+ * + * These flags apply to all function calls: + * H5C__NO_FLAGS_SET (generic "no flags set" for all fcn calls) + * + * + * These flags apply to H5C_insert_entry(): + * H5C__SET_FLUSH_MARKER_FLAG + * H5C__PIN_ENTRY_FLAG + * + * These flags apply to H5C_protect() + * H5C__READ_ONLY_FLAG + * + * These flags apply to H5C_unprotect(): + * H5C__SET_FLUSH_MARKER_FLAG + * H5C__DELETED_FLAG + * H5C__DIRTIED_FLAG + * H5C__PIN_ENTRY_FLAG + * H5C__UNPIN_ENTRY_FLAG + * H5C__FREE_FILE_SPACE_FLAG + * H5C__TAKE_OWNERSHIP_FLAG + * + * These flags apply to H5C_expunge_entry(): + * H5C__FREE_FILE_SPACE_FLAG + * + * These flags apply to H5C_flush_cache(): + * H5C__FLUSH_INVALIDATE_FLAG + * H5C__FLUSH_CLEAR_ONLY_FLAG + * H5C__FLUSH_MARKED_ENTRIES_FLAG + * H5C__FLUSH_IGNORE_PROTECTED_FLAG (can't use this flag in combination + * with H5C__FLUSH_INVALIDATE_FLAG) + * + * These flags apply to H5C_flush_single_entry(): + * H5C__FLUSH_INVALIDATE_FLAG + * H5C__FLUSH_CLEAR_ONLY_FLAG + * H5C__FLUSH_MARKED_ENTRIES_FLAG + * H5C__TAKE_OWNERSHIP_FLAG + */ +#define H5C__NO_FLAGS_SET 0x0000 +#define H5C__SET_FLUSH_MARKER_FLAG 0x0001 +#define H5C__DELETED_FLAG 0x0002 +#define H5C__DIRTIED_FLAG 0x0004 +#define H5C__PIN_ENTRY_FLAG 0x0008 +#define H5C__UNPIN_ENTRY_FLAG 0x0010 +#define H5C__FLUSH_INVALIDATE_FLAG 0x0020 +#define H5C__FLUSH_CLEAR_ONLY_FLAG 0x0040 +#define H5C__FLUSH_MARKED_ENTRIES_FLAG 0x0080 +#define H5C__FLUSH_IGNORE_PROTECTED_FLAG 0x0100 +#define H5C__READ_ONLY_FLAG 0x0200 +#define H5C__FREE_FILE_SPACE_FLAG 0x0800 +#define H5C__TAKE_OWNERSHIP_FLAG 0x1000 +#define H5C__FLUSH_LAST_FLAG 0x2000 +#define H5C__FLUSH_COLLECTIVELY_FLAG 0x4000 + + +/****************************/ +/* Library Private Typedefs */ +/****************************/ +/* Typedef for the main structure for the cache (defined in H5Cpkg.h) */ typedef struct H5C_t H5C_t; @@ -121,10 +250,6 @@ typedef struct H5C_t H5C_t; * Note that the space allocated on disk may not be contiguous. 
*/ -#define H5C_CALLBACK__NO_FLAGS_SET 0x0 -#define H5C_CALLBACK__SIZE_CHANGED_FLAG 0x1 -#define H5C_CALLBACK__MOVED_FLAG 0x2 - /* Actions that can be reported to 'notify' client callback */ typedef enum H5C_notify_action_t { H5C_NOTIFY_ACTION_AFTER_INSERT, /* Entry has been added to the cache */ @@ -135,27 +260,18 @@ typedef enum H5C_notify_action_t { H5C_NOTIFY_ACTION_BEFORE_EVICT /* Entry is about to be evicted from cache */ } H5C_notify_action_t; -typedef void *(*H5C_load_func_t)(H5F_t *f, - hid_t dxpl_id, - haddr_t addr, - void *udata); -typedef herr_t (*H5C_flush_func_t)(H5F_t *f, - hid_t dxpl_id, - hbool_t dest, - haddr_t addr, - void *thing, - unsigned * flags_ptr); -typedef herr_t (*H5C_dest_func_t)(H5F_t *f, - void *thing); -typedef herr_t (*H5C_clear_func_t)(H5F_t *f, - void *thing, - hbool_t dest); -typedef herr_t (*H5C_notify_func_t)(H5C_notify_action_t action, - void *thing); -typedef herr_t (*H5C_size_func_t)(const H5F_t *f, - const void *thing, - size_t *size_ptr); - +/* Cache client callback function pointers */ +typedef void *(*H5C_load_func_t)(H5F_t *f, hid_t dxpl_id, haddr_t addr, + void *udata); +typedef herr_t (*H5C_flush_func_t)(H5F_t *f, hid_t dxpl_id, hbool_t dest, + haddr_t addr, void *thing, unsigned *flags_ptr); +typedef herr_t (*H5C_dest_func_t)(H5F_t *f, void *thing); +typedef herr_t (*H5C_clear_func_t)(H5F_t *f, void *thing, hbool_t dest); +typedef herr_t (*H5C_notify_func_t)(H5C_notify_action_t action, void *thing); +typedef herr_t (*H5C_size_func_t)(const H5F_t *f, const void *thing, + size_t *size_ptr); + +/* Metadata cache client class definition */ typedef struct H5C_class_t { int id; H5C_load_func_t load; @@ -166,47 +282,11 @@ typedef struct H5C_class_t { H5C_size_func_t size; } H5C_class_t; - -/* Type defintions of call back functions used by the cache as a whole */ - +/* Type defintions of callback functions used by the cache as a whole */ typedef herr_t (*H5C_write_permitted_func_t)(const H5F_t *f, - hbool_t * write_permitted_ptr); - -typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr, - haddr_t addr, - hbool_t was_dirty, - unsigned flags); - -/* Upper and lower limits on cache size. These limits are picked - * out of a hat -- you should be able to change them as necessary. - * - * However, if you need a very big cache, you should also increase the - * size of the hash table (H5C__HASH_TABLE_LEN in H5Cpkg.h). The current - * upper bound on cache size is rather large for the current hash table - * size. - */ - -#define H5C__MAX_MAX_CACHE_SIZE ((size_t)(128 * 1024 * 1024)) -#define H5C__MIN_MAX_CACHE_SIZE ((size_t)(1024)) - - -/* Default max cache size and min clean size are give here to make - * them generally accessable. - */ - -#define H5C__DEFAULT_MAX_CACHE_SIZE ((size_t)(4 * 1024 * 1024)) -#define H5C__DEFAULT_MIN_CLEAN_SIZE ((size_t)(2 * 1024 * 1024)) - -/* Maximum height of flush dependency relationships between entries. This is - * currently tuned to the extensible array (H5EA) data structure, which only - * requires 6 levels of dependency (i.e. heights 0-6) (actually, the extensible - * array needs 4 levels, plus another 2 levels are needed: one for the layer - * under the extensible array and one for the layer above it). 
- */ - -#define H5C__NUM_FLUSH_DEP_HEIGHTS 6 - - + hbool_t *write_permitted_ptr); +typedef herr_t (*H5C_log_flush_func_t)(H5C_t *cache_ptr, haddr_t addr, + hbool_t was_dirty, unsigned flags); /**************************************************************************** * @@ -578,21 +658,14 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr, * been pinned in cache in its life time. * ****************************************************************************/ - -#ifndef NDEBUG -#define H5C__H5C_CACHE_ENTRY_T_MAGIC 0x005CAC0A -#define H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC 0xDeadBeef -#endif /* NDEBUG */ - -typedef struct H5C_cache_entry_t -{ +typedef struct H5C_cache_entry_t { #ifndef NDEBUG uint32_t magic; #endif /* NDEBUG */ - H5C_t * cache_ptr; + H5C_t * cache_ptr; haddr_t addr; size_t size; - const H5C_class_t * type; + const H5C_class_t * type; haddr_t tag; hbool_t is_dirty; hbool_t dirtied; @@ -613,39 +686,31 @@ typedef struct H5C_cache_entry_t hbool_t free_file_space_on_destroy; /* fields supporting the 'flush dependency' feature: */ - - struct H5C_cache_entry_t * flush_dep_parent; + struct H5C_cache_entry_t * flush_dep_parent; uint64_t child_flush_dep_height_rc[H5C__NUM_FLUSH_DEP_HEIGHTS]; unsigned flush_dep_height; hbool_t pinned_from_client; hbool_t pinned_from_cache; /* fields supporting the hash table: */ - - struct H5C_cache_entry_t * ht_next; - struct H5C_cache_entry_t * ht_prev; + struct H5C_cache_entry_t * ht_next; + struct H5C_cache_entry_t * ht_prev; /* fields supporting replacement policies: */ - - struct H5C_cache_entry_t * next; - struct H5C_cache_entry_t * prev; - struct H5C_cache_entry_t * aux_next; - struct H5C_cache_entry_t * aux_prev; + struct H5C_cache_entry_t * next; + struct H5C_cache_entry_t * prev; + struct H5C_cache_entry_t * aux_next; + struct H5C_cache_entry_t * aux_prev; #if H5C_COLLECT_CACHE_ENTRY_STATS - /* cache entry stats fields */ - int32_t accesses; int32_t clears; int32_t flushes; int32_t pins; - #endif /* H5C_COLLECT_CACHE_ENTRY_STATS */ - } H5C_cache_entry_t; - /**************************************************************************** * * structure H5C_auto_size_ctl_t @@ -886,41 +951,6 @@ typedef struct H5C_cache_entry_t * ****************************************************************************/ -#define H5C_RESIZE_CFG__VALIDATE_GENERAL 0x1 -#define H5C_RESIZE_CFG__VALIDATE_INCREMENT 0x2 -#define H5C_RESIZE_CFG__VALIDATE_DECREMENT 0x4 -#define H5C_RESIZE_CFG__VALIDATE_INTERACTIONS 0x8 -#define H5C_RESIZE_CFG__VALIDATE_ALL \ -( \ - H5C_RESIZE_CFG__VALIDATE_GENERAL | \ - H5C_RESIZE_CFG__VALIDATE_INCREMENT | \ - H5C_RESIZE_CFG__VALIDATE_DECREMENT | \ - H5C_RESIZE_CFG__VALIDATE_INTERACTIONS \ -) - -#define H5C__CURR_AUTO_SIZE_CTL_VER 1 -#define H5C__CURR_AUTO_RESIZE_RPT_FCN_VER 1 - -#define H5C__MAX_EPOCH_MARKERS 10 - -#define H5C__DEF_AR_UPPER_THRESHHOLD 0.9999f -#define H5C__DEF_AR_LOWER_THRESHHOLD 0.9f -#define H5C__DEF_AR_MAX_SIZE ((size_t)(16 * 1024 * 1024)) -#define H5C__DEF_AR_INIT_SIZE ((size_t)( 1 * 1024 * 1024)) -#define H5C__DEF_AR_MIN_SIZE ((size_t)( 1 * 1024 * 1024)) -#define H5C__DEF_AR_MIN_CLEAN_FRAC 0.5f -#define H5C__DEF_AR_INCREMENT 2.0f -#define H5C__DEF_AR_MAX_INCREMENT ((size_t)( 2 * 1024 * 1024)) -#define H5C__DEF_AR_FLASH_MULTIPLE 1.0f -#define H5C__DEV_AR_FLASH_THRESHOLD 0.25f -#define H5C__DEF_AR_DECREMENT 0.9f -#define H5C__DEF_AR_MAX_DECREMENT ((size_t)( 1 * 1024 * 1024)) -#define H5C__DEF_AR_EPCHS_B4_EVICT 3 -#define H5C__DEF_AR_EMPTY_RESERVE 0.05f -#define H5C__MIN_AR_EPOCH_LENGTH 100 -#define 
H5C__DEF_AR_EPOCH_LENGTH 50000 -#define H5C__MAX_AR_EPOCH_LENGTH 1000000 - enum H5C_resize_status { in_spec, @@ -934,301 +964,118 @@ enum H5C_resize_status not_full }; /* enum H5C_resize_conditions */ -typedef void (*H5C_auto_resize_rpt_fcn)(H5C_t * cache_ptr, - int32_t version, - double hit_rate, - enum H5C_resize_status status, - size_t old_max_cache_size, - size_t new_max_cache_size, - size_t old_min_clean_size, - size_t new_min_clean_size); +typedef void (*H5C_auto_resize_rpt_fcn)(H5C_t * cache_ptr, int32_t version, + double hit_rate, enum H5C_resize_status status, size_t old_max_cache_size, + size_t new_max_cache_size, size_t old_min_clean_size, size_t new_min_clean_size); -typedef struct H5C_auto_size_ctl_t -{ +typedef struct H5C_auto_size_ctl_t { /* general configuration fields: */ int32_t version; H5C_auto_resize_rpt_fcn rpt_fcn; - hbool_t set_initial_size; size_t initial_size; - double min_clean_fraction; - size_t max_size; size_t min_size; - int64_t epoch_length; - /* size increase control fields: */ enum H5C_cache_incr_mode incr_mode; - double lower_hr_threshold; - double increment; - hbool_t apply_max_increment; size_t max_increment; - enum H5C_cache_flash_incr_mode flash_incr_mode; double flash_multiple; double flash_threshold; - /* size decrease control fields: */ enum H5C_cache_decr_mode decr_mode; - double upper_hr_threshold; - double decrement; - hbool_t apply_max_decrement; size_t max_decrement; - int32_t epochs_before_eviction; - hbool_t apply_empty_reserve; double empty_reserve; - } H5C_auto_size_ctl_t; - -/* - * Library prototypes. - */ - -/* #defines of flags used in the flags parameters in some of the - * following function calls. Note that not all flags are applicable - * to all function calls. Flags that don't apply to a particular - * function are ignored in that function. 
- * - * These flags apply to all function calls: - * - * H5C__NO_FLAGS_SET (generic "no flags set" for all fcn calls) - * - * - * These flags apply to H5C_insert_entry(): - * - * H5C__SET_FLUSH_MARKER_FLAG - * H5C__PIN_ENTRY_FLAG - * - * These flags apply to H5C_protect() - * - * H5C__READ_ONLY_FLAG - * - * These flags apply to H5C_unprotect(): - * - * H5C__SET_FLUSH_MARKER_FLAG - * H5C__DELETED_FLAG - * H5C__DIRTIED_FLAG - * H5C__PIN_ENTRY_FLAG - * H5C__UNPIN_ENTRY_FLAG - * H5C__FREE_FILE_SPACE_FLAG - * H5C__TAKE_OWNERSHIP_FLAG - * - * These flags apply to H5C_expunge_entry(): - * - * H5C__FREE_FILE_SPACE_FLAG - * - * These flags apply to H5C_flush_cache(): - * - * H5C__FLUSH_INVALIDATE_FLAG - * H5C__FLUSH_CLEAR_ONLY_FLAG - * H5C__FLUSH_MARKED_ENTRIES_FLAG - * H5C__FLUSH_IGNORE_PROTECTED_FLAG (can't use this flag in combination - * with H5C__FLUSH_INVALIDATE_FLAG) - * - * These flags apply to H5C_flush_single_entry(): - * - * H5C__FLUSH_INVALIDATE_FLAG - * H5C__FLUSH_CLEAR_ONLY_FLAG - * H5C__FLUSH_MARKED_ENTRIES_FLAG - * H5C__TAKE_OWNERSHIP_FLAG - */ - -#define H5C__NO_FLAGS_SET 0x0000 -#define H5C__SET_FLUSH_MARKER_FLAG 0x0001 -#define H5C__DELETED_FLAG 0x0002 -#define H5C__DIRTIED_FLAG 0x0004 -#define H5C__PIN_ENTRY_FLAG 0x0008 -#define H5C__UNPIN_ENTRY_FLAG 0x0010 -#define H5C__FLUSH_INVALIDATE_FLAG 0x0020 -#define H5C__FLUSH_CLEAR_ONLY_FLAG 0x0040 -#define H5C__FLUSH_MARKED_ENTRIES_FLAG 0x0080 -#define H5C__FLUSH_IGNORE_PROTECTED_FLAG 0x0100 -#define H5C__READ_ONLY_FLAG 0x0200 -#define H5C__FREE_FILE_SPACE_FLAG 0x0800 -#define H5C__TAKE_OWNERSHIP_FLAG 0x1000 -#define H5C__FLUSH_LAST_FLAG 0x2000 -#define H5C__FLUSH_COLLECTIVELY_FLAG 0x4000 - -#ifdef H5_HAVE_PARALLEL -H5_DLL herr_t H5C_apply_candidate_list(H5F_t * f, - hid_t primary_dxpl_id, - hid_t secondary_dxpl_id, - H5C_t * cache_ptr, - int num_candidates, - haddr_t * candidates_list_ptr, - int mpi_rank, - int mpi_size); - -H5_DLL herr_t H5C_construct_candidate_list__clean_cache(H5C_t * cache_ptr); - -H5_DLL herr_t H5C_construct_candidate_list__min_clean(H5C_t * cache_ptr); -#endif /* H5_HAVE_PARALLEL */ - -H5_DLL H5C_t * H5C_create(size_t max_cache_size, - size_t min_clean_size, - int max_type_id, - const char * (* type_name_table_ptr), - H5C_write_permitted_func_t check_write_permitted, - hbool_t write_permitted, - H5C_log_flush_func_t log_flush, - void * aux_ptr); - -H5_DLL void H5C_def_auto_resize_rpt_fcn(H5C_t * cache_ptr, - int32_t version, - double hit_rate, - enum H5C_resize_status status, - size_t old_max_cache_size, - size_t new_max_cache_size, - size_t old_min_clean_size, - size_t new_min_clean_size); - -H5_DLL herr_t H5C_dest(H5F_t * f, - hid_t primary_dxpl_id, - hid_t secondary_dxpl_id); - -H5_DLL herr_t H5C_expunge_entry(H5F_t * f, - hid_t primary_dxpl_id, - hid_t secondary_dxpl_id, - const H5C_class_t * type, - haddr_t addr, - unsigned flags); - -H5_DLL herr_t H5C_flush_cache(H5F_t * f, - hid_t primary_dxpl_id, - hid_t secondary_dxpl_id, - unsigned flags); - -H5_DLL herr_t H5C_flush_to_min_clean(H5F_t * f, - hid_t primary_dxpl_id, - hid_t secondary_dxpl_id); - -H5_DLL herr_t H5C_get_cache_auto_resize_config(const H5C_t * cache_ptr, - H5C_auto_size_ctl_t *config_ptr); - -H5_DLL herr_t H5C_get_cache_size(H5C_t * cache_ptr, - size_t * max_size_ptr, - size_t * min_clean_size_ptr, - size_t * cur_size_ptr, - int32_t * cur_num_entries_ptr); - -H5_DLL herr_t H5C_get_cache_hit_rate(H5C_t * cache_ptr, - double * hit_rate_ptr); - -H5_DLL herr_t H5C_get_entry_status(const H5F_t *f, - haddr_t addr, - size_t * size_ptr, - hbool_t * 
in_cache_ptr, - hbool_t * is_dirty_ptr, - hbool_t * is_protected_ptr, - hbool_t * is_pinned_ptr, - hbool_t * is_flush_dep_parent_ptr, - hbool_t * is_flush_dep_child_ptr); - -H5_DLL herr_t H5C_get_evictions_enabled(const H5C_t * cache_ptr, - hbool_t * evictions_enabled_ptr); - +/***************************************/ +/* Library-private Function Prototypes */ +/***************************************/ + +H5_DLL H5C_t *H5C_create(size_t max_cache_size, size_t min_clean_size, + int max_type_id, const char *(*type_name_table_ptr), + H5C_write_permitted_func_t check_write_permitted, hbool_t write_permitted, + H5C_log_flush_func_t log_flush, void *aux_ptr); +H5_DLL void H5C_def_auto_resize_rpt_fcn(H5C_t *cache_ptr, int32_t version, + double hit_rate, enum H5C_resize_status status, + size_t old_max_cache_size, size_t new_max_cache_size, + size_t old_min_clean_size, size_t new_min_clean_size); +H5_DLL herr_t H5C_dest(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id); +H5_DLL herr_t H5C_expunge_entry(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, + const H5C_class_t *type, haddr_t addr, unsigned flags); +H5_DLL herr_t H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, + unsigned flags); +H5_DLL herr_t H5C_flush_to_min_clean(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id); +H5_DLL herr_t H5C_get_cache_auto_resize_config(const H5C_t *cache_ptr, + H5C_auto_size_ctl_t *config_ptr); +H5_DLL herr_t H5C_get_cache_size(H5C_t *cache_ptr, size_t *max_size_ptr, + size_t *min_clean_size_ptr, size_t *cur_size_ptr, + int32_t *cur_num_entries_ptr); +H5_DLL herr_t H5C_get_cache_hit_rate(H5C_t *cache_ptr, double *hit_rate_ptr); +H5_DLL herr_t H5C_get_entry_status(const H5F_t *f, haddr_t addr, + size_t *size_ptr, hbool_t *in_cache_ptr, hbool_t *is_dirty_ptr, + hbool_t *is_protected_ptr, hbool_t *is_pinned_ptr, + hbool_t *is_flush_dep_parent_ptr, hbool_t *is_flush_dep_child_ptr); +H5_DLL herr_t H5C_get_evictions_enabled(const H5C_t *cache_ptr, hbool_t *evictions_enabled_ptr); H5_DLL FILE *H5C_get_trace_file_ptr(const H5C_t *cache_ptr); H5_DLL FILE *H5C_get_trace_file_ptr_from_entry(const H5C_cache_entry_t *entry_ptr); - -H5_DLL herr_t H5C_insert_entry(H5F_t * f, - hid_t primary_dxpl_id, - hid_t secondary_dxpl_id, - const H5C_class_t * type, - haddr_t addr, - void * thing, - unsigned int flags); - -H5_DLL herr_t H5C_mark_entries_as_clean(H5F_t * f, - hid_t primary_dxpl_id, - hid_t secondary_dxpl_id, - int32_t ce_array_len, - haddr_t *ce_array_ptr); - +H5_DLL herr_t H5C_insert_entry(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, + const H5C_class_t *type, haddr_t addr, void *thing, unsigned int flags); +H5_DLL herr_t H5C_mark_entries_as_clean(H5F_t *f, hid_t primary_dxpl_id, + hid_t secondary_dxpl_id, int32_t ce_array_len, haddr_t *ce_array_ptr); H5_DLL herr_t H5C_mark_entry_dirty(void *thing); - -H5_DLL herr_t H5C_move_entry(H5C_t * cache_ptr, - const H5C_class_t * type, - haddr_t old_addr, - haddr_t new_addr); - +H5_DLL herr_t H5C_move_entry(H5C_t *cache_ptr, const H5C_class_t *type, + haddr_t old_addr, haddr_t new_addr); H5_DLL herr_t H5C_pin_protected_entry(void *thing); - H5_DLL herr_t H5C_create_flush_dependency(void *parent_thing, void *child_thing); - -H5_DLL void * H5C_protect(H5F_t * f, - hid_t primary_dxpl_id, - hid_t secondary_dxpl_id, - const H5C_class_t * type, - haddr_t addr, - void * udata, - unsigned flags); - -H5_DLL herr_t H5C_reset_cache_hit_rate_stats(H5C_t * cache_ptr); - +H5_DLL void * H5C_protect(H5F_t *f, hid_t primary_dxpl_id, 
hid_t secondary_dxpl_id, + const H5C_class_t *type, haddr_t addr, void *udata, unsigned flags); +H5_DLL herr_t H5C_reset_cache_hit_rate_stats(H5C_t *cache_ptr); H5_DLL herr_t H5C_resize_entry(void *thing, size_t new_size); - H5_DLL herr_t H5C_set_cache_auto_resize_config(H5C_t *cache_ptr, - H5C_auto_size_ctl_t *config_ptr); - -H5_DLL herr_t H5C_set_evictions_enabled(H5C_t *cache_ptr, - hbool_t evictions_enabled); - -H5_DLL herr_t H5C_set_prefix(H5C_t * cache_ptr, char * prefix); - -H5_DLL herr_t H5C_set_trace_file_ptr(H5C_t * cache_ptr, - FILE * trace_file_ptr); - -H5_DLL herr_t H5C_stats(H5C_t * cache_ptr, - const char * cache_name, - hbool_t display_detailed_stats); - -H5_DLL void H5C_stats__reset(H5C_t * cache_ptr); - -H5_DLL herr_t H5C_dump_cache(H5C_t * cache_ptr, - const char * cache_name); - + H5C_auto_size_ctl_t *config_ptr); +H5_DLL herr_t H5C_set_evictions_enabled(H5C_t *cache_ptr, hbool_t evictions_enabled); +H5_DLL herr_t H5C_set_prefix(H5C_t *cache_ptr, char *prefix); +H5_DLL herr_t H5C_set_trace_file_ptr(H5C_t *cache_ptr, FILE *trace_file_ptr); +H5_DLL herr_t H5C_stats(H5C_t *cache_ptr, const char *cache_name, + hbool_t display_detailed_stats); +H5_DLL void H5C_stats__reset(H5C_t *cache_ptr); +H5_DLL herr_t H5C_dump_cache(H5C_t *cache_ptr, const char *cache_name); H5_DLL herr_t H5C_unpin_entry(void *thing); - H5_DLL herr_t H5C_destroy_flush_dependency(void *parent_thing, void *child_thing); +H5_DLL herr_t H5C_unprotect(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, + const H5C_class_t *type, haddr_t addr, void *thing, unsigned int flags); +H5_DLL herr_t H5C_validate_resize_config(H5C_auto_size_ctl_t *config_ptr, + unsigned int tests); +H5_DLL herr_t H5C_ignore_tags(H5C_t *cache_ptr); +H5_DLL void H5C_retag_copied_metadata(H5C_t *cache_ptr, haddr_t metadata_tag); -H5_DLL herr_t H5C_unprotect(H5F_t * f, - hid_t primary_dxpl_id, - hid_t secondary_dxpl_id, - const H5C_class_t * type, - haddr_t addr, - void * thing, - unsigned int flags); - -H5_DLL herr_t H5C_validate_resize_config(H5C_auto_size_ctl_t * config_ptr, - unsigned int tests); - -H5_DLL herr_t H5C_ignore_tags(H5C_t * cache_ptr); - -H5_DLL void H5C_retag_copied_metadata(H5C_t * cache_ptr, haddr_t metadata_tag); +#ifdef H5_HAVE_PARALLEL +H5_DLL herr_t H5C_apply_candidate_list(H5F_t *f, hid_t primary_dxpl_id, + hid_t secondary_dxpl_id, H5C_t *cache_ptr, int num_candidates, + haddr_t *candidates_list_ptr, int mpi_rank, int mpi_size); +H5_DLL herr_t H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr); +H5_DLL herr_t H5C_construct_candidate_list__min_clean(H5C_t *cache_ptr); +#endif /* H5_HAVE_PARALLEL */ #ifndef NDEBUG /* debugging functions */ - H5_DLL herr_t H5C_get_entry_ptr_from_addr(const H5F_t *f, haddr_t addr, - void ** entry_ptr_ptr); - -H5_DLL herr_t H5C_verify_entry_type(const H5F_t * f, haddr_t addr, - const H5C_class_t * expected_type, - hbool_t * in_cache_ptr, - hbool_t * type_ok_ptr); - + void **entry_ptr_ptr); +H5_DLL herr_t H5C_verify_entry_type(const H5F_t *f, haddr_t addr, + const H5C_class_t *expected_type, hbool_t *in_cache_ptr, + hbool_t *type_ok_ptr); #endif /* NDEBUG */ #endif /* !_H5Cprivate_H */ diff --git a/test/cache.c b/test/cache.c index 9bf225e..85fbadb 100644 --- a/test/cache.c +++ b/test/cache.c @@ -235,7 +235,6 @@ static unsigned check_notify_cb(void); static unsigned smoke_check_1(void) { - const char * fcn_name = "smoke_check_1"; hbool_t show_progress = FALSE; int dirty_unprotects = FALSE; int dirty_destroys = FALSE; @@ -256,20 +255,20 @@ smoke_check_1(void) if ( 
show_progress ) /* 1 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); reset_entries(); if ( show_progress ) /* 2 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); file_ptr = setup_cache((size_t)(4 * 1024 * 1024), (size_t)(2 * 1024 * 1024)); if ( show_progress ) /* 3 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); row_major_scan_forward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -287,7 +286,7 @@ smoke_check_1(void) if ( show_progress ) /* 4 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); row_major_scan_backward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -305,7 +304,7 @@ smoke_check_1(void) if ( show_progress ) /* 5 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); row_major_scan_forward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -323,7 +322,7 @@ smoke_check_1(void) if ( show_progress ) /* 6 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* flush and destroy all entries in the cache: */ @@ -334,7 +333,7 @@ smoke_check_1(void) if ( show_progress ) /* 7 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); col_major_scan_forward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -347,7 +346,7 @@ smoke_check_1(void) if ( show_progress ) /* 8 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* flush all entries in the cache: */ @@ -358,7 +357,7 @@ smoke_check_1(void) if ( show_progress ) /* 9 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); col_major_scan_backward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -371,13 +370,13 @@ smoke_check_1(void) if ( show_progress ) /* 10 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); takedown_cache(file_ptr, display_stats, TRUE); if ( show_progress ) /* 11 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); verify_clean(); verify_unprotected(); @@ -387,7 +386,7 @@ smoke_check_1(void) if ( ! 
pass ) { HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -420,7 +419,6 @@ smoke_check_1(void) static unsigned smoke_check_2(void) { - const char * fcn_name = "smoke_check_2"; hbool_t show_progress = FALSE; int dirty_unprotects = TRUE; int dirty_destroys = TRUE; @@ -441,20 +439,20 @@ smoke_check_2(void) if ( show_progress ) /* 1 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); reset_entries(); if ( show_progress ) /* 2 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); file_ptr = setup_cache((size_t)(4 * 1024 * 1024), (size_t)(2 * 1024 * 1024)); if ( show_progress ) /* 3 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); row_major_scan_forward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -472,7 +470,7 @@ smoke_check_2(void) if ( show_progress ) /* 4 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); row_major_scan_backward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -490,7 +488,7 @@ smoke_check_2(void) if ( show_progress ) /* 5 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); row_major_scan_forward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -508,7 +506,7 @@ smoke_check_2(void) if ( show_progress ) /* 6 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* flush and destroy all entries in the cache: */ @@ -519,7 +517,7 @@ smoke_check_2(void) if ( show_progress ) /* 7 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); col_major_scan_forward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -532,7 +530,7 @@ smoke_check_2(void) if ( show_progress ) /* 8 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* flush all entries in the cache: */ @@ -543,7 +541,7 @@ smoke_check_2(void) if ( show_progress ) /* 9 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); col_major_scan_backward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -556,13 +554,13 @@ smoke_check_2(void) if ( show_progress ) /* 10 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); takedown_cache(file_ptr, display_stats, TRUE); if ( show_progress ) /* 11 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); verify_clean(); verify_unprotected(); @@ -572,7 +570,7 @@ smoke_check_2(void) if ( ! 
pass ) { HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -604,7 +602,6 @@ smoke_check_2(void) static unsigned smoke_check_3(void) { - const char * fcn_name = "smoke_check_3"; hbool_t show_progress = FALSE; int dirty_unprotects = FALSE; int dirty_destroys = FALSE; @@ -625,20 +622,20 @@ smoke_check_3(void) if ( show_progress ) /* 1 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); reset_entries(); if ( show_progress ) /* 2 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024)); if ( show_progress ) /* 3 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); row_major_scan_forward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -656,7 +653,7 @@ smoke_check_3(void) if ( show_progress ) /* 4 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); row_major_scan_backward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -674,7 +671,7 @@ smoke_check_3(void) if ( show_progress ) /* 5 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); row_major_scan_forward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -692,7 +689,7 @@ smoke_check_3(void) if ( show_progress ) /* 6 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* flush and destroy all entries in the cache: */ @@ -703,7 +700,7 @@ smoke_check_3(void) if ( show_progress ) /* 7 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); col_major_scan_forward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -716,7 +713,7 @@ smoke_check_3(void) if ( show_progress ) /* 8 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* flush all entries in the cache: */ @@ -727,7 +724,7 @@ smoke_check_3(void) if ( show_progress ) /* 9 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); col_major_scan_backward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -740,13 +737,13 @@ smoke_check_3(void) if ( show_progress ) /* 10 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); takedown_cache(file_ptr, display_stats, TRUE); if ( show_progress ) /* 11 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); verify_clean(); verify_unprotected(); @@ -756,7 +753,7 @@ smoke_check_3(void) if ( ! 
pass ) { HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -789,7 +786,6 @@ smoke_check_3(void) static unsigned smoke_check_4(void) { - const char * fcn_name = "smoke_check_4"; hbool_t show_progress = FALSE; int dirty_unprotects = TRUE; int dirty_destroys = TRUE; @@ -810,20 +806,20 @@ smoke_check_4(void) if ( show_progress ) /* 1 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); reset_entries(); if ( show_progress ) /* 2 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024)); if ( show_progress ) /* 3 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); row_major_scan_forward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -841,7 +837,7 @@ smoke_check_4(void) if ( show_progress ) /* 4 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); row_major_scan_backward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -859,7 +855,7 @@ smoke_check_4(void) if ( show_progress ) /* 5 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); row_major_scan_forward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -877,7 +873,7 @@ smoke_check_4(void) if ( show_progress ) /* 6 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* flush and destroy all entries in the cache: */ @@ -888,7 +884,7 @@ smoke_check_4(void) if ( show_progress ) /* 7 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); col_major_scan_forward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -901,7 +897,7 @@ smoke_check_4(void) if ( show_progress ) /* 8 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* flush all entries in the cache: */ @@ -912,7 +908,7 @@ smoke_check_4(void) if ( show_progress ) /* 9 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); col_major_scan_backward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -925,13 +921,13 @@ smoke_check_4(void) if ( show_progress ) /* 10 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); takedown_cache(file_ptr, display_stats, TRUE); if ( show_progress ) /* 11 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); verify_clean(); verify_unprotected(); @@ -941,7 +937,7 @@ smoke_check_4(void) if ( ! 
pass ) { HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -974,7 +970,6 @@ smoke_check_4(void) static unsigned smoke_check_5(void) { - const char * fcn_name = "smoke_check_5"; herr_t result; hbool_t show_progress = FALSE; int dirty_unprotects = FALSE; @@ -1052,13 +1047,13 @@ smoke_check_5(void) if ( show_progress ) /* 1 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); reset_entries(); if ( show_progress ) /* 2 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024)); @@ -1077,7 +1072,7 @@ smoke_check_5(void) if ( show_progress ) /* 3 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); hl_row_major_scan_forward(/* file_ptr */ file_ptr, /* max_index */ max_index, @@ -1089,7 +1084,7 @@ smoke_check_5(void) if ( show_progress ) /* 4 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); hl_row_major_scan_backward(/* file_ptr */ file_ptr, /* max_index */ max_index, @@ -1101,7 +1096,7 @@ smoke_check_5(void) if ( show_progress ) /* 5 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); hl_row_major_scan_forward(/* file_ptr */ file_ptr, /* max_index */ max_index, @@ -1113,7 +1108,7 @@ smoke_check_5(void) if ( show_progress ) /* 6 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* flush and destroy all entries in the cache: */ @@ -1124,7 +1119,7 @@ smoke_check_5(void) if ( show_progress ) /* 7 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); hl_col_major_scan_forward(/* file_ptr */ file_ptr, /* max_index */ max_index, @@ -1137,7 +1132,7 @@ smoke_check_5(void) if ( show_progress ) /* 8 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* flush all entries in the cache: */ @@ -1148,7 +1143,7 @@ smoke_check_5(void) if ( show_progress ) /* 9 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); hl_col_major_scan_backward(/* file_ptr */ file_ptr, /* max_index */ max_index, @@ -1161,13 +1156,13 @@ smoke_check_5(void) if ( show_progress ) /* 10 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); takedown_cache(file_ptr, display_stats, TRUE); if ( show_progress ) /* 11 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); verify_clean(); verify_unprotected(); @@ -1177,7 +1172,7 @@ smoke_check_5(void) if ( ! 
pass ) { HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -1210,7 +1205,6 @@ smoke_check_5(void) static unsigned smoke_check_6(void) { - const char * fcn_name = "smoke_check_6"; herr_t result; hbool_t show_progress = FALSE; int dirty_unprotects = FALSE; @@ -1285,13 +1279,13 @@ smoke_check_6(void) if ( show_progress ) /* 1 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); reset_entries(); if ( show_progress ) /* 2 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024)); @@ -1310,7 +1304,7 @@ smoke_check_6(void) if ( show_progress ) /* 3 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); hl_row_major_scan_forward(/* file_ptr */ file_ptr, /* max_index */ max_index, @@ -1322,7 +1316,7 @@ smoke_check_6(void) if ( show_progress ) /* 4 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); hl_row_major_scan_backward(/* file_ptr */ file_ptr, /* max_index */ max_index, @@ -1334,7 +1328,7 @@ smoke_check_6(void) if ( show_progress ) /* 5 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); hl_row_major_scan_forward(/* file_ptr */ file_ptr, /* max_index */ max_index, @@ -1346,7 +1340,7 @@ smoke_check_6(void) if ( show_progress ) /* 6 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* flush and destroy all entries in the cache: */ @@ -1357,7 +1351,7 @@ smoke_check_6(void) if ( show_progress ) /* 7 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); hl_col_major_scan_forward(/* file_ptr */ file_ptr, /* max_index */ max_index, @@ -1370,7 +1364,7 @@ smoke_check_6(void) if ( show_progress ) /* 8 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* flush all entries in the cache: */ @@ -1381,7 +1375,7 @@ smoke_check_6(void) if ( show_progress ) /* 9 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); hl_col_major_scan_backward(/* file_ptr */ file_ptr, /* max_index */ max_index, @@ -1394,13 +1388,13 @@ smoke_check_6(void) if ( show_progress ) /* 10 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); takedown_cache(file_ptr, display_stats, TRUE); if ( show_progress ) /* 11 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); verify_clean(); verify_unprotected(); @@ -1410,7 +1404,7 @@ smoke_check_6(void) if ( ! 
pass ) { HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -1443,7 +1437,6 @@ smoke_check_6(void) static unsigned smoke_check_7(void) { - const char * fcn_name = "smoke_check_7"; herr_t result; hbool_t show_progress = FALSE; int dirty_unprotects = FALSE; @@ -1519,13 +1512,13 @@ smoke_check_7(void) if ( show_progress ) /* 1 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); reset_entries(); if ( show_progress ) /* 2 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024)); @@ -1544,7 +1537,7 @@ smoke_check_7(void) if ( show_progress ) /* 3 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); hl_row_major_scan_forward(/* file_ptr */ file_ptr, /* max_index */ max_index, @@ -1556,7 +1549,7 @@ smoke_check_7(void) if ( show_progress ) /* 4 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); hl_row_major_scan_backward(/* file_ptr */ file_ptr, /* max_index */ max_index, @@ -1568,7 +1561,7 @@ smoke_check_7(void) if ( show_progress ) /* 5 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); hl_row_major_scan_forward(/* file_ptr */ file_ptr, /* max_index */ max_index, @@ -1580,7 +1573,7 @@ smoke_check_7(void) if ( show_progress ) /* 6 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* flush and destroy all entries in the cache: */ @@ -1591,7 +1584,7 @@ smoke_check_7(void) if ( show_progress ) /* 7 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); hl_col_major_scan_forward(/* file_ptr */ file_ptr, /* max_index */ max_index, @@ -1604,7 +1597,7 @@ smoke_check_7(void) if ( show_progress ) /* 8 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* flush all entries in the cache: */ @@ -1615,7 +1608,7 @@ smoke_check_7(void) if ( show_progress ) /* 9 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); hl_col_major_scan_backward(/* file_ptr */ file_ptr, /* max_index */ max_index, @@ -1628,13 +1621,13 @@ smoke_check_7(void) if ( show_progress ) /* 10 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); takedown_cache(file_ptr, display_stats, TRUE); if ( show_progress ) /* 11 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); verify_clean(); verify_unprotected(); @@ -1644,7 +1637,7 @@ smoke_check_7(void) if ( ! 
pass ) { HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -1677,7 +1670,6 @@ smoke_check_7(void) static unsigned smoke_check_8(void) { - const char * fcn_name = "smoke_check_8"; herr_t result; hbool_t show_progress = FALSE; int dirty_unprotects = FALSE; @@ -1753,13 +1745,13 @@ smoke_check_8(void) if ( show_progress ) /* 1 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); reset_entries(); if ( show_progress ) /* 2 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024)); cache_ptr = file_ptr->shared->cache; @@ -1777,7 +1769,7 @@ smoke_check_8(void) if ( show_progress ) /* 3 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); hl_row_major_scan_forward(/* file_ptr */ file_ptr, /* max_index */ max_index, @@ -1789,7 +1781,7 @@ smoke_check_8(void) if ( show_progress ) /* 4 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); hl_row_major_scan_backward(/* file_ptr */ file_ptr, /* max_index */ max_index, @@ -1801,7 +1793,7 @@ smoke_check_8(void) if ( show_progress ) /* 5 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); hl_row_major_scan_forward(/* file_ptr */ file_ptr, /* max_index */ max_index, @@ -1813,7 +1805,7 @@ smoke_check_8(void) if ( show_progress ) /* 6 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* flush and destroy all entries in the cache: */ @@ -1824,7 +1816,7 @@ smoke_check_8(void) if ( show_progress ) /* 7 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); hl_col_major_scan_forward(/* file_ptr */ file_ptr, /* max_index */ max_index, @@ -1837,7 +1829,7 @@ smoke_check_8(void) if ( show_progress ) /* 8 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* flush all entries in the cache: */ @@ -1848,7 +1840,7 @@ smoke_check_8(void) if ( show_progress ) /* 9 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); hl_col_major_scan_backward(/* file_ptr */ file_ptr, /* max_index */ max_index, @@ -1861,13 +1853,13 @@ smoke_check_8(void) if ( show_progress ) /* 10 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); takedown_cache(file_ptr, display_stats, TRUE); if ( show_progress ) /* 11 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); verify_clean(); verify_unprotected(); @@ -1877,7 +1869,7 @@ smoke_check_8(void) if ( ! 
pass ) { HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -1911,7 +1903,6 @@ smoke_check_8(void) static unsigned smoke_check_9(void) { - const char * fcn_name = "smoke_check_9"; herr_t result; hbool_t show_progress = FALSE; int dirty_unprotects = FALSE; @@ -1935,13 +1926,13 @@ smoke_check_9(void) if ( show_progress ) /* 1 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); reset_entries(); if ( show_progress ) /* 2 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); file_ptr = setup_cache((size_t)(4 * 1024 * 1024), (size_t)(2 * 1024 * 1024)); @@ -1951,7 +1942,7 @@ smoke_check_9(void) if ( show_progress ) /* 3 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -1966,7 +1957,7 @@ smoke_check_9(void) if ( show_progress ) /* 4 */ HDfprintf(stdout, "%s() - %0d -- pass = %d -- evictions disabled\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); row_major_scan_forward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -1986,7 +1977,7 @@ smoke_check_9(void) if ( show_progress ) /* 5 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -2001,7 +1992,7 @@ smoke_check_9(void) if ( show_progress ) /* 6 */ HDfprintf(stdout, "%s() - %0d -- pass = %d -- evictions enabled \n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); row_major_scan_backward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -2019,7 +2010,7 @@ smoke_check_9(void) if ( show_progress ) /* 7 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -2034,7 +2025,7 @@ smoke_check_9(void) if ( show_progress ) /* 8 */ HDfprintf(stdout, "%s() - %0d -- pass = %d -- evictions disabled \n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); row_major_scan_forward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -2052,7 +2043,7 @@ smoke_check_9(void) if ( show_progress ) /* 9 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -2067,7 +2058,7 @@ smoke_check_9(void) if ( show_progress ) /* 10 */ HDfprintf(stdout, "%s() - %0d -- pass = %d -- evictions enabled \n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* flush and destroy all entries in the cache: */ @@ -2078,7 +2069,7 @@ smoke_check_9(void) if ( show_progress ) /* 11 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -2093,7 +2084,7 @@ smoke_check_9(void) if ( show_progress ) /* 12 */ HDfprintf(stdout, "%s() - %0d -- pass = %d -- evictions disabled\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); col_major_scan_forward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -2106,7 +2097,7 @@ smoke_check_9(void) if ( show_progress ) /* 13 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* flush all entries in the cache: */ @@ -2117,7 +2108,7 @@ smoke_check_9(void) if ( show_progress ) /* 14 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, 
mile_stone++, (int)pass); if ( pass ) { @@ -2132,7 +2123,7 @@ smoke_check_9(void) if ( show_progress ) /* 15 */ HDfprintf(stdout, "%s() - %0d -- pass = %d -- evictions enabled\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); col_major_scan_backward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -2145,7 +2136,7 @@ smoke_check_9(void) if ( show_progress ) /* 16 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -2161,13 +2152,13 @@ smoke_check_9(void) if ( show_progress ) /* 17 */ HDfprintf(stdout, "%s() - %0d -- pass = %d -- evictions disabled\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); takedown_cache(file_ptr, display_stats, TRUE); if ( show_progress ) /* 18 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); verify_clean(); verify_unprotected(); @@ -2177,7 +2168,7 @@ smoke_check_9(void) if ( ! pass ) { HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -2211,7 +2202,6 @@ smoke_check_9(void) static unsigned smoke_check_10(void) { - const char * fcn_name = "smoke_check_10"; herr_t result; hbool_t show_progress = FALSE; int dirty_unprotects = TRUE; @@ -2235,13 +2225,13 @@ smoke_check_10(void) if ( show_progress ) /* 1 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); reset_entries(); if ( show_progress ) /* 2 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); file_ptr = setup_cache((size_t)(4 * 1024 * 1024), (size_t)(2 * 1024 * 1024)); @@ -2249,7 +2239,7 @@ smoke_check_10(void) if ( show_progress ) /* 3 */ HDfprintf(stdout, "%s() - %0d -- pass = %d -- evictions enabled\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); row_major_scan_forward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -2267,7 +2257,7 @@ smoke_check_10(void) if ( show_progress ) /* 4 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -2282,7 +2272,7 @@ smoke_check_10(void) if ( show_progress ) /* 5 */ HDfprintf(stdout, "%s() - %0d -- pass = %d -- evictions disabled\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); row_major_scan_backward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -2300,7 +2290,7 @@ smoke_check_10(void) if ( show_progress ) /* 6 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -2315,7 +2305,7 @@ smoke_check_10(void) if ( show_progress ) /* 7 */ HDfprintf(stdout, "%s() - %0d -- pass = %d -- evictions enabled\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); row_major_scan_forward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -2333,7 +2323,7 @@ smoke_check_10(void) if ( show_progress ) /* 8 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -2348,7 +2338,7 @@ smoke_check_10(void) if ( show_progress ) /* 9 */ HDfprintf(stdout, "%s() - %0d -- pass = %d -- evictions disabled\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* flush and destroy all entries in the cache: */ @@ -2359,7 +2349,7 @@ smoke_check_10(void) if ( 
show_progress ) /* 10 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -2374,7 +2364,7 @@ smoke_check_10(void) if ( show_progress ) /* 11 */ HDfprintf(stdout, "%s() - %0d -- pass = %d -- evictions enabled\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); col_major_scan_forward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -2387,7 +2377,7 @@ smoke_check_10(void) if ( show_progress ) /* 12 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -2402,7 +2392,7 @@ smoke_check_10(void) if ( show_progress ) /* 13 */ HDfprintf(stdout, "%s() - %0d -- pass = %d -- evictions disabled\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* flush all entries in the cache: */ @@ -2413,7 +2403,7 @@ smoke_check_10(void) if ( show_progress ) /* 14 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -2428,7 +2418,7 @@ smoke_check_10(void) if ( show_progress ) /* 15 */ HDfprintf(stdout, "%s() - %0d -- pass = %d -- evictions enabled\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); col_major_scan_backward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -2441,7 +2431,7 @@ smoke_check_10(void) if ( show_progress ) /* 16 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -2456,13 +2446,13 @@ smoke_check_10(void) if ( show_progress ) /* 17 */ HDfprintf(stdout, "%s() - %0d -- pass = %d -- evictions disabled\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); takedown_cache(file_ptr, display_stats, TRUE); if ( show_progress ) /* 18 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); verify_clean(); verify_unprotected(); @@ -2472,7 +2462,7 @@ smoke_check_10(void) if ( ! 
pass ) { HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -2498,14 +2488,12 @@ smoke_check_10(void) * *------------------------------------------------------------------------- */ - static unsigned write_permitted_check(void) { #if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS - const char * fcn_name = "write_permitted_check"; hbool_t show_progress = FALSE; hbool_t display_stats = FALSE; int32_t lag = 10; @@ -2522,20 +2510,20 @@ write_permitted_check(void) if ( show_progress ) /* 1 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); reset_entries(); if ( show_progress ) /* 2 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); file_ptr = setup_cache((size_t)(1 * 1024 * 1024), (size_t)(0)); if ( show_progress ) /* 3 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); row_major_scan_forward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -2553,7 +2541,7 @@ write_permitted_check(void) if ( show_progress ) /* 4 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); write_permitted = FALSE; @@ -2573,7 +2561,7 @@ write_permitted_check(void) if ( show_progress ) /* 5 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); write_permitted = TRUE; @@ -2593,7 +2581,7 @@ write_permitted_check(void) if ( show_progress ) /* 6 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* flush and destroy all entries in the cache: */ @@ -2604,7 +2592,7 @@ write_permitted_check(void) if ( show_progress ) /* 7 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); col_major_scan_forward(/* file_ptr */ file_ptr, /* lag */ lag, @@ -2617,7 +2605,7 @@ write_permitted_check(void) if ( show_progress ) /* 8 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); write_permitted = FALSE; @@ -2634,13 +2622,13 @@ write_permitted_check(void) if ( show_progress ) /* 9 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); takedown_cache(file_ptr, display_stats, TRUE); if ( show_progress ) /* 10 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); verify_clean(); verify_unprotected(); @@ -2650,7 +2638,7 @@ write_permitted_check(void) if ( ! pass ) { HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } #else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ @@ -2690,7 +2678,6 @@ write_permitted_check(void) static unsigned check_insert_entry(void) { - const char * fcn_name = "check_insert_entry"; int entry_type = PICO_ENTRY_TYPE; int i; herr_t result; @@ -2956,7 +2943,7 @@ check_insert_entry(void) if ( ! 
pass ) { HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -2983,7 +2970,6 @@ check_insert_entry(void) static unsigned check_flush_cache(void) { - const char * fcn_name = "check_flush_cache"; H5F_t * file_ptr = NULL; TESTING("H5C_flush_cache() functionality"); @@ -3041,7 +3027,7 @@ check_flush_cache(void) if ( ! pass ) { HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -3068,7 +3054,6 @@ check_flush_cache(void) static void check_flush_cache__empty_cache(H5F_t * file_ptr) { - /* const char * fcn_name = "check_flush_cache__empty_cache"; */ H5C_t * cache_ptr = file_ptr->shared->cache; herr_t result; @@ -3163,7 +3148,6 @@ check_flush_cache__empty_cache(H5F_t * file_ptr) static void check_flush_cache__multi_entry(H5F_t * file_ptr) { - /* const char * fcn_name = "check_flush_cache__multi_entry"; */ H5C_t *cache_ptr = file_ptr->shared->cache; if ( cache_ptr == NULL ) { @@ -4830,7 +4814,6 @@ check_flush_cache__multi_entry_test(H5F_t * file_ptr, int spec_size, struct flush_cache_test_spec spec[]) { - /* const char * fcn_name = "check_flush_cache__multi_entry_test"; */ H5C_t * cache_ptr = file_ptr->shared->cache; static char msg[128]; herr_t result; @@ -5055,7 +5038,6 @@ check_flush_cache__pe_multi_entry_test(H5F_t * file_ptr, int spec_size, struct pe_flush_cache_test_spec spec[]) { - /* const char * fcn_name = "check_flush_cache__pe_multi_entry_test"; */ H5C_t *cache_ptr = file_ptr->shared->cache; static char msg[128]; herr_t result; @@ -5295,7 +5277,6 @@ check_flush_cache__pe_multi_entry_test(H5F_t * file_ptr, static void check_flush_cache__flush_ops(H5F_t * file_ptr) { - /* const char * fcn_name = "check_flush_cache__flush_ops"; */ H5C_t *cache_ptr = file_ptr->shared->cache; if ( cache_ptr == NULL ) { @@ -9611,7 +9592,6 @@ check_flush_cache__flush_op_test(H5F_t * file_ptr, static void check_flush_cache__flush_op_eviction_test(H5F_t * file_ptr) { - /* const char * fcn_name = "check_flush_cache__flush_op_eviction_test"; */ H5C_t * cache_ptr = file_ptr->shared->cache; int i; int num_variable_entries = 8; @@ -12346,12 +12326,11 @@ check_flush_cache__single_entry_test(H5F_t * file_ptr, hbool_t expected_flushed, hbool_t expected_destroyed) { - /* const char * fcn_name = "check_flush_cache__single_entry_test"; */ H5C_t * cache_ptr = file_ptr->shared->cache; static char msg[128]; herr_t result; test_entry_t * base_addr; - test_entry_t * entry_ptr; + test_entry_t * entry_ptr = NULL; if ( cache_ptr == NULL ) { @@ -12534,13 +12513,12 @@ check_flush_cache__pinned_single_entry_test(H5F_t * file_ptr, hbool_t expected_flushed, hbool_t expected_destroyed) { - /* const char *fcn_name = "check_flush_cache__pinned_single_entry_test"; */ H5C_t * cache_ptr = file_ptr->shared->cache; static char msg[128]; hbool_t expected_loaded = TRUE; herr_t result; test_entry_t * base_addr; - test_entry_t * entry_ptr; + test_entry_t * entry_ptr = NULL; if ( cache_ptr == NULL ) { @@ -12726,7 +12704,6 @@ check_flush_cache__pinned_single_entry_test(H5F_t * file_ptr, static unsigned check_get_entry_status(void) { - const char * fcn_name = "check_get_entry_status"; static char msg[128]; herr_t result; hbool_t in_cache; @@ -12929,7 +12906,7 @@ check_get_entry_status(void) if ( ! 
pass ) { HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -12956,7 +12933,6 @@ check_get_entry_status(void) static unsigned check_expunge_entry(void) { - const char * fcn_name = "check_expunge_entry"; static char msg[128]; herr_t result; hbool_t in_cache; @@ -13244,7 +13220,7 @@ check_expunge_entry(void) if ( ! pass ) { HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -13272,7 +13248,6 @@ check_expunge_entry(void) static unsigned check_multiple_read_protect(void) { - const char * fcn_name = "check_multiple_read_protect()"; H5F_t * file_ptr = NULL; #if H5C_COLLECT_CACHE_STATS H5C_t * cache_ptr = NULL; @@ -13669,7 +13644,7 @@ check_multiple_read_protect(void) if ( ! pass ) { HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -13697,7 +13672,6 @@ check_multiple_read_protect(void) static unsigned check_move_entry(void) { - const char * fcn_name = "check_move_entry"; int i; H5F_t * file_ptr = NULL; struct move_entry_test_spec test_specs[4] = @@ -13782,7 +13756,7 @@ check_move_entry(void) if ( ! pass ) { HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -13814,7 +13788,6 @@ check_move_entry__run_test(H5F_t * file_ptr, int test_num, struct move_entry_test_spec * spec_ptr) { - /* const char * fcn_name = "check_move_entry__run_test"; */ H5C_t * cache_ptr = file_ptr->shared->cache; static char msg[128]; unsigned int flags = H5C__NO_FLAGS_SET; @@ -13950,7 +13923,6 @@ check_move_entry__run_test(H5F_t * file_ptr, static unsigned check_pin_protected_entry(void) { - const char * fcn_name = "check_pin_protected_entry"; static char msg[128]; herr_t result; H5F_t * file_ptr = NULL; @@ -14025,7 +13997,7 @@ check_pin_protected_entry(void) if ( ! pass ) { HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -14050,7 +14022,6 @@ check_pin_protected_entry(void) static unsigned check_resize_entry(void) { - const char * fcn_name = "check_resize_entry"; static char msg[128]; herr_t result; hbool_t in_cache; @@ -14062,7 +14033,7 @@ check_resize_entry(void) H5F_t * file_ptr = NULL; H5C_t * cache_ptr = NULL; test_entry_t * base_addr; - test_entry_t * entry_ptr; + test_entry_t * entry_ptr = NULL; TESTING("entry resize functionality"); @@ -15034,7 +15005,7 @@ check_resize_entry(void) if ( ! 
pass ) { HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -15062,7 +15033,6 @@ check_resize_entry(void) static unsigned check_evictions_enabled(void) { - const char * fcn_name = "check_evictions_enabled"; static char msg[128]; herr_t result; hbool_t show_progress = FALSE; @@ -15116,7 +15086,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 1 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* create the cache */ if ( pass ) { @@ -15141,7 +15111,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 2 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* verify that it is empty */ if ( pass ) { @@ -15161,7 +15131,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 3 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* verify that H5C_get_evictions_enabled() returns the expected value */ if ( pass ) { @@ -15178,7 +15148,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 4 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -15193,7 +15163,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 5 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* verify that the cache is full */ if ( pass ) { @@ -15214,7 +15184,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 6 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -15226,7 +15196,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 7 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* verify that an entry has been evicted */ if ( pass ) { @@ -15246,7 +15216,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 8 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -15283,7 +15253,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 9 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -15294,7 +15264,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 10 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* verify that another entry has been evicted */ if ( pass ) { @@ -15314,7 +15284,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 11 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -15350,7 +15320,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 12 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* disable evictions */ if ( pass ) { @@ -15367,7 +15337,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 13 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* verify that evictions are disabled */ if ( pass ) { @@ -15387,7 +15357,7 @@ check_evictions_enabled(void) if ( 
show_progress ) /* 14 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -15399,7 +15369,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 15 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* verify that no entry has been evicted */ if ( pass ) { @@ -15419,7 +15389,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 16 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -15430,7 +15400,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 17 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* verify that no entry has been evicted */ if ( pass ) { @@ -15450,7 +15420,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 18 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* re-enable evictions */ if ( pass ) { @@ -15467,7 +15437,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 19 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -15479,7 +15449,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 20 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* verify that no entries have been evicted */ if ( pass ) { @@ -15499,7 +15469,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 21 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -15511,7 +15481,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 22 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* verify that the entries have been evicted to bring the * cache back down to its normal size.
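The +/- pairs running through these test/cache.c hunks are one mechanical substitution: each test function's local fcn_name string is dropped and the progress and failure messages print the library-wide FUNC macro instead, so the function name no longer has to be maintained by hand. Below is a minimal sketch of the pattern, assuming FUNC expands to the C99 __func__ identifier (the real macro comes from HDF5's internal headers and its definition is not shown in this patch); smoke_check_demo, mile_stone, and pass are simplified stand-ins for the test file's globals, and plain fprintf stands in for the HDfprintf wrapper the tests actually call.

#include <stdio.h>

/* Assumption for this sketch only: treat FUNC as the C99 __func__
 * identifier, which is how the "%s" in the call below resolves to the
 * enclosing function's name. */
#define FUNC __func__

static int mile_stone = 1;   /* progress counter, as in the tests  */
static int pass       = 1;   /* 1 while the check is still passing */

static void
smoke_check_demo(void)
{
    /* before the patch: const char * fcn_name = "smoke_check_demo";
     * and fcn_name was passed where FUNC now appears */
    fprintf(stdout, "%s() - %0d -- pass = %d\n", FUNC, mile_stone++, pass);
}

int
main(void)
{
    smoke_check_demo();   /* prints: smoke_check_demo() - 1 -- pass = 1 */
    return 0;
}

Under that assumption the output of every converted progress call is unchanged, which is why the substitution can be applied uniformly across the smoke checks and check_evictions_enabled() without touching the format strings.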
@@ -15534,7 +15504,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 23 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -15570,7 +15540,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 24 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -15606,7 +15576,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 25 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* disable evictions again */ if ( pass ) { @@ -15623,7 +15593,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 26 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -15637,7 +15607,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 27 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* verify that the cache has grown */ if ( pass ) { @@ -15657,7 +15627,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 28 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* re-enable evictions again */ if ( pass ) { @@ -15674,7 +15644,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 29 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -15685,7 +15655,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 30 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* verify that the cache has returned to its maximum size */ if ( pass ) { @@ -15705,7 +15675,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 31 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -15741,7 +15711,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 32 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* disable evictions one last time before we shut down */ if ( pass ) { @@ -15758,7 +15728,7 @@ check_evictions_enabled(void) if ( show_progress ) /* 33 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -15767,14 +15737,14 @@ check_evictions_enabled(void) if ( show_progress ) /* 34 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { PASSED(); } else { H5_FAILED(); } if ( ! pass ) { HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -15801,7 +15771,6 @@ check_evictions_enabled(void) static unsigned check_flush_protected_err(void) { - const char * fcn_name = "check_flush_protected_err"; H5F_t * file_ptr = NULL; TESTING("flush cache with protected entry error"); @@ -15850,7 +15819,7 @@ check_flush_protected_err(void) if ( ! 
pass ) { HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -15881,7 +15850,6 @@ extern H5C_t * saved_cache; static unsigned check_destroy_pinned_err(void) { - const char * fcn_name = "check_destroy_pinned_err()"; H5F_t * file_ptr = NULL; TESTING("destroy cache with permanently pinned entry error"); @@ -15942,7 +15910,7 @@ check_destroy_pinned_err(void) if ( ! pass ) { HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -15969,7 +15937,6 @@ check_destroy_pinned_err(void) static unsigned check_destroy_protected_err(void) { - const char * fcn_name = "check_destroy_protected_err"; H5F_t * file_ptr = NULL; TESTING("destroy cache with protected entry error"); @@ -16028,7 +15995,7 @@ check_destroy_protected_err(void) if ( ! pass ) { HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -16055,7 +16022,6 @@ check_destroy_protected_err(void) static unsigned check_duplicate_insert_err(void) { - const char * fcn_name = "check_duplicate_insert_err"; herr_t result = -1; H5F_t * file_ptr = NULL; test_entry_t * base_addr; @@ -16106,7 +16072,7 @@ check_duplicate_insert_err(void) if ( ! pass ) { HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -16133,7 +16099,6 @@ check_duplicate_insert_err(void) static unsigned check_move_err(void) { - const char * fcn_name = "check_move_err()"; herr_t result; H5F_t * file_ptr = NULL; H5C_t * cache_ptr = NULL; @@ -16201,7 +16166,7 @@ check_move_err(void) if ( ! pass ) { HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -16230,7 +16195,6 @@ check_move_err(void) static unsigned check_double_pin_err(void) { - const char * fcn_name = "check_double_pin_err()"; herr_t result; H5F_t * file_ptr = NULL; test_entry_t * entry_ptr; @@ -16289,7 +16253,7 @@ check_double_pin_err(void) if ( ! pass ) { HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -16318,7 +16282,6 @@ check_double_pin_err(void) static unsigned check_double_unpin_err(void) { - const char * fcn_name = "check_double_unpin_err()"; herr_t result; H5F_t * file_ptr = NULL; test_entry_t * entry_ptr; @@ -16388,7 +16351,7 @@ check_double_unpin_err(void) if ( ! pass ) { HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -16417,7 +16380,6 @@ check_double_unpin_err(void) static unsigned check_pin_entry_errs(void) { - const char * fcn_name = "check_pin_entry_errs()"; herr_t result; H5F_t * file_ptr = NULL; test_entry_t * entry_ptr; @@ -16499,7 +16461,7 @@ check_pin_entry_errs(void) if ( ! pass ) { HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -16524,7 +16486,6 @@ check_pin_entry_errs(void) static unsigned check_double_protect_err(void) { - const char * fcn_name = "check_double_protect_err()"; H5F_t * file_ptr = NULL; test_entry_t * entry_ptr; H5C_cache_entry_t * cache_entry_ptr; @@ -16577,7 +16538,7 @@ check_double_protect_err(void) if ( ! 
pass ) { HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -16602,7 +16563,6 @@ check_double_protect_err(void) static unsigned check_double_unprotect_err(void) { - const char * fcn_name = "check_double_unprotect_err()"; herr_t result; H5F_t * file_ptr = NULL; test_entry_t * entry_ptr; @@ -16654,7 +16614,7 @@ check_double_unprotect_err(void) if ( ! pass ) { HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -16682,7 +16642,6 @@ check_double_unprotect_err(void) static unsigned check_mark_entry_dirty_errs(void) { - const char * fcn_name = "check_mark_entry_dirty_errs()"; herr_t result; H5F_t * file_ptr = NULL; test_entry_t * entry_ptr; @@ -16734,7 +16693,7 @@ check_mark_entry_dirty_errs(void) if ( ! pass ) { HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -16763,7 +16722,6 @@ check_mark_entry_dirty_errs(void) static unsigned check_expunge_entry_errs(void) { - const char * fcn_name = "check_expunge_entry_errs()"; herr_t result; H5F_t * file_ptr = NULL; test_entry_t * entry_ptr; @@ -16858,7 +16816,7 @@ check_expunge_entry_errs(void) if ( ! pass ) { HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -16883,7 +16841,6 @@ check_expunge_entry_errs(void) static unsigned check_resize_entry_errs(void) { - const char * fcn_name = "check_resize_entry_errs()"; herr_t result; H5F_t * file_ptr = NULL; test_entry_t * entry_ptr; @@ -16960,7 +16917,7 @@ check_resize_entry_errs(void) if ( ! pass ) { HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -16985,7 +16942,6 @@ check_resize_entry_errs(void) static unsigned check_unprotect_ro_dirty_err(void) { - const char * fcn_name = "check_unprotect_ro_dirty_err()"; herr_t result; H5F_t * file_ptr = NULL; test_entry_t * entry_ptr; @@ -17086,7 +17042,7 @@ check_unprotect_ro_dirty_err(void) if ( ! pass ) { HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -17115,7 +17071,6 @@ check_unprotect_ro_dirty_err(void) static unsigned check_protect_ro_rw_err(void) { - const char * fcn_name = "check_protect_ro_rw_err()"; H5F_t * file_ptr = NULL; test_entry_t * entry_ptr; void * thing_ptr = NULL; @@ -17169,7 +17124,7 @@ check_protect_ro_rw_err(void) if ( ! pass ) { HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -17198,7 +17153,6 @@ check_protect_ro_rw_err(void) static unsigned check_check_evictions_enabled_err(void) { - const char * fcn_name = "check_evictions_enabled_err()"; herr_t result; hbool_t evictions_enabled; H5F_t * file_ptr = NULL; @@ -17308,7 +17262,7 @@ check_check_evictions_enabled_err(void) if ( ! pass ) { HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -17357,7 +17311,6 @@ static void test_rpt_fcn(H5_ATTR_UNUSED H5C_t * cache_ptr, static unsigned check_auto_cache_resize(void) { - const char * fcn_name = "check_auto_cache_resize()"; hbool_t show_progress = FALSE; herr_t result; int32_t i; @@ -21534,7 +21487,7 @@ check_auto_cache_resize(void) if ( ! 
pass ) { HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -21565,7 +21518,6 @@ check_auto_cache_resize(void) static unsigned check_auto_cache_resize_disable(void) { - const char * fcn_name = "check_auto_cache_resize_disable()"; hbool_t show_progress = FALSE; herr_t result; int32_t i; @@ -24264,7 +24216,7 @@ check_auto_cache_resize_disable(void) if ( ! pass ) { HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -24291,7 +24243,6 @@ check_auto_cache_resize_disable(void) static unsigned check_auto_cache_resize_epoch_markers(void) { - const char * fcn_name = "check_auto_cache_resize_epoch_markers()"; hbool_t show_progress = FALSE; herr_t result; int32_t i; @@ -24969,7 +24920,7 @@ check_auto_cache_resize_epoch_markers(void) if ( ! pass ) { HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -25001,7 +24952,6 @@ check_auto_cache_resize_epoch_markers(void) static unsigned check_auto_cache_resize_input_errs(void) { - const char * fcn_name = "check_auto_cache_resize_input_errs()"; herr_t result; H5F_t * file_ptr = NULL; H5C_t * cache_ptr = NULL; @@ -27377,7 +27327,7 @@ check_auto_cache_resize_input_errs(void) if ( ! pass ) { HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -27409,7 +27359,6 @@ check_auto_cache_resize_input_errs(void) static unsigned check_auto_cache_resize_aux_fcns(void) { - const char * fcn_name = "check_auto_cache_resize_aux_fcns()"; herr_t result; int32_t i; H5F_t * file_ptr = NULL; @@ -27529,7 +27478,7 @@ check_auto_cache_resize_aux_fcns(void) pass = FALSE; failure_mssg = "H5C_get_cache_hit_rate failed.\n"; - } else if ( hit_rate > FP_EPSILON ) { /* i.e. hit_rate != 0.0 */ + } else if ( !H5_DBL_ABS_EQUAL(hit_rate, (double)0.0f) ) { /* i.e. hit_rate != 0.0 */ pass = FALSE; failure_mssg = @@ -27562,7 +27511,7 @@ check_auto_cache_resize_aux_fcns(void) pass = FALSE; failure_mssg = "H5C_get_cache_hit_rate failed.\n"; - } else if ( hit_rate > FP_EPSILON ) { /* i.e. hit_rate != 0.0 */ + } else if ( !H5_DBL_ABS_EQUAL(hit_rate, (double)0.0f) ) { /* i.e. hit_rate != 0.0 */ pass = FALSE; failure_mssg = @@ -27607,7 +27556,7 @@ check_auto_cache_resize_aux_fcns(void) pass = FALSE; failure_mssg = "H5C_get_cache_hit_rate failed.\n"; - } else if ( ! DBL_REL_EQUAL(hit_rate, 0.5f, FP_EPSILON) ) { /* i.e. hit_rate != 0.5 */ + } else if ( ! H5_DBL_ABS_EQUAL(hit_rate, (double)0.5f) ) { /* i.e. hit_rate != 0.5 */ pass = FALSE; failure_mssg = @@ -27690,7 +27639,7 @@ check_auto_cache_resize_aux_fcns(void) pass = FALSE; failure_mssg = "H5C_get_cache_hit_rate failed.\n"; - } else if ( ! DBL_REL_EQUAL(hit_rate, 0.5F, FP_EPSILON) ) { /* i.e. hit_rate != 0.5 */ + } else if ( ! H5_DBL_ABS_EQUAL(hit_rate, (double)0.5F) ) { /* i.e. hit_rate != 0.5 */ pass = FALSE; failure_mssg = @@ -27916,7 +27865,7 @@ check_auto_cache_resize_aux_fcns(void) if ( ! 
pass ) { HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); } return (unsigned)!pass; @@ -27955,7 +27904,6 @@ check_auto_cache_resize_aux_fcns(void) static unsigned check_metadata_blizzard_absence(hbool_t fill_via_insertion) { - const char * fcn_name = "check_metadata_blizzard_absence"; int entry_type = HUGE_ENTRY_TYPE; size_t entry_size = HUGE_ENTRY_SIZE; /* 16 KB */ H5F_t * file_ptr = NULL; @@ -28142,7 +28090,7 @@ check_metadata_blizzard_absence(hbool_t fill_via_insertion) if ( show_progress) /* 0 */ HDfprintf(stdout, "\n%s: check point %d -- pass %d\n", - fcn_name, checkpoint++, pass); + FUNC, checkpoint++, pass); if ( pass ) { @@ -28166,7 +28114,7 @@ check_metadata_blizzard_absence(hbool_t fill_via_insertion) if ( show_progress) /* 1 */ HDfprintf(stdout, "%s: check point %d -- pass %d\n", - fcn_name, checkpoint++, pass); + FUNC, checkpoint++, pass); /* ======================================================================== * ======================================================================== @@ -28230,7 +28178,7 @@ check_metadata_blizzard_absence(hbool_t fill_via_insertion) if ( show_progress) /* 2 */ HDfprintf(stdout, "%s: check point %d -- pass %d\n", - fcn_name, checkpoint++, pass); + FUNC, checkpoint++, pass); /* ======================================================================== * ======================================================================== @@ -28300,7 +28248,7 @@ check_metadata_blizzard_absence(hbool_t fill_via_insertion) if ( show_progress) /* 3 */ HDfprintf(stdout, "%s: check point %d -- pass %d\n", - fcn_name, checkpoint++, pass); + FUNC, checkpoint++, pass); if ( pass ) { @@ -28354,7 +28302,7 @@ check_metadata_blizzard_absence(hbool_t fill_via_insertion) if ( show_progress) /* 4 */ HDfprintf(stdout, "%s: check point %d -- pass %d\n", - fcn_name, checkpoint++, pass); + FUNC, checkpoint++, pass); if ( pass ) { @@ -28407,7 +28355,7 @@ check_metadata_blizzard_absence(hbool_t fill_via_insertion) if ( show_progress) /* 5 */ HDfprintf(stdout, "%s: check point %d -- pass %d\n", - fcn_name, checkpoint++, pass); + FUNC, checkpoint++, pass); /* ======================================================================== * ======================================================================== @@ -28482,7 +28430,7 @@ check_metadata_blizzard_absence(hbool_t fill_via_insertion) if ( show_progress) /* 6 */ HDfprintf(stdout, "%s: check point %d -- pass %d\n", - fcn_name, checkpoint++, pass); + FUNC, checkpoint++, pass); if ( pass ) { @@ -28554,7 +28502,7 @@ check_metadata_blizzard_absence(hbool_t fill_via_insertion) if ( show_progress) /* 7 */ HDfprintf(stdout, "%s: check point %d -- pass %d\n", - fcn_name, checkpoint++, pass); + FUNC, checkpoint++, pass); /* ======================================================================== * ======================================================================== @@ -28614,7 +28562,7 @@ check_metadata_blizzard_absence(hbool_t fill_via_insertion) if ( show_progress) /* 8 */ HDfprintf(stdout, "%s: check point %d -- pass %d\n", - fcn_name, checkpoint++, pass); + FUNC, checkpoint++, pass); if ( pass ) { @@ -28663,7 +28611,7 @@ check_metadata_blizzard_absence(hbool_t fill_via_insertion) if ( show_progress) /* 9 */ HDfprintf(stdout, "%s: check point %d -- pass %d\n", - fcn_name, checkpoint++, pass); + FUNC, checkpoint++, pass); if ( pass ) { @@ -28714,7 +28662,7 @@ check_metadata_blizzard_absence(hbool_t fill_via_insertion) if ( show_progress) /* 10 */ HDfprintf(stdout, "%s: check point 
%d -- pass %d\n", - fcn_name, checkpoint++, pass); + FUNC, checkpoint++, pass); if ( pass ) { @@ -28762,7 +28710,7 @@ check_metadata_blizzard_absence(hbool_t fill_via_insertion) if ( show_progress) /* 11 */ HDfprintf(stdout, "%s: check point %d -- pass %d\n", - fcn_name, checkpoint++, pass); + FUNC, checkpoint++, pass); if ( pass ) { @@ -28773,7 +28721,7 @@ check_metadata_blizzard_absence(hbool_t fill_via_insertion) if ( show_progress) /* 12 */ HDfprintf(stdout, "%s: check point %d -- pass %d\n", - fcn_name, checkpoint++, pass); + FUNC, checkpoint++, pass); if ( pass ) { PASSED(); } else { H5_FAILED(); } diff --git a/test/cache_api.c b/test/cache_api.c index 3c0871f..6953d17 100644 --- a/test/cache_api.c +++ b/test/cache_api.c @@ -70,7 +70,6 @@ static unsigned check_file_mdc_api_errs(void); static unsigned check_fapl_mdc_api_calls(void) { - const char * fcn_name = "check_fapl_mdc_api_calls()"; char filename[512]; herr_t result; hid_t fapl_id = -1; @@ -487,7 +486,7 @@ check_fapl_mdc_api_calls(void) if ( ! pass ) HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); return !pass; @@ -521,7 +520,6 @@ check_fapl_mdc_api_calls(void) static unsigned check_file_mdc_api_calls(void) { - const char * fcn_name = "check_file_mdc_api_calls()"; char filename[512]; hid_t file_id = -1; size_t max_size; @@ -775,7 +773,7 @@ check_file_mdc_api_calls(void) pass = FALSE; failure_mssg = "H5Fget_mdc_hit_rate() failed 1.\n"; - } else if ( hit_rate != 0.0f ) { + } else if ( !H5_DBL_ABS_EQUAL(hit_rate, (double)0.0f) ) { pass = FALSE; failure_mssg = @@ -839,7 +837,7 @@ check_file_mdc_api_calls(void) if ( ! pass ) HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); return !pass; @@ -872,7 +870,6 @@ check_file_mdc_api_calls(void) static unsigned mdc_api_call_smoke_check(int express_test) { - const char * fcn_name = "mdc_api_call_smoke_check()"; char filename[512]; hbool_t valid_chunk; hbool_t dump_hit_rate = FALSE; @@ -884,7 +881,7 @@ mdc_api_call_smoke_check(int express_test) hid_t filespace_ids[NUM_DSETS]; hid_t memspace_id = -1; hid_t dataset_ids[NUM_DSETS]; - hid_t properties; + hid_t properties = -1; char dset_name[64]; int i, j, k, l, m, n; herr_t status; @@ -1187,8 +1184,8 @@ mdc_api_call_smoke_check(int express_test) } /* select on disk hyperslab */ - offset[0] = i; /*offset of hyperslab in file*/ - offset[1] = j; + offset[0] = (hsize_t)i; /*offset of hyperslab in file*/ + offset[1] = (hsize_t)j; a_size[0] = CHUNK_SIZE; /*size of hyperslab*/ a_size[1] = CHUNK_SIZE; status = H5Sselect_hyperslab(filespace_ids[m], H5S_SELECT_SET, @@ -1258,8 +1255,8 @@ mdc_api_call_smoke_check(int express_test) j = (rand() % (DSET_SIZE / CHUNK_SIZE)) * CHUNK_SIZE; /* select on disk hyperslab */ - offset[0] = i; /*offset of hyperslab in file*/ - offset[1] = j; + offset[0] = (hsize_t)i; /*offset of hyperslab in file*/ + offset[1] = (hsize_t)j; a_size[0] = CHUNK_SIZE; /*size of hyperslab*/ a_size[1] = CHUNK_SIZE; status = H5Sselect_hyperslab(filespace_ids[m], H5S_SELECT_SET, @@ -1383,8 +1380,8 @@ mdc_api_call_smoke_check(int express_test) j = (rand() % (DSET_SIZE / CHUNK_SIZE)) * CHUNK_SIZE; /* select on disk hyperslab */ - offset[0] = i; /*offset of hyperslab in file*/ - offset[1] = j; + offset[0] = (hsize_t)i; /*offset of hyperslab in file*/ + offset[1] = (hsize_t)j; a_size[0] = CHUNK_SIZE; /*size of hyperslab*/ a_size[1] = CHUNK_SIZE; status = H5Sselect_hyperslab(filespace_ids[m], H5S_SELECT_SET, @@ -1516,7 +1513,7 @@ 
mdc_api_call_smoke_check(int express_test) if ( ! pass ) HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); return !pass; @@ -2885,7 +2882,6 @@ H5AC_cache_config_t invalid_configs[NUM_INVALID_CONFIGS] = static unsigned check_fapl_mdc_api_errs(void) { - const char * fcn_name = "check_fapl_mdc_api_errs()"; static char msg[128]; int i; herr_t result; @@ -3032,7 +3028,7 @@ check_fapl_mdc_api_errs(void) if ( ! pass ) HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); return !pass; @@ -3058,7 +3054,6 @@ check_fapl_mdc_api_errs(void) static unsigned check_file_mdc_api_errs(void) { - const char * fcn_name = "check_file_mdc_api_errs()"; char filename[512]; static char msg[128]; hbool_t show_progress = FALSE; @@ -3086,7 +3081,7 @@ check_file_mdc_api_errs(void) if ( show_progress ) { - HDfprintf(stdout, "%s: calling h5_fixname().\n", fcn_name); + HDfprintf(stdout, "%s: calling h5_fixname().\n", FUNC); } if ( h5_fixname(FILENAME[1], H5P_DEFAULT, filename, sizeof(filename)) @@ -3101,7 +3096,7 @@ check_file_mdc_api_errs(void) if ( show_progress ) { - HDfprintf(stdout, "%s: calling H5Fcreate().\n", fcn_name); + HDfprintf(stdout, "%s: calling H5Fcreate().\n", FUNC); } file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); @@ -3123,7 +3118,7 @@ check_file_mdc_api_errs(void) if ( show_progress ) { - HDfprintf(stdout, "%s: testing H5Fget_mdc_config() 1.\n", fcn_name); + HDfprintf(stdout, "%s: testing H5Fget_mdc_config() 1.\n", FUNC); } H5E_BEGIN_TRY { @@ -3141,7 +3136,7 @@ check_file_mdc_api_errs(void) if ( show_progress ) { - HDfprintf(stdout, "%s: testing H5Fget_mdc_config() 2.\n", fcn_name); + HDfprintf(stdout, "%s: testing H5Fget_mdc_config() 2.\n", FUNC); } H5E_BEGIN_TRY { @@ -3160,7 +3155,7 @@ check_file_mdc_api_errs(void) if ( show_progress ) { - HDfprintf(stdout, "%s: testing H5Fget_mdc_config() 3.\n", fcn_name); + HDfprintf(stdout, "%s: testing H5Fget_mdc_config() 3.\n", FUNC); } H5E_BEGIN_TRY { @@ -3182,7 +3177,7 @@ check_file_mdc_api_errs(void) if ( show_progress ) { - HDfprintf(stdout, "%s: testing H5Fset_mdc_config() 1.\n", fcn_name); + HDfprintf(stdout, "%s: testing H5Fset_mdc_config() 1.\n", FUNC); } H5E_BEGIN_TRY { @@ -3200,7 +3195,7 @@ check_file_mdc_api_errs(void) if ( show_progress ) { - HDfprintf(stdout, "%s: testing H5Fset_mdc_config() 2.\n", fcn_name); + HDfprintf(stdout, "%s: testing H5Fset_mdc_config() 2.\n", FUNC); } H5E_BEGIN_TRY { @@ -3221,7 +3216,7 @@ check_file_mdc_api_errs(void) HDfprintf(stdout, "%s: testing H5Fset_mdc_config() with invalid config %d.\n", - fcn_name, i); + FUNC, i); } H5E_BEGIN_TRY { @@ -3250,7 +3245,7 @@ check_file_mdc_api_errs(void) if ( show_progress ) { HDfprintf(stdout, "%s: testing H5Fget_mdc_hit_rate() 1.\n", - fcn_name); + FUNC); } H5E_BEGIN_TRY { @@ -3269,7 +3264,7 @@ check_file_mdc_api_errs(void) if ( show_progress ) { HDfprintf(stdout, "%s: testing H5Fget_mdc_hit_rate() 2.\n", - fcn_name); + FUNC); } H5E_BEGIN_TRY { @@ -3290,7 +3285,7 @@ check_file_mdc_api_errs(void) if ( show_progress ) { HDfprintf(stdout, "%s: testing H5Freset_mdc_hit_rate_stats().\n", - fcn_name); + FUNC); } H5E_BEGIN_TRY { @@ -3311,7 +3306,7 @@ check_file_mdc_api_errs(void) if ( show_progress ) { - HDfprintf(stdout, "%s: testing H5Fget_mdc_size() 1.\n", fcn_name); + HDfprintf(stdout, "%s: testing H5Fget_mdc_size() 1.\n", FUNC); } H5E_BEGIN_TRY { @@ -3330,7 +3325,7 @@ check_file_mdc_api_errs(void) if ( show_progress ) { - HDfprintf(stdout, "%s: testing 
H5Fget_mdc_size() 2.\n", fcn_name); + HDfprintf(stdout, "%s: testing H5Fget_mdc_size() 2.\n", FUNC); } if ( ( H5Fget_mdc_size(file_id, &max_size, NULL, NULL, NULL) < 0 ) || @@ -3352,7 +3347,7 @@ check_file_mdc_api_errs(void) if ( show_progress ) { - HDfprintf(stdout, "%s: cleaning up from tests.\n", fcn_name); + HDfprintf(stdout, "%s: cleaning up from tests.\n", FUNC); } if ( H5Fclose(file_id) < 0 ) { @@ -3371,7 +3366,7 @@ check_file_mdc_api_errs(void) if ( ! pass ) HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", - fcn_name, failure_mssg); + FUNC, failure_mssg); return !pass; diff --git a/test/cache_common.c b/test/cache_common.c index 4abc0f4..60ab827 100644 --- a/test/cache_common.c +++ b/test/cache_common.c @@ -2391,7 +2391,6 @@ H5F_t * setup_cache(size_t max_cache_size, size_t min_clean_size) { - const char * fcn_name = "setup_cache()"; char filename[512]; hbool_t show_progress = FALSE; hbool_t verbose = TRUE; @@ -2405,7 +2404,7 @@ setup_cache(size_t max_cache_size, if ( show_progress ) /* 1 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); saved_fid = -1; @@ -2422,7 +2421,7 @@ setup_cache(size_t max_cache_size, if ( show_progress ) /* 2 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( ( pass ) && ( try_core_file_driver ) ) { @@ -2444,7 +2443,7 @@ setup_cache(size_t max_cache_size, core_file_driver_failed = TRUE; if ( verbose ) { - HDfprintf(stdout, "%s: H5Fcreate() with CFD failed.\n", fcn_name); + HDfprintf(stdout, "%s: H5Fcreate() with CFD failed.\n", FUNC); } } else { @@ -2455,7 +2454,7 @@ setup_cache(size_t max_cache_size, if ( show_progress ) /* 3 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); /* if we either aren't using the core file driver, or a create * with the core file driver failed, try again with a regular file. 
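
A minimal sketch of the two idioms the hunks above converge on. First, the hand-maintained "const char * fcn_name" locals are dropped in favor of the FUNC macro, so the reported name can no longer drift out of sync with the function it labels; second, the signed loop indices i and j are cast explicitly to hsize_t before being stored as hyperslab offsets, which silences signed-to-unsigned conversion warnings. The fragment below is illustrative only, not code from the patch: FUNC is assumed to behave like HDF5's internal macro from H5private.h and is approximated here with C99's __func__, and the unsigned long long array is just a stand-in for hsize_t.

    #include <stdio.h>

    #ifndef FUNC
    #define FUNC __func__   /* assumption: stand-in for HDF5's FUNC macro (H5private.h) */
    #endif

    static unsigned check_something(void)
    {
        int pass       = 1;             /* mirrors the tests' pass/fail flag      */
        int checkpoint = 0;             /* mirrors the tests' progress counter    */
        int i = 3, j = 4;               /* signed indices, as in the smoke check  */
        unsigned long long offset[2];   /* stand-in for hsize_t hyperslab offsets */

        /* progress report without a per-function name string */
        fprintf(stdout, "%s: check point %d -- pass %d\n", FUNC, checkpoint++, pass);

        /* explicit casts avoid signed-to-unsigned conversion warnings */
        offset[0] = (unsigned long long)i;
        offset[1] = (unsigned long long)j;
        (void)offset;

        return (unsigned)!pass;
    }

    int main(void)
    {
        return (int)check_something();
    }
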
@@ -2473,14 +2472,14 @@ setup_cache(size_t max_cache_size, failure_mssg = "H5Fcreate() failed."; if ( verbose ) { - HDfprintf(stdout, "%s: H5Fcreate() failed.\n", fcn_name); + HDfprintf(stdout, "%s: H5Fcreate() failed.\n", FUNC); } } } if ( show_progress ) /* 4 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -2494,7 +2493,7 @@ setup_cache(size_t max_cache_size, failure_mssg = "H5Fflush() failed."; if ( verbose ) { - HDfprintf(stdout, "%s: H5Fflush() failed.\n", fcn_name); + HDfprintf(stdout, "%s: H5Fflush() failed.\n", FUNC); } } else { @@ -2507,7 +2506,7 @@ setup_cache(size_t max_cache_size, failure_mssg = "Can't get file_ptr."; if ( verbose ) { - HDfprintf(stdout, "%s: H5Fflush() failed.\n", fcn_name); + HDfprintf(stdout, "%s: H5Fflush() failed.\n", FUNC); } } } @@ -2515,7 +2514,7 @@ setup_cache(size_t max_cache_size, if ( show_progress ) /* 5 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -2567,7 +2566,7 @@ setup_cache(size_t max_cache_size, if ( show_progress ) /* 6 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -2577,7 +2576,7 @@ setup_cache(size_t max_cache_size, failure_mssg = "H5C_create() failed."; if ( verbose ) { - HDfprintf(stdout, "%s: H5C_create() failed.\n", fcn_name); + HDfprintf(stdout, "%s: H5C_create() failed.\n", FUNC); } } else if ( cache_ptr->magic != H5C__H5C_T_MAGIC ) { @@ -2586,14 +2585,14 @@ setup_cache(size_t max_cache_size, failure_mssg = "Bad cache_ptr magic."; if ( verbose ) { - HDfprintf(stdout, "%s: Bad cache_ptr magic.\n", fcn_name); + HDfprintf(stdout, "%s: Bad cache_ptr magic.\n", FUNC); } } } if ( show_progress ) /* 7 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { /* allocate space for test entries */ @@ -2606,7 +2605,7 @@ setup_cache(size_t max_cache_size, failure_mssg = "H5MF_alloc() failed."; if ( verbose ) { - HDfprintf(stdout, "%s: H5MF_alloc() failed.\n", fcn_name); + HDfprintf(stdout, "%s: H5MF_alloc() failed.\n", FUNC); } } else if ( actual_base_addr > BASE_ADDR ) { @@ -2620,7 +2619,7 @@ setup_cache(size_t max_cache_size, if ( verbose ) { HDfprintf(stdout, "%s: actual_base_addr > BASE_ADDR.\n", - fcn_name); + FUNC); } } @@ -2629,7 +2628,7 @@ setup_cache(size_t max_cache_size, if ( show_progress ) /* 8 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); if ( pass ) { @@ -2642,7 +2641,7 @@ setup_cache(size_t max_cache_size, if ( show_progress ) /* 9 */ HDfprintf(stdout, "%s() - %0d -- pass = %d\n", - fcn_name, mile_stone++, (int)pass); + FUNC, mile_stone++, (int)pass); return(ret_val); } /* setup_cache() */ @@ -2762,7 +2761,6 @@ expunge_entry(H5F_t * file_ptr, int32_t type, int32_t idx) { - /* const char * fcn_name = "expunge_entry()"; */ herr_t result; test_entry_t * base_addr; test_entry_t * entry_ptr; @@ -2825,7 +2823,6 @@ flush_cache(H5F_t * file_ptr, hbool_t dump_stats, hbool_t dump_detailed_stats) { - const char * fcn_name = "flush_cache()"; hbool_t verbose = FALSE; verify_unprotected(); @@ -2861,7 +2858,7 @@ flush_cache(H5F_t * file_ptr, if(verbose) { HDfprintf(stdout, "%s: unexpected il/is/cis/dis = %lld/%lld/%lld/%lld.\n", - fcn_name, + FUNC, (long long)(cache_ptr->index_len), (long long)(cache_ptr->index_size), 
(long long)(cache_ptr->clean_index_size), @@ -3579,13 +3576,12 @@ row_major_scan_forward(H5F_t * file_ptr, int dirty_destroys, int dirty_unprotects) { - const char * fcn_name = "row_major_scan_forward"; - H5C_t * cache_ptr; + H5C_t * cache_ptr = NULL; int32_t type = 0; int32_t idx; if ( verbose ) - HDfprintf(stdout, "%s(): entering.\n", fcn_name); + HDfprintf(stdout, "%s(): entering.\n", FUNC); if ( pass ) { @@ -3871,8 +3867,7 @@ hl_row_major_scan_forward(H5F_t * file_ptr, hbool_t display_detailed_stats, hbool_t do_inserts) { - const char * fcn_name = "hl_row_major_scan_forward"; - H5C_t * cache_ptr; + H5C_t * cache_ptr = NULL; int32_t type = 0; int32_t idx; int32_t i; @@ -3880,7 +3875,7 @@ hl_row_major_scan_forward(H5F_t * file_ptr, int32_t local_max_index; if ( verbose ) - HDfprintf(stdout, "%s(): entering.\n", fcn_name); + HDfprintf(stdout, "%s(): entering.\n", FUNC); if ( pass ) { @@ -3983,13 +3978,12 @@ row_major_scan_backward(H5F_t * file_ptr, int dirty_destroys, int dirty_unprotects) { - const char * fcn_name = "row_major_scan_backward"; - H5C_t * cache_ptr; + H5C_t * cache_ptr = NULL; int32_t type = NUMBER_OF_ENTRY_TYPES - 1; int32_t idx; if ( verbose ) - HDfprintf(stdout, "%s(): Entering.\n", fcn_name); + HDfprintf(stdout, "%s(): Entering.\n", FUNC); if ( pass ) { @@ -4269,8 +4263,7 @@ hl_row_major_scan_backward(H5F_t * file_ptr, hbool_t display_detailed_stats, hbool_t do_inserts) { - const char * fcn_name = "hl_row_major_scan_backward"; - H5C_t * cache_ptr; + H5C_t * cache_ptr = NULL; int32_t type = NUMBER_OF_ENTRY_TYPES - 1; int32_t idx; int32_t i; @@ -4278,7 +4271,7 @@ hl_row_major_scan_backward(H5F_t * file_ptr, int32_t local_max_index; if ( verbose ) - HDfprintf(stdout, "%s(): entering.\n", fcn_name); + HDfprintf(stdout, "%s(): entering.\n", FUNC); if ( pass ) { @@ -4376,13 +4369,12 @@ col_major_scan_forward(H5F_t * file_ptr, hbool_t do_inserts, int dirty_unprotects) { - const char * fcn_name = "col_major_scan_forward()"; - H5C_t * cache_ptr; + H5C_t * cache_ptr = NULL; int32_t type = 0; int32_t idx; if ( verbose ) - HDfprintf(stdout, "%s: entering.\n", fcn_name); + HDfprintf(stdout, "%s: entering.\n", FUNC); if ( pass ) { @@ -4477,8 +4469,7 @@ hl_col_major_scan_forward(H5F_t * file_ptr, hbool_t do_inserts, int dirty_unprotects) { - const char * fcn_name = "hl_col_major_scan_forward()"; - H5C_t * cache_ptr; + H5C_t * cache_ptr = NULL; int32_t type = 0; int32_t idx; int32_t lag = 200; @@ -4486,7 +4477,7 @@ hl_col_major_scan_forward(H5F_t * file_ptr, int32_t local_max_index; if ( verbose ) - HDfprintf(stdout, "%s: entering.\n", fcn_name); + HDfprintf(stdout, "%s: entering.\n", FUNC); if ( pass ) { @@ -4594,14 +4585,13 @@ col_major_scan_backward(H5F_t * file_ptr, hbool_t do_inserts, int dirty_unprotects) { - const char * fcn_name = "col_major_scan_backward()"; - H5C_t * cache_ptr; + H5C_t * cache_ptr = NULL; int mile_stone = 1; int32_t type; int32_t idx; if ( verbose ) - HDfprintf(stdout, "%s: entering.\n", fcn_name); + HDfprintf(stdout, "%s: entering.\n", FUNC); if ( pass ) { @@ -4620,7 +4610,7 @@ col_major_scan_backward(H5F_t * file_ptr, idx = MAX_ENTRIES + lag; if ( verbose ) /* 1 */ - HDfprintf(stdout, "%s: point %d.\n", fcn_name, mile_stone++); + HDfprintf(stdout, "%s: point %d.\n", FUNC, mile_stone++); while ( ( pass ) && ( (idx + lag) >= 0 ) ) @@ -4668,7 +4658,7 @@ col_major_scan_backward(H5F_t * file_ptr, } if ( verbose ) /* 2 */ - HDfprintf(stdout, "%s: point %d.\n", fcn_name, mile_stone++); + HDfprintf(stdout, "%s: point %d.\n", FUNC, mile_stone++); if ( ( pass ) && ( 
display_stats ) ) { @@ -4676,7 +4666,7 @@ col_major_scan_backward(H5F_t * file_ptr, } if ( verbose ) - HDfprintf(stdout, "%s: exiting.\n", fcn_name); + HDfprintf(stdout, "%s: exiting.\n", FUNC); return; @@ -4708,16 +4698,15 @@ hl_col_major_scan_backward(H5F_t * file_ptr, hbool_t do_inserts, int dirty_unprotects) { - const char * fcn_name = "hl_col_major_scan_backward()"; - H5C_t * cache_ptr; + H5C_t * cache_ptr = NULL; int32_t type = 0; - int32_t idx; + int32_t idx = -1; int32_t lag = 50; int32_t i; - int32_t local_max_index; + int32_t local_max_index = -1; if ( verbose ) - HDfprintf(stdout, "%s: entering.\n", fcn_name); + HDfprintf(stdout, "%s: entering.\n", FUNC); if ( pass ) { @@ -5101,7 +5090,7 @@ check_and_validate_cache_hit_rate(hid_t file_id, pass = FALSE; failure_mssg = "H5Fget_mdc_hit_rate() failed."; - } else if ( ! DBL_REL_EQUAL(hit_rate, expected_hit_rate, 0.00001F) ) { + } else if ( ! H5_DBL_ABS_EQUAL(hit_rate, expected_hit_rate) ) { pass = FALSE; failure_mssg = "unexpected hit rate."; @@ -5294,7 +5283,7 @@ resize_configs_are_equal(const H5C_auto_size_ctl_t *a, return(FALSE); else if(compare_init && (a->initial_size != b->initial_size)) return(FALSE); - else if(HDfabs(a->min_clean_fraction - b->min_clean_fraction) > FP_EPSILON) + else if(!H5_DBL_ABS_EQUAL(a->min_clean_fraction, b->min_clean_fraction)) return(FALSE); else if(a->max_size != b->max_size) return(FALSE); @@ -5304,9 +5293,9 @@ resize_configs_are_equal(const H5C_auto_size_ctl_t *a, return(FALSE); else if(a->incr_mode != b->incr_mode) return(FALSE); - else if(HDfabs(a->lower_hr_threshold - b->lower_hr_threshold) > FP_EPSILON) + else if(!H5_DBL_ABS_EQUAL(a->lower_hr_threshold, b->lower_hr_threshold)) return(FALSE); - else if(HDfabs(a->increment - b->increment) > FP_EPSILON) + else if(!H5_DBL_ABS_EQUAL(a->increment, b->increment)) return(FALSE); else if(a->apply_max_increment != b->apply_max_increment) return(FALSE); @@ -5314,15 +5303,15 @@ resize_configs_are_equal(const H5C_auto_size_ctl_t *a, return(FALSE); else if(a->flash_incr_mode != b->flash_incr_mode) return(FALSE); - else if(HDfabs(a->flash_multiple - b->flash_multiple) > FP_EPSILON) + else if(!H5_DBL_ABS_EQUAL(a->flash_multiple, b->flash_multiple)) return(FALSE); - else if(HDfabs(a->flash_threshold - b->flash_threshold) > FP_EPSILON) + else if(!H5_DBL_ABS_EQUAL(a->flash_threshold, b->flash_threshold)) return(FALSE); else if(a->decr_mode != b->decr_mode) return(FALSE); - else if(HDfabs(a->upper_hr_threshold - b->upper_hr_threshold) > FP_EPSILON) + else if(!H5_DBL_ABS_EQUAL(a->upper_hr_threshold, b->upper_hr_threshold)) return(FALSE); - else if(HDfabs(a->decrement - b->decrement) > FP_EPSILON) + else if(!H5_DBL_ABS_EQUAL(a->decrement, b->decrement)) return(FALSE); else if(a->apply_max_decrement != b->apply_max_decrement) return(FALSE); @@ -5332,7 +5321,7 @@ resize_configs_are_equal(const H5C_auto_size_ctl_t *a, return(FALSE); else if(a->apply_empty_reserve != b->apply_empty_reserve) return(FALSE); - else if(HDfabs(a->empty_reserve - b->empty_reserve) > FP_EPSILON) + else if(!H5_DBL_ABS_EQUAL(a->empty_reserve, b->empty_reserve)) return(FALSE); return(TRUE); } diff --git a/test/cache_common.h b/test/cache_common.h index be3195b..48b95d8 100644 --- a/test/cache_common.h +++ b/test/cache_common.h @@ -425,26 +425,26 @@ if ( ( (cache_ptr) == NULL ) || \ ( (a).set_initial_size == (b).set_initial_size ) ) && \ ( ( ! 
cmp_init_size ) || \ ( (a).initial_size == (b).initial_size ) ) && \ - ( DBL_REL_EQUAL((a).min_clean_fraction, (b).min_clean_fraction, 0.00001 ) ) && \ + ( H5_DBL_ABS_EQUAL((a).min_clean_fraction, (b).min_clean_fraction) ) && \ ( (a).max_size == (b).max_size ) && \ ( (a).min_size == (b).min_size ) && \ ( (a).epoch_length == (b).epoch_length ) && \ ( (a).incr_mode == (b).incr_mode ) && \ - ( DBL_REL_EQUAL((a).lower_hr_threshold, (b).lower_hr_threshold, 0.00001 ) ) && \ - ( DBL_REL_EQUAL((a).increment, (b).increment, 0.00001 ) ) && \ + ( H5_DBL_ABS_EQUAL((a).lower_hr_threshold, (b).lower_hr_threshold) ) && \ + ( H5_DBL_ABS_EQUAL((a).increment, (b).increment) ) && \ ( (a).apply_max_increment == (b).apply_max_increment ) && \ ( (a).max_increment == (b).max_increment ) && \ ( (a).flash_incr_mode == (b).flash_incr_mode ) && \ - ( DBL_REL_EQUAL((a).flash_multiple, (b).flash_multiple, 0.00001 ) ) && \ - ( DBL_REL_EQUAL((a).flash_threshold, (b).flash_threshold, 0.00001 ) ) && \ + ( H5_DBL_ABS_EQUAL((a).flash_multiple, (b).flash_multiple) ) && \ + ( H5_DBL_ABS_EQUAL((a).flash_threshold, (b).flash_threshold) ) && \ ( (a).decr_mode == (b).decr_mode ) && \ - ( DBL_REL_EQUAL((a).upper_hr_threshold, (b).upper_hr_threshold, 0.00001 ) ) && \ - ( DBL_REL_EQUAL((a).decrement, (b).decrement, 0.00001 ) ) && \ + ( H5_DBL_ABS_EQUAL((a).upper_hr_threshold, (b).upper_hr_threshold) ) && \ + ( H5_DBL_ABS_EQUAL((a).decrement, (b).decrement) ) && \ ( (a).apply_max_decrement == (b).apply_max_decrement ) && \ ( (a).max_decrement == (b).max_decrement ) && \ ( (a).epochs_before_eviction == (b).epochs_before_eviction ) && \ ( (a).apply_empty_reserve == (b).apply_empty_reserve ) && \ - ( DBL_REL_EQUAL((a).empty_reserve, (b).empty_reserve, 0.00001 ) ) && \ + ( H5_DBL_ABS_EQUAL((a).empty_reserve, (b).empty_reserve) ) && \ ( (a).dirty_bytes_threshold == (b).dirty_bytes_threshold ) && \ ( (a).metadata_write_strategy == (b).metadata_write_strategy ) ) @@ -482,9 +482,6 @@ if ( ( (cache_ptr) == NULL ) || \ (i).empty_reserve = (e).empty_reserve; \ } -/* Epsilon for floating-point comparisons */ -#define FP_EPSILON 0.000001f - /* misc type definitions */ diff --git a/test/cmpd_dset.c b/test/cmpd_dset.c index d81b7fc..5c591f8 100644 --- a/test/cmpd_dset.c +++ b/test/cmpd_dset.c @@ -1285,16 +1285,16 @@ compare_data(void *src_data, void *dst_data, hbool_t src_subset) s_ptr->c[3] != d_ptr->c[3] || s_ptr->d != d_ptr->d || s_ptr->e != d_ptr->e || - !FLT_ABS_EQUAL(s_ptr->f, d_ptr->f) || - !FLT_ABS_EQUAL(s_ptr->g, d_ptr->g) || - !FLT_ABS_EQUAL(s_ptr->h[0], d_ptr->h[0]) || - !FLT_ABS_EQUAL(s_ptr->h[1], d_ptr->h[1]) || - !FLT_ABS_EQUAL(s_ptr->i, d_ptr->i) || - !FLT_ABS_EQUAL(s_ptr->j, d_ptr->j) || - !DBL_ABS_EQUAL(s_ptr->k, d_ptr->k) || - !DBL_ABS_EQUAL(s_ptr->l, d_ptr->l) || - !DBL_ABS_EQUAL(s_ptr->m, d_ptr->m) || - !DBL_ABS_EQUAL(s_ptr->n, d_ptr->n) ) { + !H5_FLT_ABS_EQUAL(s_ptr->f, d_ptr->f) || + !H5_FLT_ABS_EQUAL(s_ptr->g, d_ptr->g) || + !H5_FLT_ABS_EQUAL(s_ptr->h[0], d_ptr->h[0]) || + !H5_FLT_ABS_EQUAL(s_ptr->h[1], d_ptr->h[1]) || + !H5_FLT_ABS_EQUAL(s_ptr->i, d_ptr->i) || + !H5_FLT_ABS_EQUAL(s_ptr->j, d_ptr->j) || + !H5_DBL_ABS_EQUAL(s_ptr->k, d_ptr->k) || + !H5_DBL_ABS_EQUAL(s_ptr->l, d_ptr->l) || + !H5_DBL_ABS_EQUAL(s_ptr->m, d_ptr->m) || + !H5_DBL_ABS_EQUAL(s_ptr->n, d_ptr->n) ) { H5_FAILED(); printf(" i=%d\n", i); diff --git a/test/cross_read.c b/test/cross_read.c index 35dd2ce..b2fae89 100644 --- a/test/cross_read.c +++ b/test/cross_read.c @@ -126,7 +126,7 @@ static int check_data(const char *dsetname, hid_t fid, hbool_t 
floating_number) /* Check results */ for (j=0; j<(NX+1); j++) { for (i=0; imagic != MSSG_MAGIC ) { @@ -1048,7 +1041,7 @@ recv_mssg(struct mssg_t *mssg_ptr, success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: invalid magic.\n", world_mpi_rank, - fcn_name); + FUNC); } } else if ( mssg_ptr->src != status.MPI_SOURCE ) { @@ -1057,7 +1050,7 @@ recv_mssg(struct mssg_t *mssg_ptr, if ( verbose ) { HDfprintf(stdout, "%d:%s: mssg_ptr->src != status.MPI_SOURCE.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } } @@ -1092,7 +1085,6 @@ static hbool_t send_mssg(struct mssg_t *mssg_ptr, hbool_t add_req_to_tag) { - const char * fcn_name = "send_mssg()"; hbool_t success = TRUE; int mssg_tag = CACHE_TEST_TAG; int result; @@ -1111,7 +1103,7 @@ send_mssg(struct mssg_t *mssg_ptr, success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: Invalid mssg on entry.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } @@ -1133,7 +1125,7 @@ send_mssg(struct mssg_t *mssg_ptr, success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: MPI_Send() failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } } @@ -1160,7 +1152,6 @@ send_mssg(struct mssg_t *mssg_ptr, static hbool_t setup_derived_types(void) { - const char * fcn_name = "setup_derived_types()"; hbool_t success = TRUE; int i; int result; @@ -1186,7 +1177,7 @@ setup_derived_types(void) success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: MPI_Address() call failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } else { @@ -1208,7 +1199,7 @@ setup_derived_types(void) success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: MPI_Type_struct() call failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } } @@ -1223,7 +1214,7 @@ setup_derived_types(void) success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: MPI_Type_commit() call failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } } @@ -1250,7 +1241,6 @@ setup_derived_types(void) static hbool_t takedown_derived_types(void) { - const char * fcn_name = "takedown_derived_types()"; hbool_t success = TRUE; int result; @@ -1262,7 +1252,7 @@ takedown_derived_types(void) success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: MPI_Type_free() call failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } @@ -1292,7 +1282,6 @@ takedown_derived_types(void) static hbool_t reset_server_counters(void) { - const char * fcn_name = "reset_server_counters()"; hbool_t success = TRUE; int i; long actual_total_reads = 0; @@ -1319,7 +1308,7 @@ reset_server_counters(void) nerrors++; if ( verbose ) { HDfprintf(stdout, "%d:%s: actual/total reads mismatch (%ld/%ld).\n", - world_mpi_rank, fcn_name, + world_mpi_rank, FUNC, actual_total_reads, total_reads); } } @@ -1330,7 +1319,7 @@ reset_server_counters(void) nerrors++; if ( verbose ) { HDfprintf(stdout, "%d:%s: actual/total writes mismatch (%ld/%ld).\n", - world_mpi_rank, fcn_name, + world_mpi_rank, FUNC, actual_total_writes, total_writes); } } @@ -1369,7 +1358,6 @@ reset_server_counters(void) static hbool_t server_main(void) { - const char * fcn_name = "server_main()"; hbool_t done = FALSE; hbool_t success = TRUE; int done_count = 0; @@ -1381,7 +1369,7 @@ server_main(void) success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: This isn't the server process?!?!?\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } @@ -1401,7 +1389,7 @@ server_main(void) case WRITE_REQ_ACK_CODE: success = FALSE; if(verbose) - HDfprintf(stdout, "%s: Received write ack?!?.\n", 
fcn_name); + HDfprintf(stdout, "%s: Received write ack?!?.\n", FUNC); break; case READ_REQ_CODE: @@ -1411,7 +1399,7 @@ server_main(void) case READ_REQ_REPLY_CODE: success = FALSE; if(verbose) - HDfprintf(stdout, "%s: Received read req reply?!?.\n", fcn_name); + HDfprintf(stdout, "%s: Received read req reply?!?.\n", FUNC); break; case SYNC_REQ_CODE: @@ -1421,7 +1409,7 @@ server_main(void) case SYNC_ACK_CODE: success = FALSE; if(verbose) - HDfprintf(stdout, "%s: Received sync ack?!?.\n", fcn_name); + HDfprintf(stdout, "%s: Received sync ack?!?.\n", FUNC); break; case REQ_TTL_WRITES_CODE: @@ -1431,7 +1419,7 @@ server_main(void) case REQ_TTL_WRITES_RPLY_CODE: success = FALSE; if(verbose) - HDfprintf(stdout, "%s: Received total writes reply?!?.\n", fcn_name); + HDfprintf(stdout, "%s: Received total writes reply?!?.\n", FUNC); break; case REQ_TTL_READS_CODE: @@ -1441,7 +1429,7 @@ server_main(void) case REQ_TTL_READS_RPLY_CODE: success = FALSE; if(verbose) - HDfprintf(stdout, "%s: Received total reads reply?!?.\n", fcn_name); + HDfprintf(stdout, "%s: Received total reads reply?!?.\n", FUNC); break; case REQ_ENTRY_WRITES_CODE: @@ -1451,7 +1439,7 @@ server_main(void) case REQ_ENTRY_WRITES_RPLY_CODE: success = FALSE; if(verbose) - HDfprintf(stdout, "%s: Received entry writes reply?!?.\n", fcn_name); + HDfprintf(stdout, "%s: Received entry writes reply?!?.\n", FUNC); break; case REQ_ENTRY_READS_CODE: @@ -1461,7 +1449,7 @@ server_main(void) case REQ_ENTRY_READS_RPLY_CODE: success = FALSE; if(verbose) - HDfprintf(stdout, "%s: Received entry reads reply?!?.\n", fcn_name); + HDfprintf(stdout, "%s: Received entry reads reply?!?.\n", FUNC); break; case REQ_RW_COUNT_RESET_CODE: @@ -1471,7 +1459,7 @@ server_main(void) case REQ_RW_COUNT_RESET_RPLY_CODE: success = FALSE; if(verbose) - HDfprintf(stdout, "%s: Received RW count reset reply?!?.\n", fcn_name); + HDfprintf(stdout, "%s: Received RW count reset reply?!?.\n", FUNC); break; case DONE_REQ_CODE: @@ -1484,7 +1472,7 @@ server_main(void) nerrors++; success = FALSE; if(verbose) - HDfprintf(stdout, "%d:%s: Unknown request code.\n", world_mpi_rank, fcn_name); + HDfprintf(stdout, "%d:%s: Unknown request code.\n", world_mpi_rank, FUNC); break; } } @@ -1516,7 +1504,6 @@ server_main(void) static hbool_t serve_read_request(struct mssg_t * mssg_ptr) { - const char * fcn_name = "serve_read_request()"; hbool_t report_mssg = FALSE; hbool_t success = TRUE; int target_index; @@ -1531,7 +1518,7 @@ serve_read_request(struct mssg_t * mssg_ptr) success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } @@ -1546,7 +1533,7 @@ serve_read_request(struct mssg_t * mssg_ptr) success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: addr lookup failed for %a.\n", - world_mpi_rank, fcn_name, target_addr); + world_mpi_rank, FUNC, target_addr); } } else if ( data[target_index].len != mssg_ptr->len ) { @@ -1555,7 +1542,7 @@ serve_read_request(struct mssg_t * mssg_ptr) if ( verbose ) { HDfprintf(stdout, "%d:%s: data[i].len = %Zu != mssg->len = %d.\n", - world_mpi_rank, fcn_name, + world_mpi_rank, FUNC, data[target_index].len, mssg_ptr->len); } } else if ( ! (data[target_index].valid) ) { @@ -1565,7 +1552,7 @@ serve_read_request(struct mssg_t * mssg_ptr) if ( verbose ) { HDfprintf(stdout, "%d:%s: proc %d read invalid entry. 
idx/base_addr = %d/%a.\n", - world_mpi_rank, fcn_name, + world_mpi_rank, FUNC, mssg_ptr->src, target_index, data[target_index].base_addr); @@ -1644,7 +1631,6 @@ serve_read_request(struct mssg_t * mssg_ptr) static hbool_t serve_sync_request(struct mssg_t * mssg_ptr) { - const char * fcn_name = "serve_sync_request()"; hbool_t report_mssg = FALSE; hbool_t success = TRUE; struct mssg_t reply; @@ -1657,7 +1643,7 @@ serve_sync_request(struct mssg_t * mssg_ptr) success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } @@ -1719,7 +1705,6 @@ serve_sync_request(struct mssg_t * mssg_ptr) static hbool_t serve_write_request(struct mssg_t * mssg_ptr) { - const char * fcn_name = "serve_write_request()"; hbool_t report_mssg = FALSE; hbool_t success = TRUE; int target_index; @@ -1737,7 +1722,7 @@ serve_write_request(struct mssg_t * mssg_ptr) success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } @@ -1752,7 +1737,7 @@ serve_write_request(struct mssg_t * mssg_ptr) success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: addr lookup failed for %a.\n", - world_mpi_rank, fcn_name, target_addr); + world_mpi_rank, FUNC, target_addr); } } else if ( data[target_index].len != mssg_ptr->len ) { @@ -1761,7 +1746,7 @@ serve_write_request(struct mssg_t * mssg_ptr) if ( verbose ) { HDfprintf(stdout, "%d:%s: data[i].len = %Zu != mssg->len = %d.\n", - world_mpi_rank, fcn_name, + world_mpi_rank, FUNC, data[target_index].len, mssg_ptr->len); } } @@ -1778,7 +1763,7 @@ serve_write_request(struct mssg_t * mssg_ptr) success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: new ver = %d <= old ver = %d.\n", - world_mpi_rank, fcn_name, + world_mpi_rank, FUNC, new_ver_num, data[target_index].ver); } } @@ -1862,7 +1847,6 @@ serve_write_request(struct mssg_t * mssg_ptr) static hbool_t serve_total_writes_request(struct mssg_t * mssg_ptr) { - const char * fcn_name = "serve_total_writes_request()"; hbool_t report_mssg = FALSE; hbool_t success = TRUE; struct mssg_t reply; @@ -1875,7 +1859,7 @@ serve_total_writes_request(struct mssg_t * mssg_ptr) success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } @@ -1942,7 +1926,6 @@ serve_total_writes_request(struct mssg_t * mssg_ptr) static hbool_t serve_total_reads_request(struct mssg_t * mssg_ptr) { - const char * fcn_name = "serve_total_reads_request()"; hbool_t report_mssg = FALSE; hbool_t success = TRUE; struct mssg_t reply; @@ -1955,7 +1938,7 @@ serve_total_reads_request(struct mssg_t * mssg_ptr) success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } @@ -2022,7 +2005,6 @@ serve_total_reads_request(struct mssg_t * mssg_ptr) static hbool_t serve_entry_writes_request(struct mssg_t * mssg_ptr) { - const char * fcn_name = "serve_entry_writes_request()"; hbool_t report_mssg = FALSE; hbool_t success = TRUE; int target_index; @@ -2037,7 +2019,7 @@ serve_entry_writes_request(struct mssg_t * mssg_ptr) success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } @@ -2052,7 +2034,7 @@ serve_entry_writes_request(struct mssg_t * mssg_ptr) success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: addr lookup failed for %a.\n", - world_mpi_rank, fcn_name, target_addr); + world_mpi_rank, 
FUNC, target_addr); } } else { @@ -2120,7 +2102,6 @@ serve_entry_writes_request(struct mssg_t * mssg_ptr) static hbool_t serve_entry_reads_request(struct mssg_t * mssg_ptr) { - const char * fcn_name = "serve_entry_reads_request()"; hbool_t report_mssg = FALSE; hbool_t success = TRUE; int target_index; @@ -2135,7 +2116,7 @@ serve_entry_reads_request(struct mssg_t * mssg_ptr) success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } @@ -2150,7 +2131,7 @@ serve_entry_reads_request(struct mssg_t * mssg_ptr) success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: addr lookup failed for %a.\n", - world_mpi_rank, fcn_name, target_addr); + world_mpi_rank, FUNC, target_addr); } } else { @@ -2217,7 +2198,6 @@ serve_entry_reads_request(struct mssg_t * mssg_ptr) static hbool_t serve_rw_count_reset_request(struct mssg_t * mssg_ptr) { - const char * fcn_name = "serve_rw_count_reset_request()"; hbool_t report_mssg = FALSE; hbool_t success = TRUE; struct mssg_t reply; @@ -2230,7 +2210,7 @@ serve_rw_count_reset_request(struct mssg_t * mssg_ptr) success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } @@ -2417,7 +2397,6 @@ flush_datum(H5F_t *f, haddr_t H5_ATTR_UNUSED addr, void *thing) { - const char * fcn_name = "flush_datum()"; hbool_t was_dirty = FALSE; herr_t ret_value = SUCCEED; int idx; @@ -2464,7 +2443,7 @@ flush_datum(H5F_t *f, ret_value = FAIL; HDfprintf(stdout, "%d:%s: Flushed dirty entry from non-zero file process.", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } if ( ret_value == SUCCEED ) { @@ -2490,7 +2469,7 @@ flush_datum(H5F_t *f, ret_value = FAIL; if ( verbose ) { HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } else @@ -2512,7 +2491,7 @@ flush_datum(H5F_t *f, ret_value = FAIL; if ( verbose ) { HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } else if ( ( mssg.req != WRITE_REQ_ACK_CODE ) || ( mssg.src != world_server_mpi_rank ) || @@ -2526,7 +2505,7 @@ flush_datum(H5F_t *f, ret_value = FAIL; if ( verbose ) { HDfprintf(stdout, "%d:%s: Bad data in write req ack.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } } @@ -2575,7 +2554,6 @@ load_datum(H5F_t H5_ATTR_UNUSED *f, haddr_t addr, void H5_ATTR_UNUSED *udata) { - const char * fcn_name = "load_datum()"; hbool_t success = TRUE; int idx; struct datum * entry_ptr = NULL; @@ -2610,7 +2588,7 @@ load_datum(H5F_t H5_ATTR_UNUSED *f, success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } @@ -2622,7 +2600,7 @@ load_datum(H5F_t H5_ATTR_UNUSED *f, success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } } @@ -2641,42 +2619,42 @@ load_datum(H5F_t H5_ATTR_UNUSED *f, success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: Bad data in read req reply.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } #if 0 /* This has been useful debugging code -- keep it for now. 
*/ if ( mssg.req != READ_REQ_REPLY_CODE ) { HDfprintf(stdout, "%d:%s: mssg.req != READ_REQ_REPLY_CODE.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); HDfprintf(stdout, "%d:%s: mssg.req = %d.\n", - world_mpi_rank, fcn_name, (int)(mssg.req)); + world_mpi_rank, FUNC, (int)(mssg.req)); } if ( mssg.src != world_server_mpi_rank ) { HDfprintf(stdout, "%d:%s: mssg.src != world_server_mpi_rank.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } if ( mssg.dest != world_mpi_rank ) { HDfprintf(stdout, "%d:%s: mssg.dest != world_mpi_rank.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } if ( mssg.base_addr != entry_ptr->base_addr ) { HDfprintf(stdout, "%d:%s: mssg.base_addr != entry_ptr->base_addr.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); HDfprintf(stdout, "%d:%s: mssg.base_addr = %a.\n", - world_mpi_rank, fcn_name, mssg.base_addr); + world_mpi_rank, FUNC, mssg.base_addr); HDfprintf(stdout, "%d:%s: entry_ptr->base_addr = %a.\n", - world_mpi_rank, fcn_name, + world_mpi_rank, FUNC, entry_ptr->base_addr); } @@ -2684,22 +2662,22 @@ load_datum(H5F_t H5_ATTR_UNUSED *f, HDfprintf(stdout, "%d:%s: mssg.len != entry_ptr->len.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); HDfprintf(stdout, "%d:%s: mssg.len = %a.\n", - world_mpi_rank, fcn_name, mssg.len); + world_mpi_rank, FUNC, mssg.len); } if ( mssg.ver < entry_ptr->ver ) { HDfprintf(stdout, "%d:%s: mssg.ver < entry_ptr->ver.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } if ( mssg.magic != MSSG_MAGIC ) { HDfprintf(stdout, "%d:%s: mssg.magic != MSSG_MAGIC.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } #endif /* JRM */ @@ -2800,7 +2778,6 @@ static void expunge_entry(H5F_t * file_ptr, int32_t idx) { - const char * fcn_name = "expunge_entry()"; hbool_t in_cache; herr_t result; struct datum * entry_ptr; @@ -2825,7 +2802,7 @@ expunge_entry(H5F_t * file_ptr, nerrors++; if ( verbose ) { HDfprintf(stdout, "%d:%s: Error in H5AC_expunge_entry().\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } @@ -2840,14 +2817,14 @@ expunge_entry(H5F_t * file_ptr, nerrors++; if ( verbose ) { HDfprintf(stdout, "%d:%s: Error in H5C_get_entry_status().\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } else if ( in_cache ) { nerrors++; if ( verbose ) { HDfprintf(stdout, "%d:%s: Expunged entry still in cache?!?\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } } @@ -2884,7 +2861,6 @@ insert_entry(H5C_t * cache_ptr, int32_t idx, unsigned int flags) { - const char * fcn_name = "insert_entry()"; hbool_t insert_pinned; herr_t result; struct datum * entry_ptr; @@ -2916,7 +2892,7 @@ insert_entry(H5C_t * cache_ptr, nerrors++; if ( verbose ) { HDfprintf(stdout, "%d:%s: Error in H5AC_insert_entry().\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } @@ -2939,7 +2915,7 @@ insert_entry(H5C_t * cache_ptr, nerrors++; if ( verbose ) { HDfprintf(stdout, "%d:%s: data[%d].header.is_dirty = %d.\n", - world_mpi_rank, fcn_name, idx, + world_mpi_rank, FUNC, idx, (int)(data[idx].header.is_dirty)); } } @@ -2988,7 +2964,6 @@ local_pin_and_unpin_random_entries(H5F_t * file_ptr, int min_count, int max_count) { - /* const char * fcn_name = "local_pin_and_unpin_random_entries()"; */ if ( nerrors == 0 ) { @@ -3057,7 +3032,6 @@ local_pin_random_entry(H5F_t * file_ptr, int min_idx, int max_idx) { - /* const char * fcn_name = "local_pin_random_entry()"; */ int idx; if ( nerrors == 0 ) { @@ -3101,7 +3075,6 @@ static void local_unpin_all_entries(H5F_t * file_ptr, hbool_t via_unprotect) { - /* 
const char * fcn_name = "local_unpin_all_entries()"; */ if ( nerrors == 0 ) { @@ -3144,7 +3117,6 @@ local_unpin_next_pinned_entry(H5F_t * file_ptr, int start_idx, hbool_t via_unprotect) { - /* const char * fcn_name = "local_unpin_next_pinned_entry()"; */ int i = 0; int idx = -1; @@ -3204,7 +3176,6 @@ lock_and_unlock_random_entries(H5F_t * file_ptr, int min_count, int max_count) { - /* const char * fcn_name = "lock_and_unlock_random_entries()"; */ int count; int i; @@ -3249,7 +3220,6 @@ lock_and_unlock_random_entry(H5F_t * file_ptr, int min_idx, int max_idx) { - /* const char * fcn_name = "lock_and_unlock_random_entry()"; */ int idx; if ( nerrors == 0 ) { @@ -3297,7 +3267,6 @@ static void lock_entry(H5F_t * file_ptr, int32_t idx) { - const char * fcn_name = "lock_entry()"; struct datum * entry_ptr; H5C_cache_entry_t * cache_entry_ptr; @@ -3323,7 +3292,7 @@ lock_entry(H5F_t * file_ptr, nerrors++; if ( verbose ) { HDfprintf(stdout, "%d:%s: error in H5AC_protect().\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } else { @@ -3355,7 +3324,6 @@ lock_entry(H5F_t * file_ptr, static void mark_entry_dirty(int32_t idx) { - const char * fcn_name = "mark_entry_dirty()"; herr_t result; struct datum * entry_ptr; @@ -3380,7 +3348,7 @@ mark_entry_dirty(int32_t idx) if ( verbose ) { HDfprintf(stdout, "%d:%s: error in H5AC_mark_entry_dirty().\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } else if ( ! ( entry_ptr->locked ) ) @@ -3413,7 +3381,6 @@ pin_entry(H5F_t * file_ptr, hbool_t global, hbool_t dirty) { - /* const char * fcn_name = "pin_entry()"; */ unsigned int flags = H5AC__PIN_ENTRY_FLAG; struct datum * entry_ptr; @@ -3480,7 +3447,6 @@ static void pin_protected_entry(int32_t idx, hbool_t global) { - const char * fcn_name = "pin_protected_entry()"; herr_t result; struct datum * entry_ptr; @@ -3506,7 +3472,7 @@ pin_protected_entry(int32_t idx, if ( verbose ) { HDfprintf(stdout, "%d:%s: Error in H5AC_pin_protected entry().\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } @@ -3554,7 +3520,6 @@ move_entry(H5F_t * file_ptr, int32_t old_idx, int32_t new_idx) { - const char * fcn_name = "move_entry()"; herr_t result; int tmp; size_t tmp_len; @@ -3617,7 +3582,7 @@ move_entry(H5F_t * file_ptr, nerrors++; if ( verbose ) { HDfprintf(stdout, "%d:%s: H5AC_move_entry() failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } else { @@ -3644,7 +3609,7 @@ move_entry(H5F_t * file_ptr, if ( verbose ) { HDfprintf(stdout, "%d:%s: data[%d].header.is_dirty = %d.\n", - world_mpi_rank, fcn_name, new_idx, + world_mpi_rank, FUNC, new_idx, (int)(data[new_idx].header.is_dirty)); } } @@ -3675,7 +3640,6 @@ move_entry(H5F_t * file_ptr, static hbool_t reset_server_counts(void) { - const char * fcn_name = "reset_server_counts()"; hbool_t success = TRUE; /* will set to FALSE if appropriate. 
*/ struct mssg_t mssg; @@ -3698,7 +3662,7 @@ reset_server_counts(void) success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } } @@ -3711,7 +3675,7 @@ reset_server_counts(void) success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } else if ( ( mssg.req != REQ_RW_COUNT_RESET_RPLY_CODE ) || ( mssg.src != world_server_mpi_rank ) || @@ -3727,7 +3691,7 @@ reset_server_counts(void) if ( verbose ) { HDfprintf(stdout, "%d:%s: Bad data in req r/w counter reset reply.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } } @@ -3756,7 +3720,6 @@ static void resize_entry(int32_t idx, size_t new_size) { - const char * fcn_name = "resize_entry()"; herr_t result; struct datum * entry_ptr; @@ -3783,7 +3746,7 @@ resize_entry(int32_t idx, nerrors++; if ( verbose ) { HDfprintf(stdout, "%d:%s: H5AC_resize_entry() failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } else { @@ -3831,7 +3794,6 @@ setup_cache_for_test(hid_t * fid_ptr, H5C_t ** cache_ptr_ptr, int metadata_write_strategy) { - const char * fcn_name = "setup_cache_for_test()"; hbool_t success = FALSE; /* will set to TRUE if appropriate. */ hbool_t enable_rpt_fcn = FALSE; hid_t fid = -1; @@ -3850,13 +3812,13 @@ setup_cache_for_test(hid_t * fid_ptr, nerrors++; if ( verbose ) { HDfprintf(stdout, "%d:%s: H5Fcreate() failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } else if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) { nerrors++; if ( verbose ) { HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } else { file_ptr = (H5F_t *)H5I_object_verify(fid, H5I_FILE); @@ -3866,7 +3828,7 @@ setup_cache_for_test(hid_t * fid_ptr, nerrors++; if ( verbose ) { HDfprintf(stdout, "%d:%s: Can't get file_ptr.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } else { cache_ptr = file_ptr->shared->cache; @@ -3876,13 +3838,13 @@ setup_cache_for_test(hid_t * fid_ptr, nerrors++; if ( verbose ) { HDfprintf(stdout, "%d:%s: Can't get cache_ptr.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } else if ( cache_ptr->magic != H5C__H5C_T_MAGIC ) { nerrors++; if ( verbose ) { HDfprintf(stdout, "%d:%s: Bad cache_ptr magic.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } else { cache_ptr->ignore_tags = TRUE; @@ -3902,7 +3864,7 @@ setup_cache_for_test(hid_t * fid_ptr, HDfprintf(stdout, "%d:%s: H5AC_get_cache_auto_resize_config(1) failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } else { @@ -3914,12 +3876,12 @@ setup_cache_for_test(hid_t * fid_ptr, HDfprintf(stdout, "%d:%s: H5AC_set_cache_auto_resize_config() failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } else if ( enable_rpt_fcn ) { HDfprintf(stdout, "%d:%s: rpt_fcn enabled.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } } @@ -3936,7 +3898,7 @@ setup_cache_for_test(hid_t * fid_ptr, nerrors++; if ( verbose ) { HDfprintf(stdout, "%d:%s: cache_ptr->aux_ptr == NULL.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } else if ( ((H5AC_aux_t *)(cache_ptr->aux_ptr))->magic != H5AC__H5AC_AUX_T_MAGIC ) { @@ -3945,7 +3907,7 @@ setup_cache_for_test(hid_t * fid_ptr, if ( verbose ) { HDfprintf(stdout, "%d:%s: cache_ptr->aux_ptr->magic != H5AC__H5AC_AUX_T_MAGIC.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } else if( ((H5AC_aux_t *)(cache_ptr->aux_ptr))->metadata_write_strategy 
!= metadata_write_strategy ) { @@ -3954,7 +3916,7 @@ setup_cache_for_test(hid_t * fid_ptr, if ( verbose ) { HDfprintf(stdout, "%d:%s: bad cache_ptr->aux_ptr->metadata_write_strategy\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } } @@ -3972,7 +3934,7 @@ setup_cache_for_test(hid_t * fid_ptr, HDfprintf(stdout, "%d:%s: H5AC_get_cache_auto_resize_config(2) failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } else if ( test_config.metadata_write_strategy != metadata_write_strategy ) { @@ -3983,7 +3945,7 @@ setup_cache_for_test(hid_t * fid_ptr, HDfprintf(stdout, "%d:%s: unexpected metadata_write_strategy.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } } @@ -3999,7 +3961,7 @@ setup_cache_for_test(hid_t * fid_ptr, if ( verbose ) { HDfprintf(stdout, "%d:%s: H5C_set_write_done_callback failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } } @@ -4014,7 +3976,7 @@ setup_cache_for_test(hid_t * fid_ptr, if ( verbose ) { HDfprintf(stdout, "%d:%s: H5AC_set_sync_point_done_callback failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } } @@ -4055,7 +4017,6 @@ static void verify_writes(int num_writes, haddr_t * written_entries_tbl) { - const char * fcn_name = "verify_writes()"; const hbool_t report = FALSE; hbool_t proceed = TRUE; int i = 0; @@ -4076,7 +4037,7 @@ verify_writes(int num_writes, nerrors++; if ( verbose ) { HDfprintf(stdout, "%d:%s: barrier 1 failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } } @@ -4103,7 +4064,7 @@ verify_writes(int num_writes, nerrors++; if ( verbose ) { HDfprintf(stdout, "%d:%s: barrier 2 failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } } @@ -4119,12 +4080,12 @@ verify_writes(int num_writes, if ( proceed ) { HDfprintf(stdout, "%d:%s: verified %d writes.\n", - world_mpi_rank, fcn_name, num_writes); + world_mpi_rank, FUNC, num_writes); } else { HDfprintf(stdout, "%d:%s: FAILED to verify %d writes.\n", - world_mpi_rank, fcn_name, num_writes); + world_mpi_rank, FUNC, num_writes); } } @@ -4143,7 +4104,7 @@ verify_writes(int num_writes, nerrors++; if ( verbose ) { HDfprintf(stdout, "%d:%s: barrier 3 failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } } @@ -4175,7 +4136,6 @@ verify_writes(int num_writes, static void setup_rand(void) { - const char * fcn_name = "setup_rand()"; hbool_t use_predefined_seeds = FALSE; int num_predefined_seeds = 3; unsigned predefined_seeds[3] = {33402, 33505, 33422}; @@ -4190,7 +4150,7 @@ setup_rand(void) seed = predefined_seeds[world_mpi_rank]; HDfprintf(stdout, "%d:%s: predefined_seed = %d.\n", - world_mpi_rank, fcn_name, seed); + world_mpi_rank, FUNC, seed); fflush(stdout); HDsrand(seed); @@ -4201,13 +4161,13 @@ setup_rand(void) nerrors++; if ( verbose ) { HDfprintf(stdout, "%d:%s: gettimeofday() failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } else { seed = (unsigned)tv.tv_usec; if ( verbose ) { HDfprintf(stdout, "%d:%s: seed = %d.\n", - world_mpi_rank, fcn_name, seed); + world_mpi_rank, FUNC, seed); fflush(stdout); } HDsrand(seed); @@ -4238,7 +4198,6 @@ setup_rand(void) static hbool_t take_down_cache(hid_t fid) { - const char * fcn_name = "take_down_cache()"; hbool_t success = FALSE; /* will set to TRUE if appropriate. 
*/ /* close the file and delete it */ @@ -4247,7 +4206,7 @@ take_down_cache(hid_t fid) nerrors++; if ( verbose ) { HDfprintf(stdout, "%d:%s: H5Fclose() failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } else if ( world_mpi_rank == world_server_mpi_rank ) { @@ -4257,7 +4216,7 @@ take_down_cache(hid_t fid) nerrors++; if ( verbose ) { HDfprintf(stdout, "%d:%s: HDremove() failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } else { @@ -4297,7 +4256,6 @@ static hbool_t verify_entry_reads(haddr_t addr, int expected_entry_reads) { - const char * fcn_name = "verify_entry_reads()"; hbool_t success = TRUE; int reported_entry_reads; struct mssg_t mssg; @@ -4321,7 +4279,7 @@ verify_entry_reads(haddr_t addr, success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } } @@ -4334,7 +4292,7 @@ verify_entry_reads(haddr_t addr, success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } } @@ -4353,7 +4311,7 @@ verify_entry_reads(haddr_t addr, success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: Bad data in req entry reads reply.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } else { @@ -4370,7 +4328,7 @@ verify_entry_reads(haddr_t addr, if ( verbose ) { HDfprintf(stdout, "%d:%s: rep/exp entry 0x%llx reads mismatch (%ld/%ld).\n", - world_mpi_rank, fcn_name, (long long)addr, + world_mpi_rank, FUNC, (long long)addr, reported_entry_reads, expected_entry_reads); } } @@ -4405,7 +4363,6 @@ static hbool_t verify_entry_writes(haddr_t addr, int expected_entry_writes) { - const char * fcn_name = "verify_entry_writes()"; hbool_t success = TRUE; int reported_entry_writes; struct mssg_t mssg; @@ -4429,7 +4386,7 @@ verify_entry_writes(haddr_t addr, success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } } @@ -4442,7 +4399,7 @@ verify_entry_writes(haddr_t addr, success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } } @@ -4461,7 +4418,7 @@ verify_entry_writes(haddr_t addr, success = FALSE; if ( verbose ) { HDfprintf(stdout, "%d:%s: Bad data in req entry writes reply.\n", - world_mpi_rank, fcn_name); + world_mpi_rank, FUNC); } } else { @@ -4478,7 +4435,7 @@ verify_entry_writes(haddr_t addr, if ( verbose ) { HDfprintf(stdout, "%d:%s: rep/exp entry 0x%llx writes mismatch (%ld/%ld).\n", - world_mpi_rank, fcn_name, (long long)addr, + world_mpi_rank, FUNC, (long long)addr, reported_entry_writes, expected_entry_writes); } } @@ -4511,7 +4468,6 @@ verify_entry_writes(haddr_t addr, static hbool_t verify_total_reads(int expected_total_reads) { - const char * fcn_name = "verify_total_reads()"; hbool_t success = TRUE; /* will set to FALSE if appropriate. 
 */
     long reported_total_reads;
     struct mssg_t mssg;
@@ -4535,7 +4491,7 @@ verify_total_reads(int expected_total_reads)
             success = FALSE;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: send_mssg() failed.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         }
     }
@@ -4548,7 +4504,7 @@ verify_total_reads(int expected_total_reads)
             success = FALSE;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         } else if ( ( mssg.req != REQ_TTL_READS_RPLY_CODE ) ||
                     ( mssg.src != world_server_mpi_rank ) ||
@@ -4562,7 +4518,7 @@ verify_total_reads(int expected_total_reads)
             success = FALSE;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: Bad data in req total reads reply.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         } else {
@@ -4579,7 +4535,7 @@ verify_total_reads(int expected_total_reads)
             if ( verbose ) {
                 HDfprintf(stdout,
                   "%d:%s: reported/expected total reads mismatch (%ld/%ld).\n",
-                  world_mpi_rank, fcn_name,
+                  world_mpi_rank, FUNC,
                   reported_total_reads, expected_total_reads);
             }
@@ -4613,7 +4569,6 @@ verify_total_reads(int expected_total_reads)
 static hbool_t
 verify_total_writes(int expected_total_writes)
 {
-    const char * fcn_name = "verify_total_writes()";
     hbool_t success = TRUE; /* will set to FALSE if appropriate. */
     long reported_total_writes;
     struct mssg_t mssg;
@@ -4637,7 +4592,7 @@ verify_total_writes(int expected_total_writes)
             success = FALSE;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: send_mssg() failed.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         }
     }
@@ -4650,7 +4605,7 @@ verify_total_writes(int expected_total_writes)
             success = FALSE;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         } else if ( ( mssg.req != REQ_TTL_WRITES_RPLY_CODE ) ||
                     ( mssg.src != world_server_mpi_rank ) ||
@@ -4664,7 +4619,7 @@ verify_total_writes(int expected_total_writes)
             success = FALSE;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: Bad data in req total reads reply.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         } else {
@@ -4681,7 +4636,7 @@ verify_total_writes(int expected_total_writes)
             if ( verbose ) {
                 HDfprintf(stdout,
                   "%d:%s: reported/expected total writes mismatch (%ld/%ld).\n",
-                  world_mpi_rank, fcn_name,
+                  world_mpi_rank, FUNC,
                   reported_total_writes, expected_total_writes);
             }
         }
@@ -4715,7 +4670,6 @@ unlock_entry(H5F_t * file_ptr,
              int32_t idx,
              unsigned int flags)
 {
-    const char * fcn_name = "unlock_entry()";
     herr_t dirtied;
     herr_t result;
     struct datum * entry_ptr;
@@ -4750,7 +4704,7 @@ unlock_entry(H5F_t * file_ptr,
         nerrors++;
         if ( verbose ) {
             HDfprintf(stdout, "%d:%s: error in H5C_unprotect().\n",
-                      world_mpi_rank, fcn_name);
+                      world_mpi_rank, FUNC);
         }
     } else {
@@ -4803,7 +4757,6 @@ unpin_entry(H5F_t * file_ptr,
             hbool_t dirty,
             hbool_t via_unprotect)
 {
-    const char * fcn_name = "unpin_entry()";
     herr_t result;
     unsigned int flags = H5AC__UNPIN_ENTRY_FLAG;
     struct datum * entry_ptr;
@@ -4848,7 +4801,7 @@ unpin_entry(H5F_t * file_ptr,
             nerrors++;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: error in H5AC_unpin_entry().\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         }
     }
@@ -4892,7 +4845,6 @@ unpin_entry(H5F_t * file_ptr,
 static hbool_t
 server_smoke_check(void)
 {
-    const char * fcn_name = "server_smoke_check()";
     hbool_t success = TRUE;
     int max_nerrors;
     struct mssg_t mssg;
@@ -4914,7 +4866,7 @@ server_smoke_check(void)
             nerrors++;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: server_main() failed.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         }
     }
@@ -4936,7 +4888,7 @@ server_smoke_check(void)
         nerrors++;
         if ( verbose ) {
            HDfprintf(stdout, "%d:%s: send_mssg() failed on write.\n",
-                     world_mpi_rank, fcn_name);
+                     world_mpi_rank, FUNC);
         }
     }
@@ -4952,7 +4904,7 @@ server_smoke_check(void)
             nerrors++;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         }
     }
@@ -4972,7 +4924,7 @@ server_smoke_check(void)
             nerrors++;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: Bad data in write req ack.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         }
     }
@@ -4988,7 +4940,7 @@ server_smoke_check(void)
         nerrors++;
         if ( verbose ) {
            HDfprintf(stdout, "%d:%s: barrier 1 failed.\n",
-                     world_mpi_rank, fcn_name);
+                     world_mpi_rank, FUNC);
         }
     }
@@ -5021,7 +4973,7 @@ server_smoke_check(void)
         if ( verbose ) {
            HDfprintf(stdout, "%d:%s: barrier 2 failed.\n",
-                     world_mpi_rank, fcn_name);
+                     world_mpi_rank, FUNC);
         }
     }
@@ -5045,7 +4997,7 @@ server_smoke_check(void)
             nerrors++;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: send_mssg() failed on write.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         }
     }
@@ -5060,7 +5012,7 @@ server_smoke_check(void)
             nerrors++;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         }
     }
@@ -5080,7 +5032,7 @@ server_smoke_check(void)
             nerrors++;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: Bad data in read req reply.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         }
     }
@@ -5092,7 +5044,7 @@ server_smoke_check(void)
         nerrors++;
         if ( verbose ) {
            HDfprintf(stdout, "%d:%s: barrier 3 failed.\n",
-                     world_mpi_rank, fcn_name);
+                     world_mpi_rank, FUNC);
         }
     }
@@ -5124,7 +5076,7 @@ server_smoke_check(void)
         if ( verbose ) {
            HDfprintf(stdout, "%d:%s: barrier 4 failed.\n",
-                     world_mpi_rank, fcn_name);
+                     world_mpi_rank, FUNC);
         }
     }
@@ -5141,7 +5093,7 @@ server_smoke_check(void)
         if ( verbose ) {
            HDfprintf(stdout, "%d:%s: barrier 5 failed.\n",
-                     world_mpi_rank, fcn_name);
+                     world_mpi_rank, FUNC);
         }
     }
@@ -5173,7 +5125,7 @@ server_smoke_check(void)
         if ( verbose ) {
            HDfprintf(stdout, "%d:%s: barrier 6 failed.\n",
-                     world_mpi_rank, fcn_name);
+                     world_mpi_rank, FUNC);
         }
     }
@@ -5197,7 +5149,7 @@ server_smoke_check(void)
             nerrors++;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         }
     }
@@ -5241,7 +5193,6 @@ server_smoke_check(void)
 static hbool_t
 smoke_check_1(int metadata_write_strategy)
 {
-    const char * fcn_name = "smoke_check_1()";
     hbool_t success = TRUE;
     int i;
     int max_nerrors;
@@ -5283,7 +5234,7 @@ smoke_check_1(int metadata_write_strategy)
             nerrors++;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: server_main() failed.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         }
     }
@@ -5297,7 +5248,7 @@ smoke_check_1(int metadata_write_strategy)
         cache_ptr = NULL;
         if ( verbose ) {
            HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
-                     world_mpi_rank, fcn_name);
+                     world_mpi_rank, FUNC);
         }
     }
@@ -5335,7 +5286,7 @@ smoke_check_1(int metadata_write_strategy)
             nerrors++;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         }
     }
@@ -5370,7 +5321,7 @@ smoke_check_1(int metadata_write_strategy)
             nerrors++;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         }
     }
@@ -5417,7 +5368,6 @@ smoke_check_1(int metadata_write_strategy)
 static hbool_t
 smoke_check_2(int metadata_write_strategy)
 {
-    const char * fcn_name = "smoke_check_2()";
     hbool_t success = TRUE;
     int i;
     int max_nerrors;
@@ -5459,7 +5409,7 @@ smoke_check_2(int metadata_write_strategy)
             nerrors++;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: server_main() failed.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         }
     }
@@ -5473,7 +5423,7 @@ smoke_check_2(int metadata_write_strategy)
         cache_ptr = NULL;
         if ( verbose ) {
            HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
-                     world_mpi_rank, fcn_name);
+                     world_mpi_rank, FUNC);
         }
     }
@@ -5558,7 +5508,7 @@ smoke_check_2(int metadata_write_strategy)
             nerrors++;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         }
     }
@@ -5593,7 +5543,7 @@ smoke_check_2(int metadata_write_strategy)
             nerrors++;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         }
     }
@@ -5643,7 +5593,6 @@ smoke_check_2(int metadata_write_strategy)
 static hbool_t
 smoke_check_3(int metadata_write_strategy)
 {
-    const char * fcn_name = "smoke_check_3()";
     hbool_t success = TRUE;
     int i;
     int max_nerrors;
@@ -5689,7 +5638,7 @@ smoke_check_3(int metadata_write_strategy)
             nerrors++;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: server_main() failed.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         }
     }
@@ -5703,7 +5652,7 @@ smoke_check_3(int metadata_write_strategy)
         cache_ptr = NULL;
        if ( verbose ) {
            HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
-                     world_mpi_rank, fcn_name);
+                     world_mpi_rank, FUNC);
         }
     }
@@ -5768,7 +5717,7 @@ smoke_check_3(int metadata_write_strategy)
         nerrors++;
         if ( verbose ) {
            HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
-                     world_mpi_rank, fcn_name);
+                     world_mpi_rank, FUNC);
         }
     }
@@ -5882,7 +5831,7 @@ smoke_check_3(int metadata_write_strategy)
             nerrors++;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         }
     }
@@ -5918,7 +5867,7 @@ smoke_check_3(int metadata_write_strategy)
             nerrors++;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         }
     }
@@ -5968,7 +5917,6 @@ smoke_check_3(int metadata_write_strategy)
 static hbool_t
 smoke_check_4(int metadata_write_strategy)
 {
-    const char * fcn_name = "smoke_check_4()";
     hbool_t success = TRUE;
     int i;
     int max_nerrors;
@@ -6014,7 +5962,7 @@ smoke_check_4(int metadata_write_strategy)
             nerrors++;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: server_main() failed.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         }
     }
@@ -6028,7 +5976,7 @@ smoke_check_4(int metadata_write_strategy)
         cache_ptr = NULL;
        if ( verbose ) {
            HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
-                     world_mpi_rank, fcn_name);
+                     world_mpi_rank, FUNC);
         }
     }
@@ -6104,7 +6052,7 @@ smoke_check_4(int metadata_write_strategy)
         nerrors++;
         if ( verbose ) {
            HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
-                     world_mpi_rank, fcn_name);
+                     world_mpi_rank, FUNC);
         }
     }
@@ -6200,7 +6148,7 @@ smoke_check_4(int metadata_write_strategy)
             nerrors++;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         }
     }
@@ -6236,7 +6184,7 @@ smoke_check_4(int metadata_write_strategy)
             nerrors++;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         }
     }
@@ -6281,7 +6229,6 @@
 static hbool_t
 smoke_check_5(int metadata_write_strategy)
 {
-    const char * fcn_name = "smoke_check_5()";
     hbool_t success = TRUE;
     int i;
     int max_nerrors;
@@ -6324,7 +6271,7 @@ smoke_check_5(int metadata_write_strategy)
             nerrors++;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: server_main() failed.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         }
     }
@@ -6339,7 +6286,7 @@ smoke_check_5(int metadata_write_strategy)
         cache_ptr = NULL;
        if ( verbose ) {
            HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
-                     world_mpi_rank, fcn_name);
+                     world_mpi_rank, FUNC);
         }
     }
@@ -6353,7 +6300,7 @@ smoke_check_5(int metadata_write_strategy)
         nerrors++;
        if ( verbose ) {
            HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
-                     world_mpi_rank, fcn_name);
+                     world_mpi_rank, FUNC);
         }
     }
@@ -6411,7 +6358,7 @@ smoke_check_5(int metadata_write_strategy)
             nerrors++;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         }
     }
@@ -6446,7 +6393,7 @@ smoke_check_5(int metadata_write_strategy)
             nerrors++;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         }
     }
@@ -6520,7 +6467,6 @@ trace_file_check(int metadata_write_strategy)
 #ifdef H5_METADATA_TRACE_FILE
-    const char * fcn_name = "trace_file_check()";
     const char *((* expected_output)[]) = NULL;
     const char * expected_output_0[] =
     {
@@ -6643,7 +6589,7 @@ trace_file_check(int metadata_write_strategy)
             nerrors++;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: server_main() failed.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         }
     }
@@ -6658,7 +6604,7 @@ trace_file_check(int metadata_write_strategy)
         cache_ptr = NULL;
        if ( verbose ) {
            HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
-                     world_mpi_rank, fcn_name);
+                     world_mpi_rank, FUNC);
         }
     }
@@ -6672,7 +6618,7 @@ trace_file_check(int metadata_write_strategy)
         nerrors++;
         HDfprintf(stdout,
                   "%d:%s: H5AC_get_cache_auto_resize_config() failed.\n",
-                  world_mpi_rank, fcn_name);
+                  world_mpi_rank, FUNC);
     } else {
@@ -6685,7 +6631,7 @@ trace_file_check(int metadata_write_strategy)
             nerrors++;
             HDfprintf(stdout,
                       "%d:%s: H5AC_set_cache_auto_resize_config() failed.\n",
-                      world_mpi_rank, fcn_name);
+                      world_mpi_rank, FUNC);
         }
     }
 }
@@ -6721,7 +6667,7 @@ trace_file_check(int metadata_write_strategy)
         nerrors++;
        if ( verbose ) {
            HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
-                     world_mpi_rank, fcn_name);
+                     world_mpi_rank, FUNC);
         }
     }
@@ -6735,7 +6681,7 @@ trace_file_check(int metadata_write_strategy)
         nerrors++;
         HDfprintf(stdout,
                   "%d:%s: H5AC_get_cache_auto_resize_config() failed.\n",
-                  world_mpi_rank, fcn_name);
+                  world_mpi_rank, FUNC);
     } else {
@@ -6749,7 +6695,7 @@ trace_file_check(int metadata_write_strategy)
             nerrors++;
             HDfprintf(stdout,
                       "%d:%s: H5AC_set_cache_auto_resize_config() failed.\n",
-                      world_mpi_rank, fcn_name);
+                      world_mpi_rank, FUNC);
         }
     }
 }
@@ -6761,7 +6707,7 @@ trace_file_check(int metadata_write_strategy)
             nerrors++;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         }
     }
@@ -6796,7 +6742,7 @@ trace_file_check(int metadata_write_strategy)
             nerrors++;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         }
     }
@@ -6811,7 +6757,7 @@ trace_file_check(int metadata_write_strategy)
             nerrors++;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: HDfopen failed.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
         }
     }
@@ -6848,12 +6794,12 @@ trace_file_check(int metadata_write_strategy)
                 if ( verbose ) {
                     HDfprintf(stdout,
                               "%d:%s: Unexpected data in trace file line %d.\n",
-                              world_mpi_rank, fcn_name, i);
+                              world_mpi_rank, FUNC, i);
                     HDfprintf(stdout, "%d:%s: expected = \"%s\" %d\n",
-                              world_mpi_rank, fcn_name, (*expected_output)[i],
+                              world_mpi_rank, FUNC, (*expected_output)[i],
                               expected_line_len);
                     HDfprintf(stdout, "%d:%s: actual = \"%s\" %d\n",
-                              world_mpi_rank, fcn_name, buffer,
+                              world_mpi_rank, FUNC, buffer,
                               actual_line_len);
                 }
             } else {
@@ -6920,7 +6866,6 @@ trace_file_check(int metadata_write_strategy)
 int
 main(int argc, char **argv)
 {
-    const char * fcn_name = "main()";
     int express_test;
     unsigned u;
     int mpi_size;
@@ -6999,7 +6944,7 @@ main(int argc, char **argv)
         nerrors++;
        if ( verbose ) {
            HDfprintf(stdout, "%d:%s: H5Pcreate() failed 1.\n",
-                     world_mpi_rank, fcn_name);
+                     world_mpi_rank, FUNC);
         }
     }
@@ -7008,7 +6953,7 @@ main(int argc, char **argv)
         nerrors++;
        if ( verbose ) {
            HDfprintf(stdout, "%d:%s: H5Pset_fapl_mpio() failed 1.\n",
-                     world_mpi_rank, fcn_name);
+                     world_mpi_rank, FUNC);
         }
     }
@@ -7021,7 +6966,7 @@ main(int argc, char **argv)
             nerrors++;
             if ( verbose ) {
                 HDfprintf(stdout, "%d:%s: h5_fixname() failed.\n",
-                          world_mpi_rank, fcn_name);
+                          world_mpi_rank, FUNC);
             }
             break;
         }
@@ -7032,7 +6977,7 @@ main(int argc, char **argv)
         nerrors++;
        if ( verbose ) {
            HDfprintf(stdout, "%d:%s: H5Pclose() failed.\n",
-                     world_mpi_rank, fcn_name);
+                     world_mpi_rank, FUNC);
         }
     }
@@ -7044,7 +6989,7 @@ main(int argc, char **argv)
         nerrors++;
        if ( verbose ) {
            HDfprintf(stdout, "%d:%s: H5Pcreate() failed 2.\n",
-                     world_mpi_rank, fcn_name);
+                     world_mpi_rank, FUNC);
         }
     }
@@ -7053,7 +6998,7 @@ main(int argc, char **argv)
             nerrors++;
            if ( verbose ) {
                HDfprintf(stdout, "%d:%s: H5Pset_fapl_mpio() failed 2.\n",
-                         world_mpi_rank, fcn_name);
+                         world_mpi_rank, FUNC);
             }
         }
     }
-- 
cgit v0.12