author    | Quincey Koziol <koziol@hdfgroup.org> | 2015-05-15 01:48:50 (GMT)
committer | Quincey Koziol <koziol@hdfgroup.org> | 2015-05-15 01:48:50 (GMT)
commit    | 7206d78c523553b6192ac7d6befd54656d070130 (patch)
tree      | ccd8164f0a85b47dee2cb1cd25293c6015a7ccb6
parent    | 775a54c19a86e0c0fcd3c5589d0248c015719250 (diff)
download  | hdf5-7206d78c523553b6192ac7d6befd54656d070130.zip
          | hdf5-7206d78c523553b6192ac7d6befd54656d070130.tar.gz
          | hdf5-7206d78c523553b6192ac7d6befd54656d070130.tar.bz2
[svn-r27072] Description:
Clean up H5B interface, to align w/v3 metadata cache changes
Tested on:
MacOSX/64 10.10.3 (amazon) w/serial & parallel
Linux/32 2.6.* (jam) w/serial & parallel
-rw-r--r-- | src/H5ACprivate.h |  50
-rw-r--r-- | src/H5C.c         | 385
-rw-r--r-- | src/H5Cpkg.h      |  34
-rw-r--r-- | src/H5Cprivate.h  |  52
4 files changed, 203 insertions, 318 deletions
diff --git a/src/H5ACprivate.h b/src/H5ACprivate.h index 0a958b0..cf4c122 100644 --- a/src/H5ACprivate.h +++ b/src/H5ACprivate.h @@ -340,7 +340,7 @@ H5_DLLVAR hid_t H5AC_ind_dxpl_id; H5_DLL herr_t H5AC_init(void); H5_DLL herr_t H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr); H5_DLL herr_t H5AC_get_entry_status(const H5F_t *f, haddr_t addr, - unsigned * status_ptr); + unsigned *status_ptr); H5_DLL herr_t H5AC_insert_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr, void *thing, unsigned int flags); H5_DLL herr_t H5AC_pin_protected_entry(void *thing); @@ -350,57 +350,35 @@ H5_DLL void * H5AC_protect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, H5_DLL herr_t H5AC_resize_entry(void *thing, size_t new_size); H5_DLL herr_t H5AC_unpin_entry(void *thing); H5_DLL herr_t H5AC_destroy_flush_dependency(void *parent_thing, void *child_thing); -H5_DLL herr_t H5AC_unprotect(H5F_t *f, hid_t dxpl_id, - const H5AC_class_t *type, haddr_t addr, - void *thing, unsigned flags); +H5_DLL herr_t H5AC_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, + haddr_t addr, void *thing, unsigned flags); H5_DLL herr_t H5AC_flush(H5F_t *f, hid_t dxpl_id); H5_DLL herr_t H5AC_mark_entry_dirty(void *thing); H5_DLL herr_t H5AC_move_entry(H5F_t *f, const H5AC_class_t *type, - haddr_t old_addr, haddr_t new_addr); - + haddr_t old_addr, haddr_t new_addr); H5_DLL herr_t H5AC_dest(H5F_t *f, hid_t dxpl_id); - H5_DLL herr_t H5AC_expunge_entry(H5F_t *f, hid_t dxpl_id, - const H5AC_class_t *type, haddr_t addr, - unsigned flags); - + const H5AC_class_t *type, haddr_t addr, unsigned flags); H5_DLL herr_t H5AC_set_sync_point_done_callback(H5C_t *cache_ptr, void (*sync_point_done)(int num_writes, haddr_t *written_entries_tbl)); - H5_DLL herr_t H5AC_set_write_done_callback(H5C_t * cache_ptr, void (* write_done)(void)); H5_DLL herr_t H5AC_stats(const H5F_t *f); - H5_DLL herr_t H5AC_dump_cache(const H5F_t *f); - H5_DLL herr_t H5AC_get_cache_auto_resize_config(const H5AC_t * cache_ptr, - H5AC_cache_config_t *config_ptr); - -H5_DLL herr_t H5AC_get_cache_size(H5AC_t * cache_ptr, - size_t * max_size_ptr, - size_t * min_clean_size_ptr, - size_t * cur_size_ptr, - int32_t * cur_num_entries_ptr); - -H5_DLL herr_t H5AC_get_cache_hit_rate(H5AC_t * cache_ptr, - double * hit_rate_ptr); - -H5_DLL herr_t H5AC_reset_cache_hit_rate_stats(H5AC_t * cache_ptr); - + H5AC_cache_config_t *config_ptr); +H5_DLL herr_t H5AC_get_cache_size(H5AC_t *cache_ptr, size_t *max_size_ptr, + size_t *min_clean_size_ptr, size_t *cur_size_ptr, int32_t *cur_num_entries_ptr); +H5_DLL herr_t H5AC_get_cache_hit_rate(H5AC_t *cache_ptr, double *hit_rate_ptr); +H5_DLL herr_t H5AC_reset_cache_hit_rate_stats(H5AC_t *cache_ptr); H5_DLL herr_t H5AC_set_cache_auto_resize_config(H5AC_t *cache_ptr, - H5AC_cache_config_t *config_ptr); - -H5_DLL herr_t H5AC_validate_config(H5AC_cache_config_t * config_ptr); - -H5_DLL herr_t H5AC_close_trace_file( H5AC_t * cache_ptr); - -H5_DLL herr_t H5AC_open_trace_file(H5AC_t * cache_ptr, - const char * trace_file_name); + H5AC_cache_config_t *config_ptr); +H5_DLL herr_t H5AC_validate_config(H5AC_cache_config_t *config_ptr); +H5_DLL herr_t H5AC_close_trace_file(H5AC_t *cache_ptr); +H5_DLL herr_t H5AC_open_trace_file(H5AC_t *cache_ptr, const char *trace_file_name); H5_DLL herr_t H5AC_tag(hid_t dxpl_id, haddr_t metadata_tag, haddr_t * prev_tag); - H5_DLL herr_t H5AC_retag_copied_metadata(H5F_t * f, haddr_t metadata_tag); - H5_DLL herr_t H5AC_ignore_tags(H5F_t * f); #ifdef H5_HAVE_PARALLEL @@ -229,6 +229,8 @@ const 
H5C_class_t epoch_marker_class = /* size = */ &H5C_epoch_marker_size }; + + /*************************************************************************** * Class functions for H5C__EPOCH_MAKER_TYPE: * @@ -255,6 +257,8 @@ done: FUNC_LEAVE_NOAPI(ret_value) } + + static herr_t H5C_epoch_marker_flush(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, @@ -274,6 +278,8 @@ done: FUNC_LEAVE_NOAPI(ret_value) } + + static herr_t H5C_epoch_marker_dest(H5F_t UNUSED * f, void UNUSED * thing) @@ -784,7 +790,7 @@ H5C_apply_candidate_list(H5F_t * f, entries_flushed++; #if ( H5C_APPLY_CANDIDATE_LIST__DEBUG > 1 ) - HDfprintf(stdout, "%s:%d: flushing 0x%llx.\n", FUNC, mpi_rank, + HDfprintf(stdout, "%s:%d: flushing 0x%llx.\n", FUNC, mpi_rank, (long long)flush_ptr->addr); #endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */ @@ -1604,48 +1610,38 @@ H5C_expunge_entry(H5F_t * f, FUNC_ENTER_NOAPI(FAIL) - HDassert( f ); - HDassert( f->shared ); + HDassert(f); + HDassert(f->shared); cache_ptr = f->shared->cache; - HDassert( cache_ptr ); - HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC ); - HDassert( type ); - HDassert( type->clear ); - HDassert( type->dest ); - HDassert( H5F_addr_defined(addr) ); + HDassert(cache_ptr); + HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC); + HDassert(type); + HDassert(type->clear); + HDassert(type->dest); + HDassert(H5F_addr_defined(addr)); #if H5C_DO_EXTREME_SANITY_CHECKS - if ( H5C_validate_lru_list(cache_ptr) < 0 ) { - - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ - "LRU extreme sanity check failed on entry.\n"); - } + if(H5C_validate_lru_list(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ + "LRU extreme sanity check failed on entry.\n"); #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ + /* Look for entry in cache */ H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL) - - if ( ( entry_ptr == NULL ) || ( entry_ptr->type != type ) ) { - + if((entry_ptr == NULL) || (entry_ptr->type != type)) /* the target doesn't exist in the cache, so we are done. */ HGOTO_DONE(SUCCEED) - } - - HDassert( entry_ptr->addr == addr ); - HDassert( entry_ptr->type == type ); - - if ( entry_ptr->is_protected ) { - - HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, \ - "Target entry is protected.") - } - if ( entry_ptr->is_pinned ) { + HDassert(entry_ptr->addr == addr); + HDassert(entry_ptr->type == type); - HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, \ - "Target entry is pinned.") - } + /* Check for entry being pinned or protected */ + if(entry_ptr->is_protected) + HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is protected.") + if(entry_ptr->is_pinned) + HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is pinned.") /* Pass along 'free file space' flag to cache client */ entry_ptr->free_file_space_on_destroy = ( (flags & H5C__FREE_FILE_SPACE_FLAG) != 0 ); @@ -1670,17 +1666,13 @@ H5C_expunge_entry(H5F_t * f, } done: - #if H5C_DO_EXTREME_SANITY_CHECKS - if ( H5C_validate_lru_list(cache_ptr) < 0 ) { - - HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ - "LRU extreme sanity check failed on exit.\n"); - } + if(H5C_validate_lru_list(cache_ptr) < 0) + HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ + "LRU extreme sanity check failed on exit.\n"); #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ FUNC_LEAVE_NOAPI(ret_value) - } /* H5C_expunge_entry() */ @@ -1965,8 +1957,8 @@ H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, unsign FALSE); if ( status < 0 ) { - /* This shouldn't happen -- if it does, we are toast - * so just scream and die. 
+ /* This shouldn't happen -- if it does, + * we are toast so just scream and die. */ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \ "dirty pinned entry flush failed.") @@ -1983,7 +1975,7 @@ H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, unsign * aren't trying to do a destroy here, so that * is not an issue. */ - if(entry_ptr->flush_dep_height == curr_flush_dep_height ){ + if(entry_ptr->flush_dep_height == curr_flush_dep_height ) { #if H5C_DO_SANITY_CHECKS flushed_entries_count++; flushed_entries_size += entry_ptr->size; @@ -1998,8 +1990,8 @@ H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, unsign FALSE); if ( status < 0 ) { - /* This shouldn't happen -- if it does, we are - * toast so just scream and die. + /* This shouldn't happen -- if it does, + * we are toast so just scream and die. */ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \ "Can't flush entry.") @@ -2111,7 +2103,7 @@ H5C_flush_to_min_clean(H5F_t * f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id) { - H5C_t * cache_ptr; + H5C_t * cache_ptr; herr_t result; hbool_t first_flush = TRUE; hbool_t write_permitted; @@ -2396,41 +2388,28 @@ done: *------------------------------------------------------------------------- */ herr_t -H5C_get_cache_hit_rate(H5C_t * cache_ptr, - double * hit_rate_ptr) - +H5C_get_cache_hit_rate(H5C_t * cache_ptr, double * hit_rate_ptr) { herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(FAIL) - if ( ( cache_ptr == NULL ) || ( cache_ptr->magic != H5C__H5C_T_MAGIC ) ) { - + if((cache_ptr == NULL ) || (cache_ptr->magic != H5C__H5C_T_MAGIC)) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry.") - } - - if ( hit_rate_ptr == NULL ) { - + if(hit_rate_ptr == NULL) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad hit_rate_ptr on entry.") - } - HDassert( cache_ptr->cache_hits >= 0 ); - HDassert( cache_ptr->cache_accesses >= cache_ptr->cache_hits ); - - if ( cache_ptr->cache_accesses > 0 ) { + HDassert(cache_ptr->cache_hits >= 0); + HDassert(cache_ptr->cache_accesses >= cache_ptr->cache_hits); + if(cache_ptr->cache_accesses > 0) *hit_rate_ptr = ((double)(cache_ptr->cache_hits)) / ((double)(cache_ptr->cache_accesses)); - - } else { - + else *hit_rate_ptr = 0.0f; - } done: - FUNC_LEAVE_NOAPI(ret_value) - } /* H5C_get_cache_hit_rate() */ @@ -2686,10 +2665,10 @@ H5C_insert_entry(H5F_t * f, herr_t result; hbool_t first_flush = TRUE; hbool_t insert_pinned; - hbool_t flush_last; + hbool_t flush_last; #ifdef H5_HAVE_PARALLEL - hbool_t flush_collectively; -#endif + hbool_t flush_collectively; +#endif /* H5_HAVE_PARALLEL */ hbool_t set_flush_marker; hbool_t write_permitted = TRUE; size_t empty_space; @@ -2731,7 +2710,7 @@ H5C_insert_entry(H5F_t * f, flush_last = ( (flags & H5C__FLUSH_LAST_FLAG) != 0 ); #ifdef H5_HAVE_PARALLEL flush_collectively = ( (flags & H5C__FLUSH_COLLECTIVELY_FLAG) != 0 ); -#endif +#endif /* H5_HAVE_PARALLEL */ entry_ptr = (H5C_cache_entry_t *)thing; @@ -2741,19 +2720,12 @@ H5C_insert_entry(H5F_t * f, H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL) - if ( test_entry_ptr != NULL ) { - - if ( test_entry_ptr == entry_ptr ) { - - HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, \ - "entry already in cache.") - - } else { - - HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, \ - "duplicate entry in cache.") - } - } + if(test_entry_ptr != NULL) { + if(test_entry_ptr == entry_ptr) + HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "entry already in cache.") + else + HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "duplicate entry in cache.") + } /* end if */ 
#ifndef NDEBUG entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC; @@ -2829,28 +2801,17 @@ H5C_insert_entry(H5F_t * f, } } - if ( cache_ptr->index_size >= cache_ptr->max_cache_size ) { - + if(cache_ptr->index_size >= cache_ptr->max_cache_size) empty_space = 0; - - } else { - + else empty_space = cache_ptr->max_cache_size - cache_ptr->index_size; - } - - if ( ( cache_ptr->evictions_enabled ) - && + if ( ( cache_ptr->evictions_enabled ) && ( ( (cache_ptr->index_size + entry_ptr->size) > - cache_ptr->max_cache_size - ) + cache_ptr->max_cache_size) || - ( - ( ( empty_space + cache_ptr->clean_index_size ) < - cache_ptr->min_clean_size ) - ) - ) - ) { + ( ( ( empty_space + cache_ptr->clean_index_size ) < + cache_ptr->min_clean_size ) ) ) ) { size_t space_needed; @@ -3364,11 +3325,6 @@ done: * Programmer: John Mainzer * 6/2/04 * - * JRM -- 11/5/08 - * On review this function looks like no change is needed to - * support the new clean_index_size and dirty_index_size - * fields of H5C_t. - * *------------------------------------------------------------------------- */ herr_t @@ -3377,11 +3333,11 @@ H5C_move_entry(H5C_t * cache_ptr, haddr_t old_addr, haddr_t new_addr) { + H5C_cache_entry_t * entry_ptr = NULL; + H5C_cache_entry_t * test_entry_ptr = NULL; #if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS hbool_t was_dirty; #endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */ - H5C_cache_entry_t * entry_ptr = NULL; - H5C_cache_entry_t * test_entry_ptr = NULL; #if H5C_DO_SANITY_CHECKS hbool_t removed_entry_from_slist = FALSE; #endif /* H5C_DO_SANITY_CHECKS */ @@ -3881,10 +3837,7 @@ H5C_protect(H5F_t * f, } #endif /* H5C_DO_EXTREME_SANITY_CHECKS */ - if ( (flags & H5C__READ_ONLY_FLAG) != 0 ) - { - read_only = TRUE; - } + read_only = ( (flags & H5C__READ_ONLY_FLAG) != 0 ); /* first check to see if the target is in cache */ H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, NULL) @@ -3895,7 +3848,7 @@ H5C_protect(H5F_t * f, if(entry_ptr->type != type) HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, NULL, "incorrect cache entry type") - #if H5C_DO_TAGGING_SANITY_CHECKS +#if H5C_DO_TAGGING_SANITY_CHECKS /* The entry is already in the cache, but make sure that the tag value being passed in via dxpl is still legal. This will ensure that had the entry NOT been in the cache, tagging was still set up correctly @@ -3918,8 +3871,8 @@ H5C_protect(H5F_t * f, HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "tag verification failed"); } /* end if */ - #endif - +#endif + hit = TRUE; thing = (void *)entry_ptr; @@ -3958,21 +3911,15 @@ H5C_protect(H5F_t * f, } } - if ( cache_ptr->index_size >= cache_ptr->max_cache_size ) { - + if(cache_ptr->index_size >= cache_ptr->max_cache_size) empty_space = 0; - - } else { - + else empty_space = cache_ptr->max_cache_size - cache_ptr->index_size; - } - /* try to free up if necceary and if evictions are permitted. Note * that if evictions are enabled, we will call H5C_make_space_in_cache() * regardless if the min_free_space requirement is not met. */ - if ( ( cache_ptr->evictions_enabled ) && ( ( (cache_ptr->index_size + entry_ptr->size) > cache_ptr->max_cache_size) @@ -3984,12 +3931,9 @@ H5C_protect(H5F_t * f, size_t space_needed; - if ( empty_space <= entry_ptr->size ) { - + if(empty_space <= entry_ptr->size) cache_ptr->cache_full = TRUE; - } - if ( cache_ptr->check_write_permitted != NULL ) { result = (cache_ptr->check_write_permitted)(f, @@ -4195,25 +4139,18 @@ H5C_protect(H5F_t * f, * into complience. 
*/ - if ( cache_ptr->index_size >= cache_ptr->max_cache_size ) { - + if(cache_ptr->index_size >= cache_ptr->max_cache_size) empty_space = 0; - - } else { - + else empty_space = cache_ptr->max_cache_size - cache_ptr->index_size; - } - if ( ( cache_ptr->index_size > cache_ptr->max_cache_size ) || ( ( empty_space + cache_ptr->clean_index_size ) < cache_ptr->min_clean_size) ) { - if ( cache_ptr->index_size > cache_ptr->max_cache_size ) { - + if(cache_ptr->index_size > cache_ptr->max_cache_size) cache_ptr->cache_full = TRUE; - } result = H5C_make_space_in_cache(f, primary_dxpl_id, secondary_dxpl_id, @@ -4651,9 +4588,7 @@ H5C_set_prefix(H5C_t * cache_ptr, char * prefix) cache_ptr->prefix[H5C__PREFIX_LEN - 1] = '\0'; done: - FUNC_LEAVE_NOAPI(ret_value) - } /* H5C_set_prefix() */ @@ -5564,7 +5499,7 @@ H5C_unprotect(H5F_t * f, void * thing, unsigned int flags) { - H5C_t * cache_ptr; + H5C_t * cache_ptr; hbool_t deleted; hbool_t dirtied; hbool_t set_flush_marker; @@ -5582,13 +5517,13 @@ H5C_unprotect(H5F_t * f, FUNC_ENTER_NOAPI(FAIL) - deleted = ( (flags & H5C__DELETED_FLAG) != 0 ); - dirtied = ( (flags & H5C__DIRTIED_FLAG) != 0 ); - set_flush_marker = ( (flags & H5C__SET_FLUSH_MARKER_FLAG) != 0 ); - pin_entry = ( (flags & H5C__PIN_ENTRY_FLAG) != 0 ); - unpin_entry = ( (flags & H5C__UNPIN_ENTRY_FLAG) != 0 ); - free_file_space = ( (flags & H5C__FREE_FILE_SPACE_FLAG) != 0 ); - take_ownership = ( (flags & H5C__TAKE_OWNERSHIP_FLAG) != 0 ); + deleted = ((flags & H5C__DELETED_FLAG) != 0); + dirtied = ((flags & H5C__DIRTIED_FLAG) != 0); + set_flush_marker = ((flags & H5C__SET_FLUSH_MARKER_FLAG) != 0); + pin_entry = ((flags & H5C__PIN_ENTRY_FLAG) != 0); + unpin_entry = ((flags & H5C__UNPIN_ENTRY_FLAG) != 0); + free_file_space = ((flags & H5C__FREE_FILE_SPACE_FLAG) != 0); + take_ownership = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0); HDassert( f ); HDassert( f->shared ); @@ -5632,49 +5567,39 @@ H5C_unprotect(H5F_t * f, * the ro_ref_counter. Don't actually unprotect until the ref count * drops to zero. */ - if ( entry_ptr->ro_ref_count > 1 ) { - - HDassert( entry_ptr->is_protected ); - HDassert( entry_ptr->is_read_only ); - - if ( dirtied ) { + if(entry_ptr->ro_ref_count > 1) { + /* Sanity check */ + HDassert(entry_ptr->is_protected); + HDassert(entry_ptr->is_read_only); - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \ - "Read only entry modified(1)??") - } + if(dirtied) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Read only entry modified??") + /* Reduce the RO ref count */ (entry_ptr->ro_ref_count)--; /* Pin or unpin the entry as requested. 
*/ - if ( pin_entry ) { - + if(pin_entry) { /* Pin the entry from a client */ if(H5C_pin_entry_from_client(cache_ptr, entry_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client") - - } else if ( unpin_entry ) { - + } else if(unpin_entry) { /* Unpin the entry from a client */ if(H5C_unpin_entry_from_client(cache_ptr, entry_ptr, FALSE) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry by client") - - } + } /* end if */ } else { + if(entry_ptr->is_read_only) { + /* Sanity check */ + HDassert(entry_ptr->ro_ref_count == 1); - if ( entry_ptr->is_read_only ) { - - HDassert( entry_ptr->ro_ref_count == 1 ); - - if ( dirtied ) { - - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \ - "Read only entry modified(2)??") - } + if(dirtied) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Read only entry modified??") entry_ptr->is_read_only = FALSE; entry_ptr->ro_ref_count = 0; - } + } /* end if */ #ifdef H5_HAVE_PARALLEL /* When the H5C code is used to implement the metadata cache in the @@ -5692,47 +5617,36 @@ H5C_unprotect(H5F_t * f, * are contiguous, with only one dirty flag, we have to let the supplied * functions deal with the reseting the is_dirty flag. */ - if ( entry_ptr->clear_on_unprotect ) { - - HDassert( entry_ptr->is_dirty ); + if(entry_ptr->clear_on_unprotect) { + /* Sanity check */ + HDassert(entry_ptr->is_dirty); entry_ptr->clear_on_unprotect = FALSE; - - if ( ! dirtied ) { - + if(!dirtied) clear_entry = TRUE; - } - } + } /* end if */ #endif /* H5_HAVE_PARALLEL */ - if ( ! (entry_ptr->is_protected) ) { - - HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \ - "Entry already unprotected??") - } - - /* mark the entry as dirty if appropriate */ - entry_ptr->is_dirty = ( (entry_ptr->is_dirty) || dirtied ); + if(!entry_ptr->is_protected) + HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Entry already unprotected??") - if ( ( was_clean ) && ( entry_ptr->is_dirty ) ) { + /* Mark the entry as dirty if appropriate */ + entry_ptr->is_dirty = (entry_ptr->is_dirty || dirtied); + /* Update index for newly dirtied entry */ + if(was_clean && entry_ptr->is_dirty) H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr) - } /* Pin or unpin the entry as requested. */ - if ( pin_entry ) { - + if(pin_entry) { /* Pin the entry from a client */ if(H5C_pin_entry_from_client(cache_ptr, entry_ptr) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client") - - } else if ( unpin_entry ) { - + } else if(unpin_entry) { /* Unpin the entry from a client */ if(H5C_unpin_entry_from_client(cache_ptr, entry_ptr, FALSE) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry by client") - - } + } /* end if */ /* H5C__UPDATE_RP_FOR_UNPROTECT will place the unprotected entry on * the pinned entry list if entry_ptr->is_pinned is TRUE. @@ -5744,16 +5658,11 @@ H5C_unprotect(H5F_t * f, /* if the entry is dirty, 'or' its flush_marker with the set flush flag, * and then add it to the skip list if it isn't there already. */ - - if ( entry_ptr->is_dirty ) { - + if(entry_ptr->is_dirty) { entry_ptr->flush_marker |= set_flush_marker; - - if ( ! 
(entry_ptr->in_slist) ) { - + if(!entry_ptr->in_slist) H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL) - } - } + } /* end if */ /* this implementation of the "deleted" option is a bit inefficient, as * we re-insert the entry to be deleted into the replacement policy @@ -5798,10 +5707,8 @@ H5C_unprotect(H5F_t * f, entry_ptr->free_file_space_on_destroy = free_file_space; /* Set the "take ownership" flag for the flush, if needed */ - if ( take_ownership) { - + if(take_ownership) flush_flags |= H5C__TAKE_OWNERSHIP_FLAG; - } if ( H5C_flush_single_entry(f, primary_dxpl_id, @@ -7854,7 +7761,6 @@ H5C_flush_invalidate_cache(H5F_t * f, * may be created by the flush call backs. Thus it is possible * that the slist will not be empty after we finish the scan. */ - if ( cache_ptr->slist_len == 0 ) { node_ptr = NULL; @@ -7875,7 +7781,6 @@ H5C_flush_invalidate_cache(H5F_t * f, HDassert( next_entry_ptr->in_slist ); } - #if H5C_DO_SANITY_CHECKS /* Depending on circumstances, H5C_flush_single_entry() will * remove dirty entries from the slist as it flushes them. @@ -7886,10 +7791,12 @@ H5C_flush_invalidate_cache(H5F_t * f, initial_slist_size = cache_ptr->slist_size; /* There is also the possibility that entries will be - * dirtied, resized, and/or moved as the result of - * calls to the flush callbacks. We use the slist_len_increase - * and slist_size_increase increase fields in struct H5C_t - * to track these changes for purpose of sanity checking. + * dirtied, resized, moved, and/or removed from the cache + * as the result of calls to the flush callbacks. We use + * the slist_len_increase and slist_size_increase increase + * fields in struct H5C_t to track these changes for purpose + * of sanity checking. + * * To this end, we must zero these fields before we start * the pass through the slist. */ @@ -7988,7 +7895,7 @@ H5C_flush_invalidate_cache(H5F_t * f, ( cache_ptr->num_last_entries >= cache_ptr->slist_len ) ) ) { - #if H5C_DO_SANITY_CHECKS +#if H5C_DO_SANITY_CHECKS /* update actual_slist_len & actual_slist_size before * the flush. Note that the entry will be removed * from the slist after the flush, and thus may be @@ -8001,7 +7908,7 @@ H5C_flush_invalidate_cache(H5F_t * f, */ actual_slist_len++; actual_slist_size += entry_ptr->size; - #endif /* H5C_DO_SANITY_CHECKS */ +#endif /* H5C_DO_SANITY_CHECKS */ if ( entry_ptr->is_protected ) { @@ -8028,8 +7935,8 @@ H5C_flush_invalidate_cache(H5F_t * f, FALSE); if ( status < 0 ) { - /* This shouldn't happen -- if it does, we are toast - * so just scream and die. + /* This shouldn't happen -- if it does, we + * are toast so just scream and die. */ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \ @@ -8054,8 +7961,8 @@ H5C_flush_invalidate_cache(H5F_t * f, TRUE); if ( status < 0 ) { - /* This shouldn't happen -- if it does, we are toast so - * just scream and die. + /* This shouldn't happen -- if it does, we + * are toast so just scream and die. */ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \ @@ -8123,8 +8030,9 @@ H5C_flush_invalidate_cache(H5F_t * f, if ( entry_ptr->is_protected ) { - /* we have major problems -- but lets flush and destroy - * everything we can before we flag an error. + /* we have major problems -- but lets flush and + * destroy everything we can before we flag an + * error. */ protected_entries++; @@ -8148,8 +8056,8 @@ H5C_flush_invalidate_cache(H5F_t * f, TRUE); if ( status < 0 ) { - /* This shouldn't happen -- if it does, we are toast so - * just scream and die. 
+ /* This shouldn't happen -- if it does, + * we are toast so just scream and die. */ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \ @@ -8304,10 +8212,10 @@ done: * primary_dxpl_id, and secondary_dxpl_id are all irrelevent, * and the call can't be part of a sequence of flushes. * - * If the caller knows the address of the TBBT node at + * If the caller knows the address of the skip list node at * which the target entry resides, it can avoid a lookup * by supplying that address in the tgt_node_ptr parameter. - * If this parameter is NULL, the function will do a TBBT + * If this parameter is NULL, the function will do a skip list * search for the entry instead. * * The function does nothing silently if there is no entry @@ -8332,9 +8240,9 @@ H5C_flush_single_entry(H5F_t * f, hbool_t del_entry_from_slist_on_destroy) { H5C_t * cache_ptr = f->shared->cache; - hbool_t destroy; - hbool_t clear_only; - hbool_t take_ownership; + hbool_t destroy; /* external flag */ + hbool_t clear_only; /* external flag */ + hbool_t take_ownership; /* external flag */ hbool_t was_dirty; hbool_t destroy_entry; herr_t status; @@ -8351,12 +8259,13 @@ H5C_flush_single_entry(H5F_t * f, HDassert( H5F_addr_defined(addr) ); HDassert( first_flush_ptr ); - destroy = ( (flags & H5C__FLUSH_INVALIDATE_FLAG) != 0 ); - clear_only = ( (flags & H5C__FLUSH_CLEAR_ONLY_FLAG) != 0); - take_ownership = ( (flags & H5C__TAKE_OWNERSHIP_FLAG) != 0); + /* setup external flags from the flags parameter */ + destroy = ((flags & H5C__FLUSH_INVALIDATE_FLAG) != 0); + clear_only = ((flags & H5C__FLUSH_CLEAR_ONLY_FLAG) != 0); + take_ownership = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0); /* Set the flag for destroying the entry, based on the 'take ownership' - * and 'destroy' flags + * and 'destroy' flags */ if(take_ownership) destroy_entry = FALSE; @@ -8366,6 +8275,7 @@ H5C_flush_single_entry(H5F_t * f, /* attempt to find the target entry in the hash table */ H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL) + /* run initial sanity checks */ #if H5C_DO_SANITY_CHECKS if ( entry_ptr != NULL ) { @@ -8526,11 +8436,10 @@ H5C_flush_single_entry(H5F_t * f, #if H5C_DO_SANITY_CHECKS if ( ( entry_ptr->is_dirty ) && ( cache_ptr->check_write_permitted == NULL ) && - ( ! (cache_ptr->write_permitted) ) ) { + ( ! (cache_ptr->write_permitted) ) ) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ "Write when writes are always forbidden!?!?!") - } #endif /* H5C_DO_SANITY_CHECKS */ if ( destroy ) { @@ -8598,12 +8507,11 @@ H5C_flush_single_entry(H5F_t * f, * If that ceases to be the case, further * tests will be necessary. */ - if ( cache_ptr->aux_ptr != NULL ) { + if ( cache_ptr->aux_ptr != NULL ) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ "resize/move in serialize occured in parallel case.") - } } #endif /* H5_HAVE_PARALLEL */ } @@ -8701,6 +8609,7 @@ H5C_flush_single_entry(H5F_t * f, } + /* reset the flush_in progress flag */ entry_ptr->flush_in_progress = FALSE; } @@ -8740,10 +8649,6 @@ done: * * Programmer: John Mainzer, 5/18/04 * - * QAK -- 1/31/08 - * Added initialization for the new free_file_space_on_destroy - * field. 
- * *------------------------------------------------------------------------- */ static void * @@ -8941,7 +8846,6 @@ H5C_make_space_in_cache(H5F_t * f, if ( write_permitted ) { initial_list_len = cache_ptr->LRU_list_len; - entry_ptr = cache_ptr->LRU_tail_ptr; if ( cache_ptr->index_size >= cache_ptr->max_cache_size ) { @@ -8997,6 +8901,7 @@ H5C_make_space_in_cache(H5F_t * f, cache_ptr->entries_scanned_to_make_space++; } #endif /* H5C_COLLECT_CACHE_STATS */ + result = H5C_flush_single_entry(f, primary_dxpl_id, secondary_dxpl_id, diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h index 0fdaa79..ae6bdad 100644 --- a/src/H5Cpkg.h +++ b/src/H5Cpkg.h @@ -177,8 +177,8 @@ * entry is flushed to disk. * * - * In cases where memory is plentiful, and performance is an issue, it - * is useful to disable all cache evictions, and thereby postpone metadata + * In cases where memory is plentiful, and performance is an issue, it may + * be useful to disable all cache evictions, and thereby postpone metadata * writes. The following field is used to implement this. * * evictions_enabled: Boolean flag that is initialized to TRUE. When @@ -283,14 +283,14 @@ * some optimizations when I get to it. * * num_last_entries: The number of entries in the cache that can only be - * flushed after all other entries in the cache have - * been flushed. At this time, this will only ever be - * one entry (the superblock), and the code has been - * protected with HDasserts to enforce this. This restraint - * can certainly be relaxed in the future if the need for - * multiple entries being flushed last arises, though - * explicit tests for that case should be added when said - * HDasserts are removed. + * flushed after all other entries in the cache have + * been flushed. At this time, this will only ever be + * one entry (the superblock), and the code has been + * protected with HDasserts to enforce this. This restraint + * can certainly be relaxed in the future if the need for + * multiple entries being flushed last arises, though + * explicit tests for that case should be added when said + * HDasserts are removed. * * With the addition of the fractal heap, the cache must now deal with * the case in which entries may be dirtied, moved, or have their sizes @@ -354,7 +354,8 @@ * flush. * * Since pinned entries cannot be evicted, they must be kept on a pinned - * entry list, instead of being entrusted to the replacement policy code. + * entry list (pel), instead of being entrusted to the replacement policy + * code. * * Maintaining the pinned entry list requires the following fields: * @@ -382,7 +383,8 @@ * * While there has been interest in several replacement policies for * this cache, the initial development schedule is tight. Thus I have - * elected to support only a modified LRU policy for the first cut. + * elected to support only a modified LRU (least recently used) policy + * for the first cut. * * To further simplify matters, I have simply included the fields needed * by the modified LRU in this structure. When and if we add support for @@ -679,7 +681,7 @@ * equal to the array index has been evicted from the cache in * the current epoch. * - * moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells + * moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells * are used to record the number of times an entry with type * id equal to the array index has been moved in the current * epoch. @@ -714,7 +716,7 @@ * with type id equal to the array index has been flushed while * pinned in the current epoch. 
* - * pinned_cleared: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The + * pinned_clears: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The * cells are used to record the number of times an entry * with type id equal to the array index has been cleared while * pinned in the current epoch. @@ -2350,7 +2352,7 @@ if ( (cache_ptr)->index_size != \ HDassert( (new_size) <= (cache_ptr)->slist_size ); \ HDassert( ( (cache_ptr)->slist_len > 1 ) || \ ( (cache_ptr)->slist_size == (new_size) ) ); \ -} /* H5C__REMOVE_ENTRY_FROM_SLIST */ +} /* H5C__UPDATE_SLIST_FOR_SIZE_CHANGE */ #else /* H5C_DO_SANITY_CHECKS */ @@ -2371,7 +2373,7 @@ if ( (cache_ptr)->index_size != \ HDassert( (new_size) <= (cache_ptr)->slist_size ); \ HDassert( ( (cache_ptr)->slist_len > 1 ) || \ ( (cache_ptr)->slist_size == (new_size) ) ); \ -} /* H5C__REMOVE_ENTRY_FROM_SLIST */ +} /* H5C__UPDATE_SLIST_FOR_SIZE_CHANGE */ #endif /* H5C_DO_SANITY_CHECKS */ diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h index 38b9469..c9679f4 100644 --- a/src/H5Cprivate.h +++ b/src/H5Cprivate.h @@ -292,16 +292,15 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr, * dirtied while protected. * * This field is set to FALSE in the protect call, and may - * be set to TRUE by the - * H5C_mark_entry_dirty() - * call at an time prior to the unprotect call. + * be set to TRUE by the H5C_mark_entry_dirty() call at any + * time prior to the unprotect call. * - * The H5C_mark_entry_dirty() call exists - * as a convenience function for the fractal heap code which - * may not know if an entry is protected or pinned, but knows - * that is either protected or pinned. The dirtied field was - * added as in the parallel case, it is necessary to know - * whether a protected entry was dirty prior to the protect call. + * The H5C_mark_entry_dirty() call exists as a convenience + * function for the fractal heap code which may not know if + * an entry is protected or pinned, but knows that is either + * protected or pinned. The dirtied field was added as in + * the parallel case, it is necessary to know whether a + * protected entry is dirty prior to the protect call. * * is_protected: Boolean flag indicating whether this entry is protected * (or locked, to use more conventional terms). When it is @@ -372,21 +371,22 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr, * the entry is flushed for whatever reason. * * flush_me_last: Boolean flag indicating that this entry should not be - * flushed from the cache until all other entries without - * the flush_me_last flag set have been flushed. + * flushed from the cache until all other entries without + * the flush_me_last flag set have been flushed. * * flush_me_collectively: Boolean flag indicating that this entry needs - * to be flushed collectively when in a parallel - * situation. + * to be flushed collectively when in a parallel situation. * - * Note: At this time, the flush_me_last and flush_me_collectively - * flags will only be applied to one entry, the superblock, - * and the code utilizing these flags is protected with HDasserts - * to enforce this. This restraint can certainly be relaxed in - * the future if the the need for multiple entries getting flushed - * last or collectively arises, though the code allowing for that - * will need to be expanded and tested appropriately if that - * functionality is desired. 
+ * Note: + * + * At this time, the flush_me_last and flush_me_collectively + * flags will only be applied to one entry, the superblock, + * and the code utilizing these flags is protected with HDasserts + * to enforce this. This restraint can certainly be relaxed in + * the future if the the need for multiple entries getting flushed + * last or collectively arises, though the code allowing for that + * will need to be expanded and tested appropriately if that + * functionality is desired. * * clear_on_unprotect: Boolean flag used only in PHDF5. When H5C is used * to implement the metadata cache In the parallel case, only @@ -608,7 +608,7 @@ typedef struct H5C_cache_entry_t #ifdef H5_HAVE_PARALLEL hbool_t flush_me_collectively; hbool_t clear_on_unprotect; - hbool_t flush_immediately; + hbool_t flush_immediately; #endif /* H5_HAVE_PARALLEL */ hbool_t flush_in_progress; hbool_t destroy_in_progress; @@ -617,10 +617,10 @@ typedef struct H5C_cache_entry_t /* fields supporting the 'flush dependency' feature: */ struct H5C_cache_entry_t * flush_dep_parent; - uint64_t child_flush_dep_height_rc[H5C__NUM_FLUSH_DEP_HEIGHTS]; - unsigned flush_dep_height; - hbool_t pinned_from_client; - hbool_t pinned_from_cache; + uint64_t child_flush_dep_height_rc[H5C__NUM_FLUSH_DEP_HEIGHTS]; + unsigned flush_dep_height; + hbool_t pinned_from_client; + hbool_t pinned_from_cache; /* fields supporting the hash table: */ |
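
For readers skimming the reflowed hunks above, two recurring idioms may be easier to see in isolation: decoding a packed flags word into booleans, as the H5C_unprotect() hunk does, and the guarded division behind H5C_get_cache_hit_rate(). The following is a minimal standalone sketch of those idioms, not HDF5 code; every EX_* macro and ex_* name is an illustrative invention.

#include <stdbool.h>
#include <stdio.h>

#define EX_DIRTIED_FLAG   0x0001u
#define EX_PIN_ENTRY_FLAG 0x0002u

typedef struct {
    long hits;      /* number of cache hits observed */
    long accesses;  /* total lookups: hits + misses  */
} ex_cache_stats_t;

static double
ex_hit_rate(const ex_cache_stats_t *stats)
{
    /* Guard against division by zero, in the spirit of the reflowed
     * H5C_get_cache_hit_rate() hunk, which reports 0.0 when no
     * accesses have been recorded yet. */
    if (stats->accesses > 0)
        return (double)stats->hits / (double)stats->accesses;
    return 0.0;
}

int
main(void)
{
    /* Decode a packed flags word into booleans, in the style of the
     * "(flags & H5C__DIRTIED_FLAG) != 0" assignments in H5C_unprotect(). */
    unsigned flags     = EX_DIRTIED_FLAG;
    bool     dirtied   = (flags & EX_DIRTIED_FLAG) != 0;
    bool     pin_entry = (flags & EX_PIN_ENTRY_FLAG) != 0;

    ex_cache_stats_t stats = { .hits = 75, .accesses = 100 };

    printf("dirtied=%d pin_entry=%d hit_rate=%.2f\n",
           (int)dirtied, (int)pin_entry, ex_hit_rate(&stats));
    return 0;
}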