Diffstat (limited to 'src/H5C.c')
-rw-r--r--   src/H5C.c   126
1 file changed, 86 insertions(+), 40 deletions(-)
@@ -386,6 +386,7 @@ H5C_create(size_t max_cache_size,
     cache_ptr->cache_full = FALSE;
     cache_ptr->size_decreased = FALSE;
     cache_ptr->resize_in_progress = FALSE;
+    cache_ptr->msic_in_progress = FALSE;
 
     (cache_ptr->resize_ctl).version = H5C__CURR_AUTO_SIZE_CTL_VER;
     (cache_ptr->resize_ctl).rpt_fcn = NULL;
@@ -925,12 +926,6 @@ H5C_expunge_entry(H5F_t *f, hid_t dxpl_id, const H5C_class_t *type,
         HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is protected")
     if(entry_ptr->is_pinned)
         HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is pinned")
-#ifdef H5_HAVE_PARALLEL
-    if(entry_ptr->coll_access) {
-        entry_ptr->coll_access = FALSE;
-        H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
-    } /* end if */
-#endif /* H5_HAVE_PARALLEL */
 
     /* If we get this far, call H5C__flush_single_entry() with the
      * H5C__FLUSH_INVALIDATE_FLAG and the H5C__FLUSH_CLEAR_ONLY_FLAG.
@@ -1465,10 +1460,15 @@ H5C_insert_entry(H5F_t * f,
     entry_ptr->prefetched = FALSE;
     entry_ptr->prefetch_type_id = 0;
     entry_ptr->age = 0;
+    entry_ptr->prefetched_dirty = FALSE;
 #ifndef NDEBUG  /* debugging field */
     entry_ptr->serialization_count = 0;
 #endif /* NDEBUG */
 
+    entry_ptr->tl_next = NULL;
+    entry_ptr->tl_prev = NULL;
+    entry_ptr->tag_info = NULL;
+
     /* Apply tag to newly inserted entry */
     if(H5C__tag_entry(cache_ptr, entry_ptr, dxpl_id) < 0)
         HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "Cannot tag metadata entry")
@@ -2437,7 +2437,7 @@ H5C_protect(H5F_t * f,
        marked as collective, and is clean, it is possible that
        other processes will not have it in its cache and will
        expect a bcast of the entry from process 0. So process 0
-       will bcast the entry to all other ranks. Ranks that do have
+       will bcast the entry to all other ranks. Ranks that _do_ have
        the entry in their cache still have to participate in the
        bcast. */
 #ifdef H5_HAVE_PARALLEL
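The reworded comment in the H5C_protect() hunk above restates a collective-read rule: because MPI_Bcast is collective over the communicator, a rank that already holds the entry in its cache must still join the broadcast, or rank 0 would stall. A minimal sketch of that participation rule, using hypothetical names (bcast_entry, entry_buf, have_entry) rather than the cache's actual plumbing:

    #include <mpi.h>
    #include <stdlib.h>

    /* All ranks call MPI_Bcast unconditionally.  A rank that already holds
     * the entry receives into a scratch buffer it immediately discards;
     * opting out of a collective operation is not an option.
     */
    static int bcast_entry(void *entry_buf, int entry_size, int have_entry, MPI_Comm comm)
    {
        void *recv_buf = entry_buf;
        void *scratch = NULL;
        int mpi_rank;

        MPI_Comm_rank(comm, &mpi_rank);
        if(mpi_rank != 0 && have_entry) {
            if(NULL == (scratch = malloc((size_t)entry_size)))
                return -1;
            recv_buf = scratch;             /* keep the cached copy intact */
        }
        MPI_Bcast(recv_buf, entry_size, MPI_BYTE, 0, comm);
        free(scratch);
        return 0;
    }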
@@ -4586,7 +4586,7 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
            ( (entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID ) &&
            ( bytes_evicted < eviction_size_limit ) )
     {
-        hbool_t corked = FALSE;
+        hbool_t skipping_entry = FALSE;
 
         HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
         HDassert( ! (entry_ptr->is_protected) );
@@ -4600,9 +4600,11 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
             prev_is_dirty = prev_ptr->is_dirty;
 
         if(entry_ptr->is_dirty ) {
+            HDassert(!entry_ptr->prefetched_dirty);
+
             /* dirty corked entry is skipped */
             if(entry_ptr->tag_info && entry_ptr->tag_info->corked)
-                corked = TRUE;
+                skipping_entry = TRUE;
             else {
                 /* reset entries_removed_counter and
                  * last_entry_removed_ptr prior to the call to
@@ -4622,16 +4624,22 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
                     restart_scan = TRUE;
             } /* end else */
         } /* end if */
-        else {
+        else if(!entry_ptr->prefetched_dirty) {
             bytes_evicted += entry_ptr->size;
 
             if(H5C__flush_single_entry(f, dxpl_id, entry_ptr,
                     H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0 )
                 HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
-        }
+        } /* end else-if */
+        else {
+            HDassert(!entry_ptr->is_dirty);
+            HDassert(entry_ptr->prefetched_dirty);
+
+            skipping_entry = TRUE;
+        } /* end else */
 
         if(prev_ptr != NULL) {
-            if(corked)   /* dirty corked entry is skipped */
+            if(skipping_entry)
                 entry_ptr = prev_ptr;
             else if(restart_scan || (prev_ptr->is_dirty != prev_is_dirty)
                     || (prev_ptr->next != next_ptr)
@@ -4691,10 +4699,10 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
 
             prev_ptr = entry_ptr->prev;
 
-            if(!(entry_ptr->is_dirty)) {
+            if(!(entry_ptr->is_dirty) && !(entry_ptr->prefetched_dirty))
                 if(H5C__flush_single_entry(f, dxpl_id, entry_ptr,
                         H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
                     HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush clean entry")
-            } /* end if */
+
             /* just skip the entry if it is dirty, as we can't do
              * anything with it now since we can't write.
              *
@@ -6033,10 +6041,6 @@ H5C__flush_single_entry(H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_ptr,
     else
         destroy_entry = destroy;
 
-#ifdef H5_HAVE_PARALLEL
-    HDassert(FALSE == entry_ptr->coll_access);
-#endif
-
     /* we will write the entry to disk if it exists, is dirty, and if the
      * clear only flag is not set.
      */
@@ -6232,9 +6236,11 @@ H5C__flush_single_entry(H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_ptr,
      *
      * 2) Delete it from the skip list if requested.
      *
-     * 3) Update the replacement policy for eviction
+     * 3) Delete it from the collective read access list.
      *
-     * 4) Remove it from the tag list for this object
+     * 4) Update the replacement policy for eviction
+     *
+     * 5) Remove it from the tag list for this object
      *
      * Finally, if the destroy_entry flag is set, discard the
      * entry.
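The eviction hunks above fold the old corked test into a more general skipping_entry flag and add prefetched_dirty entries to the set the scan must leave in place. The surrounding loop is the subtle part: H5C__flush_single_entry() can fire client callbacks that modify the LRU list, so the scan snapshots the predecessor's state before each flush and restarts from the tail when the snapshot goes stale. A simplified sketch of that pattern, with a hypothetical entry_t and flush_entry() standing in for H5C_cache_entry_t and H5C__flush_single_entry(), and without the entries_removed_counter bookkeeping the real code also performs:

    typedef struct entry_t {
        struct entry_t *next;
        struct entry_t *prev;
        int is_dirty;
        int skip;               /* corked or prefetched-dirty: leave in place */
    } entry_t;

    extern int flush_entry(entry_t *e);     /* may free 'e' and mutate the list */

    static void evict_scan(entry_t **lru_tail_ptr)
    {
        entry_t *entry = *lru_tail_ptr;

        while(entry != NULL) {
            entry_t *prev = entry->prev;            /* snapshot before flushing */
            entry_t *next = entry->next;
            int prev_was_dirty = (prev != NULL) ? prev->is_dirty : 0;
            int skipped = entry->skip;

            if(!skipped)
                (void)flush_entry(entry);           /* 'entry' may be gone now */

            if(prev == NULL)
                break;
            else if(skipped ||
                    ((prev->is_dirty == prev_was_dirty) && (prev->next == next)))
                entry = prev;           /* predecessor snapshot still valid */
            else
                entry = *lru_tail_ptr;  /* list changed under us: restart scan */
        }
    }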
@@ -6244,6 +6250,14 @@ H5C__flush_single_entry(H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_ptr,
         if(entry_ptr->in_slist && del_from_slist_on_destroy)
             H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush)
 
+#ifdef H5_HAVE_PARALLEL
+        /* Check for collective read access flag */
+        if(entry_ptr->coll_access) {
+            entry_ptr->coll_access = FALSE;
+            H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
+        } /* end if */
+#endif /* H5_HAVE_PARALLEL */
+
         H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, FAIL)
 
         /* Remove entry from tag list */
@@ -6290,7 +6304,8 @@ H5C__flush_single_entry(H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_ptr,
                 HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag cleared")
 
         /* Propagate the clean flag up the flush dependency chain if appropriate */
-        HDassert(entry_ptr->flush_dep_ndirty_children == 0);
+        if(entry_ptr->flush_dep_ndirty_children != 0)
+            HDassert(entry_ptr->flush_dep_ndirty_children == 0);
         if(entry_ptr->flush_dep_nparents > 0)
             if(H5C__mark_flush_dep_clean(entry_ptr) < 0)
                 HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep clean flag")
@@ -6822,10 +6837,15 @@ H5C_load_entry(H5F_t * f,
     entry->prefetched = FALSE;
     entry->prefetch_type_id = 0;
     entry->age = 0;
+    entry->prefetched_dirty = FALSE;
 #ifndef NDEBUG  /* debugging field */
     entry->serialization_count = 0;
 #endif /* NDEBUG */
 
+    entry->tl_next = NULL;
+    entry->tl_prev = NULL;
+    entry->tag_info = NULL;
+
     H5C__RESET_CACHE_ENTRY_STATS(entry);
 
     ret_value = thing;
@@ -6864,13 +6884,6 @@ done:
  *             Thus the function simply does its best, returning success
  *             unless an error is encountered.
  *
- *             The primary_dxpl_id and secondary_dxpl_id parameters
- *             specify the dxpl_ids used on the first write occasioned
- *             by the call (primary_dxpl_id), and on all subsequent
- *             writes (secondary_dxpl_id).  This is useful in the metadata
- *             cache, but may not be needed elsewhere.  If so, just use the
- *             same dxpl_id for both parameters.
- *
  *             Observe that this function cannot occasion a read.
  *
  * Return:     Non-negative on success/Negative on failure.
@@ -6886,11 +6899,13 @@ H5C__make_space_in_cache(H5F_t *f, hid_t dxpl_id, size_t space_needed,
     H5C_t *cache_ptr = f->shared->cache;
 #if H5C_COLLECT_CACHE_STATS
     int32_t clean_entries_skipped = 0;
+    int32_t dirty_pf_entries_skipped = 0;
     int32_t total_entries_scanned = 0;
 #endif /* H5C_COLLECT_CACHE_STATS */
     uint32_t entries_examined = 0;
     uint32_t initial_list_len;
     size_t empty_space;
+    hbool_t reentrant_call = FALSE;
     hbool_t prev_is_dirty = FALSE;
     hbool_t didnt_flush_entry = FALSE;
     hbool_t restart_scan;
@@ -6908,6 +6923,18 @@ H5C__make_space_in_cache(H5F_t *f, hid_t dxpl_id, size_t space_needed,
     HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
     HDassert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size));
 
+    /* check to see if cache_ptr->msic_in_progress is TRUE.  If it is, this
+     * is a re-entrant call via a client callback called in the make
+     * space in cache process.  To avoid an infinite recursion, set
+     * reentrant_call to TRUE, and goto done.
+     */
+    if(cache_ptr->msic_in_progress) {
+        reentrant_call = TRUE;
+        HGOTO_DONE(SUCCEED);
+    } /* end if */
+
+    cache_ptr->msic_in_progress = TRUE;
+
     if ( write_permitted ) {
         restart_scan = FALSE;
         initial_list_len = cache_ptr->LRU_list_len;
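The msic_in_progress flag introduced above is a plain re-entrancy guard: flushing an entry can invoke client callbacks, and a callback that loads or dirties metadata can land back in H5C__make_space_in_cache(), recursing without bound. Only the outermost call may clear the flag, which is why the reentrant_call local exists. The pattern, reduced to its core (hypothetical names, no HDF5 error macros):

    #include <stdbool.h>

    typedef struct cache_t {
        bool msic_in_progress;      /* set while a make-space pass is running */
        /* ... */
    } cache_t;

    static int make_space(cache_t *cache)
    {
        bool reentrant_call = false;
        int ret_value = 0;

        /* Re-entrant call via a client callback: return rather than recurse */
        if(cache->msic_in_progress) {
            reentrant_call = true;
            goto done;
        }
        cache->msic_in_progress = true;

        /* ... scan the LRU and flush/evict entries; callbacks fired here may
         * call make_space() again, which now returns immediately ... */

    done:
        if(!reentrant_call)             /* only the outermost call clears it */
            cache->msic_in_progress = false;
        return ret_value;
    }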
@@ -6954,7 +6981,8 @@ H5C__make_space_in_cache(H5F_t *f, hid_t dxpl_id, size_t space_needed,
                 didnt_flush_entry = TRUE;
 
             } else if ( ( (entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID ) &&
-                        ( ! entry_ptr->flush_in_progress ) ) {
+                        ( ! entry_ptr->flush_in_progress ) &&
+                        ( ! entry_ptr->prefetched_dirty ) ) {
 
                 didnt_flush_entry = FALSE;
@@ -6980,13 +7008,6 @@ H5C__make_space_in_cache(H5F_t *f, hid_t dxpl_id, size_t space_needed,
                     cache_ptr->entries_removed_counter = 0;
                     cache_ptr->last_entry_removed_ptr  = NULL;
 
-#ifdef H5_HAVE_PARALLEL
-                    if(TRUE == entry_ptr->coll_access) {
-                        entry_ptr->coll_access = FALSE;
-                        H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
-                    } /* end if */
-#endif
-
                     if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__NO_FLAGS_SET) < 0)
                         HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
@@ -7020,10 +7041,16 @@ H5C__make_space_in_cache(H5F_t *f, hid_t dxpl_id, size_t space_needed,
 
             } else {
 
-                /* Skip epoch markers and entries that are in the process
-                 * of being flushed.
+                /* Skip epoch markers, entries that are in the process
+                 * of being flushed, and entries marked as prefetched_dirty
+                 * (occurs in the R/O case only).
                  */
                 didnt_flush_entry = TRUE;
+
+#if H5C_COLLECT_CACHE_STATS
+                if(entry_ptr->prefetched_dirty)
+                    dirty_pf_entries_skipped++;
+#endif /* H5C_COLLECT_CACHE_STATS */
             }
 
             if ( prev_ptr != NULL ) {
@@ -7088,6 +7115,7 @@ H5C__make_space_in_cache(H5F_t *f, hid_t dxpl_id, size_t space_needed,
     cache_ptr->calls_to_msic++;
 
     cache_ptr->total_entries_skipped_in_msic += clean_entries_skipped;
+    cache_ptr->total_dirty_pf_entries_skipped_in_msic += dirty_pf_entries_skipped;
     cache_ptr->total_entries_scanned_in_msic += total_entries_scanned;
 
     if ( clean_entries_skipped > cache_ptr->max_entries_skipped_in_msic ) {
@@ -7095,6 +7123,9 @@ H5C__make_space_in_cache(H5F_t *f, hid_t dxpl_id, size_t space_needed,
         cache_ptr->max_entries_skipped_in_msic = clean_entries_skipped;
     }
 
+    if(dirty_pf_entries_skipped > cache_ptr->max_dirty_pf_entries_skipped_in_msic)
+        cache_ptr->max_dirty_pf_entries_skipped_in_msic = dirty_pf_entries_skipped;
+
     if ( total_entries_scanned > cache_ptr->max_entries_scanned_in_msic ) {
 
         cache_ptr->max_entries_scanned_in_msic = total_entries_scanned;
@@ -7163,6 +7194,12 @@ H5C__make_space_in_cache(H5F_t *f, hid_t dxpl_id, size_t space_needed,
     }
 
 done:
+    /* Sanity checks */
+    HDassert(cache_ptr->msic_in_progress);
+    if(!reentrant_call)
+        cache_ptr->msic_in_progress = FALSE;
+    HDassert((!reentrant_call) || (cache_ptr->msic_in_progress));
+
     FUNC_LEAVE_NOAPI(ret_value)
 } /* H5C__make_space_in_cache() */
@@ -8723,12 +8760,21 @@ H5C_remove_entry(void *_entry)
     /* Update the cache internal data structures as appropriate for a destroy.
      * Specifically:
      *  1) Delete it from the index
-     *  2) Update the replacement policy for eviction
-     *  3) Remove it from the tag list for this object
+     *  2) Delete it from the collective read access list
+     *  3) Update the replacement policy for eviction
+     *  4) Remove it from the tag list for this object
      */
     H5C__DELETE_FROM_INDEX(cache, entry, FAIL)
 
+#ifdef H5_HAVE_PARALLEL
+    /* Check for collective read access flag */
+    if(entry->coll_access) {
+        entry->coll_access = FALSE;
+        H5C__REMOVE_FROM_COLL_LIST(cache, entry, FAIL)
+    } /* end if */
+#endif /* H5_HAVE_PARALLEL */
+
     H5C__UPDATE_RP_FOR_EVICTION(cache, entry, FAIL)
 
     /* Remove entry from tag list */
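Taken together, these hunks move collective-read-list removal out of the individual callers (H5C_expunge_entry(), H5C__make_space_in_cache()) and into the two central teardown paths, H5C__flush_single_entry() and H5C_remove_entry(), so that no eviction route can leave a freed entry on the list. The diff only shows the H5C__REMOVE_FROM_COLL_LIST macro being invoked; below is a self-contained sketch of the unlink such a macro plausibly performs, with hypothetical coll_entry_t/coll_cache_t types (only the coll_access flag and the macro name come from the diff):

    #ifdef H5_HAVE_PARALLEL
    typedef struct coll_entry_t {
        struct coll_entry_t *coll_next;
        struct coll_entry_t *coll_prev;
        int coll_access;                /* nonzero while on the collective list */
    } coll_entry_t;

    typedef struct coll_cache_t {
        coll_entry_t *coll_head;
        coll_entry_t *coll_tail;
    } coll_cache_t;

    static void remove_from_coll_list(coll_cache_t *cache, coll_entry_t *entry)
    {
        if(!entry->coll_access)
            return;                     /* not on the list: nothing to do */
        entry->coll_access = 0;

        /* unlink from the doubly linked collective read access list */
        if(entry->coll_prev != NULL)
            entry->coll_prev->coll_next = entry->coll_next;
        else
            cache->coll_head = entry->coll_next;
        if(entry->coll_next != NULL)
            entry->coll_next->coll_prev = entry->coll_prev;
        else
            cache->coll_tail = entry->coll_prev;
        entry->coll_next = entry->coll_prev = NULL;
    }
    #endif /* H5_HAVE_PARALLEL */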