author    Quincey Koziol <quincey@koziol.cc>    2023-05-12 19:52:11 (GMT)
committer GitHub <noreply@github.com>           2023-05-12 19:52:11 (GMT)
commit    0d4a12d7cd0f0c10b385533365fc1f3ebeef8e74 (patch)
tree      dd065c47aa9ddd88cbb8a7d86391c0b4d10e1313 /src
parent    2a2c2d9ebea6e4a7b5464b54ae412e5dd659eed1 (diff)
download  hdf5-0d4a12d7cd0f0c10b385533365fc1f3ebeef8e74.zip
          hdf5-0d4a12d7cd0f0c10b385533365fc1f3ebeef8e74.tar.gz
          hdf5-0d4a12d7cd0f0c10b385533365fc1f3ebeef8e74.tar.bz2
Move functions into more focused source code modules (#2936)
* Move functions into more focused source code modules, along with a small
  number of directly related secondary changes. No actual changes to the
  contents of any moved routines.

Signed-off-by: Quincey Koziol <quincey@koziol.cc>

* Committing clang-format changes

---------

Signed-off-by: Quincey Koziol <quincey@koziol.cc>
Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com>
Diffstat (limited to 'src')
-rw-r--r--  src/CMakeLists.txt |    2
-rw-r--r--  src/H5C.c          | 6807
-rw-r--r--  src/H5Cdbg.c       |  483
-rw-r--r--  src/H5Centry.c     | 4363
-rw-r--r--  src/H5Cimage.c     |  588
-rw-r--r--  src/H5Cint.c       | 2578
-rw-r--r--  src/H5Clog.c       |    4
-rw-r--r--  src/H5Clog.h       |    4
-rw-r--r--  src/H5Clog_json.c  |    8
-rw-r--r--  src/H5Clog_trace.c |    8
-rw-r--r--  src/H5Cmpio.c      |    4
-rw-r--r--  src/H5Cpkg.h       |   29
-rw-r--r--  src/H5Cprivate.h   |    2
-rw-r--r--  src/H5Cquery.c     |    1
-rw-r--r--  src/H5Ctag.c       |   66
-rw-r--r--  src/Makefile.am    |    3
16 files changed, 7508 insertions, 7442 deletions
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 4edcc98..fc21eac 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -78,8 +78,10 @@ IDE_GENERATED_PROPERTIES ("H5B2" "${H5B2_HDRS}" "${H5B2_SOURCES}" )
set (H5C_SOURCES
${HDF5_SRC_DIR}/H5C.c
${HDF5_SRC_DIR}/H5Cdbg.c
+ ${HDF5_SRC_DIR}/H5Centry.c
${HDF5_SRC_DIR}/H5Cepoch.c
${HDF5_SRC_DIR}/H5Cimage.c
+ ${HDF5_SRC_DIR}/H5Cint.c
${HDF5_SRC_DIR}/H5Clog.c
${HDF5_SRC_DIR}/H5Clog_json.c
${HDF5_SRC_DIR}/H5Clog_trace.c
diff --git a/src/H5C.c b/src/H5C.c
index b80ed98..348e643 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -61,74 +61,26 @@
/* Headers */
/***********/
#include "H5private.h" /* Generic Functions */
+#include "H5ACprivate.h" /* Metadata cache */
#include "H5Cpkg.h" /* Cache */
-#include "H5CXprivate.h" /* API Contexts */
#include "H5Eprivate.h" /* Error handling */
#include "H5Fpkg.h" /* Files */
#include "H5FLprivate.h" /* Free Lists */
-#include "H5Iprivate.h" /* IDs */
#include "H5MFprivate.h" /* File memory management */
#include "H5MMprivate.h" /* Memory management */
-#include "H5Pprivate.h" /* Property lists */
/****************/
/* Local Macros */
/****************/
-#if H5C_DO_MEMORY_SANITY_CHECKS
-#define H5C_IMAGE_EXTRA_SPACE 8
-#define H5C_IMAGE_SANITY_VALUE "DeadBeef"
-#else /* H5C_DO_MEMORY_SANITY_CHECKS */
-#define H5C_IMAGE_EXTRA_SPACE 0
-#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
/******************/
/* Local Typedefs */
/******************/
-/* Alias for pointer to cache entry, for use when allocating sequences of them */
-typedef H5C_cache_entry_t *H5C_cache_entry_ptr_t;
-
/********************/
/* Local Prototypes */
/********************/
-static herr_t H5C__pin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr);
-static herr_t H5C__unpin_entry_real(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp);
-static herr_t H5C__unpin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp);
-static herr_t H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted);
-static herr_t H5C__autoadjust__ageout(H5F_t *f, double hit_rate, enum H5C_resize_status *status_ptr,
- size_t *new_max_cache_size_ptr, hbool_t write_permitted);
-static herr_t H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t *cache_ptr);
-static herr_t H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t *f, hbool_t write_permitted);
-static herr_t H5C__autoadjust__ageout__insert_new_marker(H5C_t *cache_ptr);
-static herr_t H5C__autoadjust__ageout__remove_all_markers(H5C_t *cache_ptr);
-static herr_t H5C__autoadjust__ageout__remove_excess_markers(H5C_t *cache_ptr);
-static herr_t H5C__flash_increase_cache_size(H5C_t *cache_ptr, size_t old_entry_size, size_t new_entry_size);
-static herr_t H5C__flush_invalidate_cache(H5F_t *f, unsigned flags);
-static herr_t H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags);
-static herr_t H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags);
-static void *H5C__load_entry(H5F_t *f,
-#ifdef H5_HAVE_PARALLEL
- hbool_t coll_access,
-#endif /* H5_HAVE_PARALLEL */
- const H5C_class_t *type, haddr_t addr, void *udata);
-
-static herr_t H5C__mark_flush_dep_dirty(H5C_cache_entry_t *entry);
-static herr_t H5C__mark_flush_dep_clean(H5C_cache_entry_t *entry);
-static herr_t H5C__mark_flush_dep_serialized(H5C_cache_entry_t *entry);
-static herr_t H5C__mark_flush_dep_unserialized(H5C_cache_entry_t *entry);
-
-static herr_t H5C__serialize_ring(H5F_t *f, H5C_ring_t ring);
-static herr_t H5C__serialize_single_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr);
-static herr_t H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr);
-static herr_t H5C__verify_len_eoa(H5F_t *f, const H5C_class_t *type, haddr_t addr, size_t *len,
- hbool_t actual);
-
-#ifndef NDEBUG
-static void H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t *entry,
- const H5C_cache_entry_t *base_entry);
-#endif
-
/*********************/
/* Package Variables */
/*********************/
@@ -147,9 +99,6 @@ H5FL_DEFINE(H5C_tag_info_t);
/* Declare a free list to manage the H5C_t struct */
H5FL_DEFINE_STATIC(H5C_t);
-/* Declare a free list to manage arrays of cache entries */
-H5FL_SEQ_DEFINE_STATIC(H5C_cache_entry_ptr_t);
-
/*-------------------------------------------------------------------------
* Function: H5C_create
*
@@ -436,137 +385,6 @@ done:
} /* H5C_create() */
/*-------------------------------------------------------------------------
- * Function: H5C_def_auto_resize_rpt_fcn
- *
- * Purpose: Print results of an automatic cache resize.
- *
- * This function should only be used where HDprintf() behaves
- * well -- i.e. not on Windows.
- *
- * Return: void
- *
- * Programmer: John Mainzer
- * 10/27/04
- *
- *-------------------------------------------------------------------------
- */
-void
-H5C_def_auto_resize_rpt_fcn(H5C_t *cache_ptr,
-#ifndef NDEBUG
- int32_t version,
-#else
- int32_t H5_ATTR_UNUSED version,
-#endif
- double hit_rate, enum H5C_resize_status status, size_t old_max_cache_size,
- size_t new_max_cache_size, size_t old_min_clean_size, size_t new_min_clean_size)
-{
- HDassert(cache_ptr != NULL);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(version == H5C__CURR_AUTO_RESIZE_RPT_FCN_VER);
-
- switch (status) {
- case in_spec:
- HDfprintf(stdout, "%sAuto cache resize -- no change. (hit rate = %lf)\n", cache_ptr->prefix,
- hit_rate);
- break;
-
- case increase:
- HDassert(hit_rate < cache_ptr->resize_ctl.lower_hr_threshold);
- HDassert(old_max_cache_size < new_max_cache_size);
-
- HDfprintf(stdout, "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n",
- cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.lower_hr_threshold);
- HDfprintf(stdout, "%scache size increased from (%zu/%zu) to (%zu/%zu).\n", cache_ptr->prefix,
- old_max_cache_size, old_min_clean_size, new_max_cache_size, new_min_clean_size);
- break;
-
- case flash_increase:
- HDassert(old_max_cache_size < new_max_cache_size);
-
- HDfprintf(stdout, "%sflash cache resize(%d) -- size threshold = %zu.\n", cache_ptr->prefix,
- (int)(cache_ptr->resize_ctl.flash_incr_mode), cache_ptr->flash_size_increase_threshold);
- HDfprintf(stdout, "%s cache size increased from (%zu/%zu) to (%zu/%zu).\n", cache_ptr->prefix,
- old_max_cache_size, old_min_clean_size, new_max_cache_size, new_min_clean_size);
- break;
-
- case decrease:
- HDassert(old_max_cache_size > new_max_cache_size);
-
- switch (cache_ptr->resize_ctl.decr_mode) {
- case H5C_decr__off:
- HDfprintf(stdout, "%sAuto cache resize -- decrease off. HR = %lf\n", cache_ptr->prefix,
- hit_rate);
- break;
-
- case H5C_decr__threshold:
- HDassert(hit_rate > cache_ptr->resize_ctl.upper_hr_threshold);
-
- HDfprintf(stdout, "%sAuto cache resize -- decrease by threshold. HR = %lf > %6.5lf\n",
- cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.upper_hr_threshold);
- HDfprintf(stdout, "%sout of bounds high (%6.5lf).\n", cache_ptr->prefix,
- cache_ptr->resize_ctl.upper_hr_threshold);
- break;
-
- case H5C_decr__age_out:
- HDfprintf(stdout, "%sAuto cache resize -- decrease by ageout. HR = %lf\n",
- cache_ptr->prefix, hit_rate);
- break;
-
- case H5C_decr__age_out_with_threshold:
- HDassert(hit_rate > cache_ptr->resize_ctl.upper_hr_threshold);
-
- HDfprintf(stdout,
- "%sAuto cache resize -- decrease by ageout with threshold. HR = %lf > %6.5lf\n",
- cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.upper_hr_threshold);
- break;
-
- default:
- HDfprintf(stdout, "%sAuto cache resize -- decrease by unknown mode. HR = %lf\n",
- cache_ptr->prefix, hit_rate);
- }
-
- HDfprintf(stdout, "%s cache size decreased from (%zu/%zu) to (%zu/%zu).\n", cache_ptr->prefix,
- old_max_cache_size, old_min_clean_size, new_max_cache_size, new_min_clean_size);
- break;
-
- case at_max_size:
- HDfprintf(stdout, "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n",
- cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.lower_hr_threshold);
- HDfprintf(stdout, "%s cache already at maximum size so no change.\n", cache_ptr->prefix);
- break;
-
- case at_min_size:
- HDfprintf(stdout, "%sAuto cache resize -- hit rate (%lf) -- can't decrease.\n", cache_ptr->prefix,
- hit_rate);
- HDfprintf(stdout, "%s cache already at minimum size.\n", cache_ptr->prefix);
- break;
-
- case increase_disabled:
- HDfprintf(stdout, "%sAuto cache resize -- increase disabled -- HR = %lf.", cache_ptr->prefix,
- hit_rate);
- break;
-
- case decrease_disabled:
- HDfprintf(stdout, "%sAuto cache resize -- decrease disabled -- HR = %lf.\n", cache_ptr->prefix,
- hit_rate);
- break;
-
- case not_full:
- HDassert(hit_rate < cache_ptr->resize_ctl.lower_hr_threshold);
-
- HDfprintf(stdout, "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n",
- cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.lower_hr_threshold);
- HDfprintf(stdout, "%s cache not full so no increase in size.\n", cache_ptr->prefix);
- break;
-
- default:
- HDfprintf(stdout, "%sAuto cache resize -- unknown status code.\n", cache_ptr->prefix);
- break;
- }
-} /* H5C_def_auto_resize_rpt_fcn() */
-
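For context, H5C_def_auto_resize_rpt_fcn() is normally installed as the reporting callback in the cache's automatic-resize configuration rather than called directly. A minimal sketch of that wiring, assuming the H5C_auto_size_ctl_t control structure and H5C_set_cache_auto_resize_config() from H5Cprivate.h; the field values shown are illustrative only, and cache_ptr is assumed to be a valid H5C_t pointer:

    H5C_auto_size_ctl_t config;

    HDmemset(&config, 0, sizeof(config));
    config.version = H5C__CURR_AUTO_SIZE_CTL_VER;
    config.rpt_fcn = H5C_def_auto_resize_rpt_fcn; /* print resize events to stdout */
    /* ... fill in size limits, thresholds, increment/decrement modes, etc. ... */
    if (H5C_set_cache_auto_resize_config(cache_ptr, &config) < 0)
        /* handle error */;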
-/*-------------------------------------------------------------------------
- *
* Function: H5C_prep_for_file_close
*
* Purpose: This function should be called just prior to the cache
@@ -687,7 +505,7 @@ H5C_dest(H5F_t *f)
HDassert(cache_ptr->close_warning_received);
#if H5AC_DUMP_IMAGE_STATS_ON_CLOSE
- if (H5C_image_stats(cache_ptr, TRUE) < 0)
+ if (H5C__image_stats(cache_ptr, TRUE) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't display cache image stats")
#endif /* H5AC_DUMP_IMAGE_STATS_ON_CLOSE */
@@ -789,81 +607,6 @@ done:
} /* H5C_evict() */
/*-------------------------------------------------------------------------
- * Function: H5C_expunge_entry
- *
- * Purpose: Expunge an entry from the cache without writing it to disk
- * even if it is dirty. The entry may not be either pinned or
- * protected.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer
- * 6/29/06
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5C_expunge_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, unsigned flags)
-{
- H5C_t *cache_ptr;
- H5C_cache_entry_t *entry_ptr = NULL;
- unsigned flush_flags = (H5C__FLUSH_INVALIDATE_FLAG | H5C__FLUSH_CLEAR_ONLY_FLAG);
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert(f);
- HDassert(f->shared);
- cache_ptr = f->shared->cache;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(type);
- HDassert(H5F_addr_defined(addr));
-
-#ifdef H5C_DO_EXTREME_SANITY_CHECKS
- if (H5C__validate_lru_list(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on entry")
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
- /* Look for entry in cache */
- H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
- if ((entry_ptr == NULL) || (entry_ptr->type != type))
- /* the target doesn't exist in the cache, so we are done. */
- HGOTO_DONE(SUCCEED)
-
- HDassert(entry_ptr->addr == addr);
- HDassert(entry_ptr->type == type);
-
- /* Check for entry being pinned or protected */
- if (entry_ptr->is_protected)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is protected")
- if (entry_ptr->is_pinned)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is pinned")
-
- /* If we get this far, call H5C__flush_single_entry() with the
- * H5C__FLUSH_INVALIDATE_FLAG and the H5C__FLUSH_CLEAR_ONLY_FLAG.
- * This will clear the entry, and then delete it from the cache.
- */
-
- /* Pass along 'free file space' flag */
- flush_flags |= (flags & H5C__FREE_FILE_SPACE_FLAG);
-
- /* Delete the entry from the skip list on destroy */
- flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG;
-
- if (H5C__flush_single_entry(f, entry_ptr, flush_flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "can't flush entry")
-
-done:
-#ifdef H5C_DO_EXTREME_SANITY_CHECKS
- if (H5C__validate_lru_list(cache_ptr) < 0)
- HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on exit")
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_expunge_entry() */
-
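The calling convention of H5C_expunge_entry() is unchanged by the move out of H5C.c. A minimal usage sketch, where my_class_g and entry_addr are hypothetical stand-ins for a real cache-client class and a defined file address:

    /* Discard an unpinned, unprotected entry without writing it to disk,
     * and release its file space as well.
     */
    if (H5C_expunge_entry(f, &my_class_g, entry_addr, H5C__FREE_FILE_SPACE_FLAG) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "can't expunge entry")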
-/*-------------------------------------------------------------------------
* Function: H5C_flush_cache
*
* Purpose: Flush (and possibly destroy) the entries contained in the
@@ -1059,1325 +802,6 @@ done:
} /* H5C_flush_to_min_clean() */
/*-------------------------------------------------------------------------
- * Function: H5C_insert_entry
- *
- * Purpose: Adds the specified thing to the cache. The thing need not
- * exist on disk yet, but it must have an address and disk
- * space reserved.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer
- * 6/2/04
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, unsigned int flags)
-{
- H5C_t *cache_ptr;
- H5AC_ring_t ring = H5C_RING_UNDEFINED;
- hbool_t insert_pinned;
- hbool_t flush_last;
-#ifdef H5_HAVE_PARALLEL
- hbool_t coll_access = FALSE; /* whether access to the cache entry is done collectively */
-#endif /* H5_HAVE_PARALLEL */
- hbool_t set_flush_marker;
- hbool_t write_permitted = TRUE;
- size_t empty_space;
- H5C_cache_entry_t *entry_ptr = NULL;
- H5C_cache_entry_t *test_entry_ptr;
- hbool_t entry_tagged = FALSE;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert(f);
- HDassert(f->shared);
-
- cache_ptr = f->shared->cache;
-
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(type);
- HDassert(type->mem_type == cache_ptr->class_table_ptr[type->id]->mem_type);
- HDassert(type->image_len);
- HDassert(H5F_addr_defined(addr));
- HDassert(thing);
-
-#ifdef H5C_DO_EXTREME_SANITY_CHECKS
- /* no need to verify that entry is not already in the index as */
- /* we already make that check below. */
- if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
- H5C__validate_lru_list(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
- set_flush_marker = ((flags & H5C__SET_FLUSH_MARKER_FLAG) != 0);
- insert_pinned = ((flags & H5C__PIN_ENTRY_FLAG) != 0);
- flush_last = ((flags & H5C__FLUSH_LAST_FLAG) != 0);
-
- /* Get the ring type from the API context */
- ring = H5CX_get_ring();
-
- entry_ptr = (H5C_cache_entry_t *)thing;
-
- /* verify that the new entry isn't already in the hash table -- scream
- * and die if it is.
- */
-
- H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL)
-
- if (test_entry_ptr != NULL) {
- if (test_entry_ptr == entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "entry already in cache")
- else
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "duplicate entry in cache")
- } /* end if */
-
- entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
- entry_ptr->cache_ptr = cache_ptr;
- entry_ptr->addr = addr;
- entry_ptr->type = type;
-
- entry_ptr->image_ptr = NULL;
- entry_ptr->image_up_to_date = FALSE;
-
- entry_ptr->is_protected = FALSE;
- entry_ptr->is_read_only = FALSE;
- entry_ptr->ro_ref_count = 0;
-
- entry_ptr->is_pinned = insert_pinned;
- entry_ptr->pinned_from_client = insert_pinned;
- entry_ptr->pinned_from_cache = FALSE;
- entry_ptr->flush_me_last = flush_last;
-
- /* newly inserted entries are assumed to be dirty */
- entry_ptr->is_dirty = TRUE;
-
- /* not protected, so can't be dirtied */
- entry_ptr->dirtied = FALSE;
-
- /* Retrieve the size of the thing */
- if ((type->image_len)(thing, &(entry_ptr->size)) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGETSIZE, FAIL, "can't get size of thing")
- HDassert(entry_ptr->size > 0 && entry_ptr->size < H5C_MAX_ENTRY_SIZE);
-
- entry_ptr->in_slist = FALSE;
-
-#ifdef H5_HAVE_PARALLEL
- entry_ptr->clear_on_unprotect = FALSE;
- entry_ptr->flush_immediately = FALSE;
-#endif /* H5_HAVE_PARALLEL */
-
- entry_ptr->flush_in_progress = FALSE;
- entry_ptr->destroy_in_progress = FALSE;
-
- entry_ptr->ring = ring;
-
- /* Initialize flush dependency fields */
- entry_ptr->flush_dep_parent = NULL;
- entry_ptr->flush_dep_nparents = 0;
- entry_ptr->flush_dep_parent_nalloc = 0;
- entry_ptr->flush_dep_nchildren = 0;
- entry_ptr->flush_dep_ndirty_children = 0;
- entry_ptr->flush_dep_nunser_children = 0;
-
- entry_ptr->ht_next = NULL;
- entry_ptr->ht_prev = NULL;
- entry_ptr->il_next = NULL;
- entry_ptr->il_prev = NULL;
-
- entry_ptr->next = NULL;
- entry_ptr->prev = NULL;
-
-#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
- entry_ptr->aux_next = NULL;
- entry_ptr->aux_prev = NULL;
-#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
-
-#ifdef H5_HAVE_PARALLEL
- entry_ptr->coll_next = NULL;
- entry_ptr->coll_prev = NULL;
-#endif /* H5_HAVE_PARALLEL */
-
- /* initialize cache image related fields */
- entry_ptr->include_in_image = FALSE;
- entry_ptr->lru_rank = 0;
- entry_ptr->image_dirty = FALSE;
- entry_ptr->fd_parent_count = 0;
- entry_ptr->fd_parent_addrs = NULL;
- entry_ptr->fd_child_count = 0;
- entry_ptr->fd_dirty_child_count = 0;
- entry_ptr->image_fd_height = 0;
- entry_ptr->prefetched = FALSE;
- entry_ptr->prefetch_type_id = 0;
- entry_ptr->age = 0;
- entry_ptr->prefetched_dirty = FALSE;
-#ifndef NDEBUG /* debugging field */
- entry_ptr->serialization_count = 0;
-#endif
-
- /* initialize tag list fields */
- entry_ptr->tl_next = NULL;
- entry_ptr->tl_prev = NULL;
- entry_ptr->tag_info = NULL;
-
- /* Apply tag to newly inserted entry */
- if (H5C__tag_entry(cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "Cannot tag metadata entry")
- entry_tagged = TRUE;
-
- H5C__RESET_CACHE_ENTRY_STATS(entry_ptr)
-
- if (cache_ptr->flash_size_increase_possible &&
- (entry_ptr->size > cache_ptr->flash_size_increase_threshold))
- if (H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C__flash_increase_cache_size failed")
-
- if (cache_ptr->index_size >= cache_ptr->max_cache_size)
- empty_space = 0;
- else
- empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
-
- if (cache_ptr->evictions_enabled &&
- (((cache_ptr->index_size + entry_ptr->size) > cache_ptr->max_cache_size) ||
- (((empty_space + cache_ptr->clean_index_size) < cache_ptr->min_clean_size)))) {
- size_t space_needed;
-
- if (empty_space <= entry_ptr->size)
- cache_ptr->cache_full = TRUE;
-
- if (cache_ptr->check_write_permitted != NULL) {
- if ((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "Can't get write_permitted")
- } /* end if */
- else
- write_permitted = cache_ptr->write_permitted;
-
- HDassert(entry_ptr->size <= H5C_MAX_ENTRY_SIZE);
- space_needed = entry_ptr->size;
- if (space_needed > cache_ptr->max_cache_size)
- space_needed = cache_ptr->max_cache_size;
-
- /* Note that space_needed is just the amount of space
- * needed to insert the new entry without exceeding the cache
- * size limit. The subsequent call to H5C__make_space_in_cache()
- * may evict the entries required to free more or less space
- * depending on conditions. It MAY be less if the cache is
- * currently undersized, or more if the cache is oversized.
- *
- * The cache can exceed its maximum size limit via the following
- * mechanisms:
- *
- * First, it is possible for the cache to grow without
- * bound as long as entries are protected and not unprotected.
- *
- * Second, when writes are not permitted it is also possible
- * for the cache to grow without bound.
- *
- * Finally, we usually don't check to see if the cache is
- * oversized at the end of an unprotect. As a result, it is
- * possible to have a vastly oversized cache with no protected
- * entries as long as all the protects precede the unprotects.
- */
-
- if (H5C__make_space_in_cache(f, space_needed, write_permitted) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C__make_space_in_cache failed")
- } /* end if */
-
- H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL)
-
- /* New entries are presumed to be dirty */
- HDassert(entry_ptr->is_dirty);
- entry_ptr->flush_marker = set_flush_marker;
- H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
- H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, FAIL)
-
-#ifdef H5C_DO_EXTREME_SANITY_CHECKS
- if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
- H5C__validate_lru_list(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed just before done")
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
- /* If the entry's type has a 'notify' callback send an 'after insertion'
- * notice now that the entry is fully integrated into the cache.
- */
- if (entry_ptr->type->notify && (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_INSERT, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry inserted into cache")
-
- H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr)
-
-#ifdef H5_HAVE_PARALLEL
- if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI))
- coll_access = H5F_get_coll_metadata_reads(f);
-
- entry_ptr->coll_access = coll_access;
- if (coll_access) {
- H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, FAIL)
-
- /* Make sure the size of the collective entries in the cache remain in check */
- if (H5P_USER_TRUE == H5F_COLL_MD_READ(f)) {
- if (cache_ptr->max_cache_size * 80 < cache_ptr->coll_list_size * 100) {
- if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear collective metadata entries")
- } /* end if */
- } /* end if */
- else {
- if (cache_ptr->max_cache_size * 40 < cache_ptr->coll_list_size * 100) {
- if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear collective metadata entries")
- } /* end if */
- } /* end else */
- } /* end if */
-#endif
-
-done:
-#ifdef H5C_DO_EXTREME_SANITY_CHECKS
- if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
- H5C__validate_lru_list(cache_ptr) < 0)
- HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
- if (ret_value < 0 && entry_tagged)
- if (H5C__untag_entry(cache_ptr, entry_ptr) < 0)
- HDONE_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list")
-
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_insert_entry() */
-
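The insertion path above is exercised by cache clients roughly as follows; a sketch using the hypothetical names my_class_g and new_thing, where the object must already have file space reserved at addr:

    /* Add a newly created (and therefore dirty) metadata object to the
     * cache, pinning it so it cannot be evicted until unpinned.
     */
    if (H5C_insert_entry(f, &my_class_g, addr, new_thing, H5C__PIN_ENTRY_FLAG) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "can't insert entry")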
-/*-------------------------------------------------------------------------
- * Function: H5C_mark_entry_dirty
- *
- * Purpose: Mark a pinned or protected entry as dirty. The target entry
- * MUST be either pinned or protected, and MAY be both.
- *
- * In the protected case, this call is the functional
- * equivalent of setting the H5C__DIRTIED_FLAG on an unprotect
- * call.
- *
- * In the pinned but not protected case, if the entry is not
- * already dirty, the function marks the entry dirty and
- * places it on the skip list.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer
- * 5/15/06
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5C_mark_entry_dirty(void *thing)
-{
- H5C_t *cache_ptr;
- H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)thing;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- /* Sanity checks */
- HDassert(entry_ptr);
- HDassert(H5F_addr_defined(entry_ptr->addr));
- cache_ptr = entry_ptr->cache_ptr;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
- if (entry_ptr->is_protected) {
- HDassert(!((entry_ptr)->is_read_only));
-
- /* set the dirtied flag */
- entry_ptr->dirtied = TRUE;
-
- /* reset image_up_to_date */
- if (entry_ptr->image_up_to_date) {
- entry_ptr->image_up_to_date = FALSE;
-
- if (entry_ptr->flush_dep_nparents > 0)
- if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "Can't propagate serialization status to fd parents")
- } /* end if */
- } /* end if */
- else if (entry_ptr->is_pinned) {
- hbool_t was_clean; /* Whether the entry was previously clean */
- hbool_t image_was_up_to_date;
-
- /* Remember previous dirty status */
- was_clean = !entry_ptr->is_dirty;
-
- /* Check if image is up to date */
- image_was_up_to_date = entry_ptr->image_up_to_date;
-
- /* Mark the entry as dirty if it isn't already */
- entry_ptr->is_dirty = TRUE;
- entry_ptr->image_up_to_date = FALSE;
-
- /* Modify cache data structures */
- if (was_clean)
- H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr, FAIL)
- if (!entry_ptr->in_slist)
- H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
-
- /* Update stats for entry being marked dirty */
- H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr)
-
- /* Check for entry changing status and do notifications, etc. */
- if (was_clean) {
- /* If the entry's type has a 'notify' callback send an 'entry dirtied'
- * notice now that the entry is fully integrated into the cache.
- */
- if (entry_ptr->type->notify &&
- (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag set")
-
- /* Propagate the dirty flag up the flush dependency chain if appropriate */
- if (entry_ptr->flush_dep_nparents > 0)
- if (H5C__mark_flush_dep_dirty(entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
- } /* end if */
- if (image_was_up_to_date)
- if (entry_ptr->flush_dep_nparents > 0)
- if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "Can't propagate serialization status to fd parents")
- } /* end if */
- else
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Entry is neither pinned nor protected??")
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_mark_entry_dirty() */
-
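A usage sketch for the function above; thing is assumed to point to an entry that is currently pinned and/or protected:

    /* For a protected entry, this is equivalent to passing
     * H5C__DIRTIED_FLAG to the eventual unprotect call.
     */
    if (H5C_mark_entry_dirty(thing) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "can't mark entry dirty")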
-/*-------------------------------------------------------------------------
- * Function: H5C_mark_entry_clean
- *
- * Purpose: Mark a pinned entry as clean. The target entry MUST be pinned.
- *
- * If the entry is not
- * already clean, the function marks the entry clean and
- * removes it from the skip list.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * 7/23/16
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5C_mark_entry_clean(void *_thing)
-{
- H5C_t *cache_ptr;
- H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)_thing;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- /* Sanity checks */
- HDassert(entry_ptr);
- HDassert(H5F_addr_defined(entry_ptr->addr));
- cache_ptr = entry_ptr->cache_ptr;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
- /* Operate on pinned entry */
- if (entry_ptr->is_protected)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "entry is protected")
- else if (entry_ptr->is_pinned) {
- hbool_t was_dirty; /* Whether the entry was previously dirty */
-
- /* Remember previous dirty status */
- was_dirty = entry_ptr->is_dirty;
-
- /* Mark the entry as clean if it isn't already */
- entry_ptr->is_dirty = FALSE;
-
- /* Also reset the 'flush_marker' flag, since the entry shouldn't be flushed now */
- entry_ptr->flush_marker = FALSE;
-
- /* Modify cache data structures */
- if (was_dirty)
- H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr, FAIL)
- if (entry_ptr->in_slist)
- H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE, FAIL)
-
- /* Update stats for entry being marked clean */
- H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr)
-
- /* Check for entry changing status and do notifications, etc. */
- if (was_dirty) {
- /* If the entry's type has a 'notify' callback send an 'entry cleaned'
- * notice now that the entry is fully integrated into the cache.
- */
- if (entry_ptr->type->notify &&
- (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify client about entry dirty flag cleared")
-
- /* Propagate the clean up the flush dependency chain, if appropriate */
- if (entry_ptr->flush_dep_nparents > 0)
- if (H5C__mark_flush_dep_clean(entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Can't propagate flush dep clean")
- } /* end if */
- } /* end if */
- else
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Entry is not pinned??")
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_mark_entry_clean() */
-
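The matching sketch for the clean side; thing must reference a pinned (and not protected) entry:

    /* Mark a pinned entry clean and drop it from the skip list, so it is
     * no longer scheduled for flushing.
     */
    if (H5C_mark_entry_clean(thing) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "can't mark entry clean")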
-/*-------------------------------------------------------------------------
- * Function: H5C_mark_entry_unserialized
- *
- * Purpose: Mark a pinned or protected entry as unserialized. The target
- * entry MUST be either pinned or protected, and MAY be both.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * 12/23/16
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5C_mark_entry_unserialized(void *thing)
-{
- H5C_cache_entry_t *entry = (H5C_cache_entry_t *)thing;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- /* Sanity checks */
- HDassert(entry);
- HDassert(H5F_addr_defined(entry->addr));
-
- if (entry->is_protected || entry->is_pinned) {
- HDassert(!entry->is_read_only);
-
- /* Reset image_up_to_date */
- if (entry->image_up_to_date) {
- entry->image_up_to_date = FALSE;
-
- if (entry->flush_dep_nparents > 0)
- if (H5C__mark_flush_dep_unserialized(entry) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL,
- "Can't propagate serialization status to fd parents")
- } /* end if */
- } /* end if */
- else
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKUNSERIALIZED, FAIL,
- "Entry to unserialize is neither pinned nor protected??")
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_mark_entry_unserialized() */
-
-/*-------------------------------------------------------------------------
- * Function: H5C_mark_entry_serialized
- *
- * Purpose: Mark a pinned entry as serialized. The target entry MUST be
- * pinned.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * 12/23/16
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5C_mark_entry_serialized(void *_thing)
-{
- H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_thing;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- /* Sanity checks */
- HDassert(entry);
- HDassert(H5F_addr_defined(entry->addr));
-
- /* Operate on pinned entry */
- if (entry->is_protected)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL, "entry is protected")
- else if (entry->is_pinned) {
- /* Check for entry changing status and do notifications, etc. */
- if (!entry->image_up_to_date) {
- /* Set the image_up_to_date flag */
- entry->image_up_to_date = TRUE;
-
- /* Propagate the serialize up the flush dependency chain, if appropriate */
- if (entry->flush_dep_nparents > 0)
- if (H5C__mark_flush_dep_serialized(entry) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL,
- "Can't propagate flush dep serialize")
- } /* end if */
- } /* end if */
- else
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL, "Entry is not pinned??")
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_mark_entry_serialized() */
-
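These two functions are typically used as a pair around client-side changes that invalidate and then rebuild an entry's on-disk image; a sketch with a hypothetical pinned entry thing:

    /* Invalidate the cached image before mutating the object... */
    if (H5C_mark_entry_unserialized(thing) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKUNSERIALIZED, FAIL, "can't mark entry unserialized")

    /* ... client code regenerates the serialized form here ... */

    /* ...then declare the image current again. */
    if (H5C_mark_entry_serialized(thing) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL, "can't mark entry serialized")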
-/*-------------------------------------------------------------------------
- *
- * Function: H5C_move_entry
- *
- * Purpose: Use this function to notify the cache that an entry's
- * file address changed.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer
- * 6/2/04
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5C_move_entry(H5C_t *cache_ptr, const H5C_class_t *type, haddr_t old_addr, haddr_t new_addr)
-{
- H5C_cache_entry_t *entry_ptr = NULL;
- H5C_cache_entry_t *test_entry_ptr = NULL;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(type);
- HDassert(H5F_addr_defined(old_addr));
- HDassert(H5F_addr_defined(new_addr));
- HDassert(H5F_addr_ne(old_addr, new_addr));
-
-#ifdef H5C_DO_EXTREME_SANITY_CHECKS
- if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
- H5C__validate_lru_list(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
- H5C__SEARCH_INDEX(cache_ptr, old_addr, entry_ptr, FAIL)
-
- if (entry_ptr == NULL || entry_ptr->type != type)
- /* the old item doesn't exist in the cache, so we are done. */
- HGOTO_DONE(SUCCEED)
-
- HDassert(entry_ptr->addr == old_addr);
- HDassert(entry_ptr->type == type);
-
- /* Check for R/W status, otherwise error */
- /* (Moving a R/O entry would mark it dirty, which shouldn't
- * happen. QAK - 2016/12/02)
- */
- if (entry_ptr->is_read_only)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "can't move R/O entry")
-
- H5C__SEARCH_INDEX(cache_ptr, new_addr, test_entry_ptr, FAIL)
-
- if (test_entry_ptr != NULL) { /* we are hosed */
- if (test_entry_ptr->type == type)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "target already moved & reinserted???")
- else
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "new address already in use?")
- } /* end if */
-
- /* If we get this far we have work to do. Remove *entry_ptr from
- * the hash table (and skip list if necessary), change its address to the
- * new address, mark it as dirty (if it isn't already) and then re-insert.
- *
- * Update the replacement policy for a hit to avoid an eviction before
- * the moved entry is touched. Update stats for a move.
- *
- * Note that we do not check the size of the cache, or evict anything.
- * Since this is a simple re-name, cache size should be unaffected.
- *
- * Check to see if the target entry is in the process of being destroyed
- * before we delete from the index, etc. If it is, all we do is
- * change the addr. If the entry is only in the process of being flushed,
- * don't mark it as dirty either, lest we confuse the flush call back.
- */
- if (!entry_ptr->destroy_in_progress) {
- H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL)
-
- if (entry_ptr->in_slist) {
- HDassert(cache_ptr->slist_ptr);
- H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE, FAIL)
- } /* end if */
- } /* end if */
-
- entry_ptr->addr = new_addr;
-
- if (!entry_ptr->destroy_in_progress) {
- hbool_t was_dirty; /* Whether the entry was previously dirty */
-
- /* Remember previous dirty status */
- was_dirty = entry_ptr->is_dirty;
-
- /* Mark the entry as dirty if it isn't already */
- entry_ptr->is_dirty = TRUE;
-
- /* This shouldn't be needed, but it keeps the test code happy */
- if (entry_ptr->image_up_to_date) {
- entry_ptr->image_up_to_date = FALSE;
- if (entry_ptr->flush_dep_nparents > 0)
- if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "Can't propagate serialization status to fd parents")
- } /* end if */
-
- /* Modify cache data structures */
- H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL)
- H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
-
- /* Skip some actions if we're in the middle of flushing the entry */
- if (!entry_ptr->flush_in_progress) {
- /* Update the replacement policy for the entry */
- H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, FAIL)
-
- /* Check for entry changing status and do notifications, etc. */
- if (!was_dirty) {
- /* If the entry's type has a 'notify' callback send an 'entry dirtied'
- * notice now that the entry is fully integrated into the cache.
- */
- if (entry_ptr->type->notify &&
- (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify client about entry dirty flag set")
-
- /* Propagate the dirty flag up the flush dependency chain if appropriate */
- if (entry_ptr->flush_dep_nparents > 0)
- if (H5C__mark_flush_dep_dirty(entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL,
- "Can't propagate flush dep dirty flag")
- } /* end if */
- } /* end if */
- } /* end if */
-
- H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr)
-
-done:
-#ifdef H5C_DO_EXTREME_SANITY_CHECKS
- if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
- H5C__validate_lru_list(cache_ptr) < 0)
- HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_move_entry() */
-
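A sketch of the move notification; old_addr and new_addr are hypothetical defined addresses, and the entry's file space is assumed to have been relocated already:

    /* Tell the cache that the object's file address changed; this dirties
     * the entry (unless a flush is in progress) but evicts nothing, since
     * the cache size is unaffected by a rename.
     */
    if (H5C_move_entry(f->shared->cache, &my_class_g, old_addr, new_addr) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "can't move entry")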
-/*-------------------------------------------------------------------------
- * Function: H5C_resize_entry
- *
- * Purpose: Resize a pinned or protected entry.
- *
- * Resizing an entry dirties it, so if the entry is not
- * already dirty, the function places the entry on the
- * skip list.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer
- * 7/5/06
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5C_resize_entry(void *thing, size_t new_size)
-{
- H5C_t *cache_ptr;
- H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)thing;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- /* Sanity checks */
- HDassert(entry_ptr);
- HDassert(H5F_addr_defined(entry_ptr->addr));
- cache_ptr = entry_ptr->cache_ptr;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
- /* Check for usage errors */
- if (new_size <= 0)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "New size is non-positive")
- if (!(entry_ptr->is_pinned || entry_ptr->is_protected))
- HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, FAIL, "Entry isn't pinned or protected??")
-
-#ifdef H5C_DO_EXTREME_SANITY_CHECKS
- if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
- /* update for change in entry size if necessary */
- if (entry_ptr->size != new_size) {
- hbool_t was_clean;
-
- /* make note of whether the entry was clean to begin with */
- was_clean = !entry_ptr->is_dirty;
-
- /* mark the entry as dirty if it isn't already */
- entry_ptr->is_dirty = TRUE;
-
- /* Reset the image up-to-date status */
- if (entry_ptr->image_up_to_date) {
- entry_ptr->image_up_to_date = FALSE;
- if (entry_ptr->flush_dep_nparents > 0)
- if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "Can't propagate serialization status to fd parents")
- } /* end if */
-
- /* Release the current image */
- if (entry_ptr->image_ptr)
- entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
-
- /* do a flash cache size increase if appropriate */
- if (cache_ptr->flash_size_increase_possible) {
- if (new_size > entry_ptr->size) {
- size_t size_increase;
-
- size_increase = new_size - entry_ptr->size;
- if (size_increase >= cache_ptr->flash_size_increase_threshold)
- if (H5C__flash_increase_cache_size(cache_ptr, entry_ptr->size, new_size) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, "flash cache increase failed")
- }
- }
-
- /* update the pinned and/or protected entry list */
- if (entry_ptr->is_pinned)
- H5C__DLL_UPDATE_FOR_SIZE_CHANGE(cache_ptr->pel_len, cache_ptr->pel_size, entry_ptr->size,
- new_size, FAIL)
- if (entry_ptr->is_protected)
- H5C__DLL_UPDATE_FOR_SIZE_CHANGE(cache_ptr->pl_len, cache_ptr->pl_size, entry_ptr->size, new_size,
- FAIL)
-
-#ifdef H5_HAVE_PARALLEL
- if (entry_ptr->coll_access)
- H5C__DLL_UPDATE_FOR_SIZE_CHANGE(cache_ptr->coll_list_len, cache_ptr->coll_list_size,
- entry_ptr->size, new_size, FAIL)
-#endif /* H5_HAVE_PARALLEL */
-
- /* update statistics just before changing the entry size */
- H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size);
-
- /* update the hash table */
- H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_size, entry_ptr, was_clean, FAIL);
-
- /* if the entry is in the skip list, update that too */
- if (entry_ptr->in_slist)
- H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_size);
-
- /* finally, update the entry size proper */
- entry_ptr->size = new_size;
-
- if (!entry_ptr->in_slist)
- H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
-
- if (entry_ptr->is_pinned)
- H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr)
-
- /* Check for entry changing status and do notifications, etc. */
- if (was_clean) {
- /* If the entry's type has a 'notify' callback send an 'entry dirtied'
- * notice now that the entry is fully integrated into the cache.
- */
- if (entry_ptr->type->notify &&
- (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag set")
-
- /* Propagate the dirty flag up the flush dependency chain if appropriate */
- if (entry_ptr->flush_dep_nparents > 0)
- if (H5C__mark_flush_dep_dirty(entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
- } /* end if */
- } /* end if */
-
-done:
-#ifdef H5C_DO_EXTREME_SANITY_CHECKS
- if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0)
- HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_resize_entry() */
-
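A resize sketch; thing is assumed to be pinned or protected and new_size to be non-zero:

    /* Resizing dirties the entry, discards any stale image buffer, and may
     * trigger a flash cache-size increase if the growth is large enough.
     */
    if (H5C_resize_entry(thing, new_size) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, "can't resize entry")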
-/*-------------------------------------------------------------------------
- * Function: H5C_pin_protected_entry()
- *
- * Purpose: Pin a protected cache entry. The entry must be protected
- * at the time of call, and must be unpinned.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer
- * 4/26/06
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5C_pin_protected_entry(void *thing)
-{
- H5C_t *cache_ptr;
- H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)thing; /* Pointer to entry to pin */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- /* Sanity checks */
- HDassert(entry_ptr);
- HDassert(H5F_addr_defined(entry_ptr->addr));
- cache_ptr = entry_ptr->cache_ptr;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
-#ifdef H5C_DO_EXTREME_SANITY_CHECKS
- if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
- H5C__validate_lru_list(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
- /* Only protected entries can be pinned */
- if (!entry_ptr->is_protected)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Entry isn't protected")
-
- /* Pin the entry from a client */
- if (H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client")
-
-done:
-#ifdef H5C_DO_EXTREME_SANITY_CHECKS
- if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
- H5C__validate_lru_list(cache_ptr) < 0)
- HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_pin_protected_entry() */
-
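Pinning is only legal while the entry is protected, so the call sits between a protect/unprotect pair; a sketch assuming H5C_unprotect() with its usual (f, addr, thing, flags) signature and a hypothetical udata:

    if (NULL == (thing = H5C_protect(f, &my_class_g, addr, udata, H5C__NO_FLAGS_SET)))
        HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, FAIL, "can't protect entry")

    /* Keep the entry resident after it is unprotected */
    if (H5C_pin_protected_entry(thing) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "can't pin entry")

    if (H5C_unprotect(f, addr, thing, H5C__NO_FLAGS_SET) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "can't unprotect entry")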
-/*-------------------------------------------------------------------------
- * Function: H5C_protect
- *
- * Purpose: If the target entry is not in the cache, load it. If
- * necessary, attempt to evict one or more entries to keep
- * the cache within its maximum size.
- *
- * Mark the target entry as protected, and return its address
- * to the caller. The caller must call H5C_unprotect() when
- * finished with the entry.
- *
- * While it is protected, the entry may not be either evicted
- * or flushed -- nor may it be accessed by another call to
- * H5C_protect. Any attempt to do so will result in a failure.
- *
- * Return: Success: Ptr to the desired entry
- * Failure: NULL
- *
- * Programmer: John Mainzer - 6/2/04
- *
- *-------------------------------------------------------------------------
- */
-void *
-H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsigned flags)
-{
- H5C_t *cache_ptr;
- H5AC_ring_t ring = H5C_RING_UNDEFINED;
- hbool_t hit;
- hbool_t have_write_permitted = FALSE;
- hbool_t read_only = FALSE;
- hbool_t flush_last;
-#ifdef H5_HAVE_PARALLEL
- hbool_t coll_access = FALSE; /* whether access to the cache entry is done collectively */
-#endif /* H5_HAVE_PARALLEL */
- hbool_t write_permitted = FALSE;
- hbool_t was_loaded = FALSE; /* Whether the entry was loaded as a result of the protect */
- size_t empty_space;
- void *thing;
- H5C_cache_entry_t *entry_ptr;
- void *ret_value = NULL; /* Return value */
-
- FUNC_ENTER_NOAPI(NULL)
-
- /* check args */
- HDassert(f);
- HDassert(f->shared);
-
- cache_ptr = f->shared->cache;
-
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(type);
- HDassert(type->mem_type == cache_ptr->class_table_ptr[type->id]->mem_type);
- HDassert(H5F_addr_defined(addr));
-
-#ifdef H5C_DO_EXTREME_SANITY_CHECKS
- if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
- H5C__validate_lru_list(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "an extreme sanity check failed on entry")
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
- /* Load the cache image, if requested */
- if (cache_ptr->load_image) {
- cache_ptr->load_image = FALSE;
- if (H5C__load_cache_image(f) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "Can't load cache image")
- } /* end if */
-
- read_only = ((flags & H5C__READ_ONLY_FLAG) != 0);
- flush_last = ((flags & H5C__FLUSH_LAST_FLAG) != 0);
-
- /* Get the ring type from the API context */
- ring = H5CX_get_ring();
-
-#ifdef H5_HAVE_PARALLEL
- if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI))
- coll_access = H5F_get_coll_metadata_reads(f);
-#endif /* H5_HAVE_PARALLEL */
-
- /* first check to see if the target is in cache */
- H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, NULL)
-
- if (entry_ptr != NULL) {
- if (entry_ptr->ring != ring)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "ring type mismatch occurred for cache entry")
-
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
-
- if (entry_ptr->prefetched) {
- /* This call removes the prefetched entry from the cache,
- * and replaces it with an entry deserialized from the
- * image of the prefetched entry.
- */
- if (H5C__deserialize_prefetched_entry(f, cache_ptr, &entry_ptr, type, addr, udata) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't deserialize prefetched entry")
-
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(!entry_ptr->prefetched);
- HDassert(entry_ptr->addr == addr);
- } /* end if */
-
- /* Check for trying to load the wrong type of entry from an address */
- if (entry_ptr->type != type)
- HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, NULL, "incorrect cache entry type")
-
-#ifdef H5_HAVE_PARALLEL
- /* If this is a collective metadata read, the entry is not marked as
- * collective, and is clean, it is possible that other processes will
- * not have it in their caches and will expect a bcast of the entry from
- * process 0. So process 0 will bcast the entry to all other ranks.
- * Ranks that _do_ have the entry in their cache still have to
- * participate in the bcast.
- */
- if (coll_access) {
- if (!entry_ptr->is_dirty && !entry_ptr->coll_access) {
- MPI_Comm comm; /* File MPI Communicator */
- int mpi_code; /* MPI error code */
- int buf_size;
-
- if (MPI_COMM_NULL == (comm = H5F_mpi_get_comm(f)))
- HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "get_comm request failed")
-
- if (entry_ptr->image_ptr == NULL) {
- int mpi_rank;
-
- if ((mpi_rank = H5F_mpi_get_rank(f)) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "Can't get MPI rank")
-
- if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL,
- "memory allocation failed for on disk image buffer")
-#if H5C_DO_MEMORY_SANITY_CHECKS
- H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE,
- H5C_IMAGE_EXTRA_SPACE);
-#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
- if (0 == mpi_rank && H5C__generate_image(f, cache_ptr, entry_ptr) < 0)
- /* If image generation fails, push an error but
- * still participate in the following MPI_Bcast
- */
- HDONE_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "can't generate entry's image")
- } /* end if */
- HDassert(entry_ptr->image_ptr);
-
- H5_CHECKED_ASSIGN(buf_size, int, entry_ptr->size, size_t);
- if (MPI_SUCCESS != (mpi_code = MPI_Bcast(entry_ptr->image_ptr, buf_size, MPI_BYTE, 0, comm)))
- HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code)
-
- /* Mark the entry as collective and insert into the collective list */
- entry_ptr->coll_access = TRUE;
- H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, NULL)
- } /* end if */
- else if (entry_ptr->coll_access)
- H5C__MOVE_TO_TOP_IN_COLL_LIST(cache_ptr, entry_ptr, NULL)
- } /* end if */
-#endif /* H5_HAVE_PARALLEL */
-
-#ifdef H5C_DO_TAGGING_SANITY_CHECKS
- {
- /* Verify tag value */
- if (cache_ptr->ignore_tags != TRUE) {
- haddr_t tag; /* Tag value */
-
- /* The entry is already in the cache, but make sure that the tag value
- * is still legal. This will ensure that had the entry NOT been in the
- * cache, tagging was still set up correctly and it would have received
- * a legal tag value after getting loaded from disk.
- */
-
- /* Get the tag */
- tag = H5CX_get_tag();
-
- if (H5C_verify_tag(entry_ptr->type->id, tag) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "tag verification failed")
- } /* end if */
- }
-#endif
-
- hit = TRUE;
- thing = (void *)entry_ptr;
- }
- else {
- /* must try to load the entry from disk. */
- hit = FALSE;
- if (NULL == (thing = H5C__load_entry(f,
-#ifdef H5_HAVE_PARALLEL
- coll_access,
-#endif /* H5_HAVE_PARALLEL */
- type, addr, udata)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't load entry")
-
- entry_ptr = (H5C_cache_entry_t *)thing;
- cache_ptr->entries_loaded_counter++;
-
- entry_ptr->ring = ring;
-#ifdef H5_HAVE_PARALLEL
- if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI) && entry_ptr->coll_access)
- H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, NULL)
-#endif /* H5_HAVE_PARALLEL */
-
- /* Apply tag to newly protected entry */
- if (H5C__tag_entry(cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, NULL, "Cannot tag metadata entry")
-
- /* If the entry is very large, and we are configured to allow it,
- * we may wish to perform a flash cache size increase.
- */
- if (cache_ptr->flash_size_increase_possible &&
- (entry_ptr->size > cache_ptr->flash_size_increase_threshold))
- if (H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__flash_increase_cache_size failed")
-
- if (cache_ptr->index_size >= cache_ptr->max_cache_size)
- empty_space = 0;
- else
- empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
-
- /* try to free up space if necessary and if evictions are permitted. Note
- * that if evictions are enabled, we will call H5C__make_space_in_cache()
- * regardless of whether the min_free_space requirement is met.
- */
- if (cache_ptr->evictions_enabled &&
- (((cache_ptr->index_size + entry_ptr->size) > cache_ptr->max_cache_size) ||
- ((empty_space + cache_ptr->clean_index_size) < cache_ptr->min_clean_size))) {
-
- size_t space_needed;
-
- if (empty_space <= entry_ptr->size)
- cache_ptr->cache_full = TRUE;
-
- if (cache_ptr->check_write_permitted != NULL) {
- if ((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Can't get write_permitted 1")
- else
- have_write_permitted = TRUE;
- } /* end if */
- else {
- write_permitted = cache_ptr->write_permitted;
- have_write_permitted = TRUE;
- } /* end else */
-
- HDassert(entry_ptr->size <= H5C_MAX_ENTRY_SIZE);
- space_needed = entry_ptr->size;
- if (space_needed > cache_ptr->max_cache_size)
- space_needed = cache_ptr->max_cache_size;
-
- /* Note that space_needed is just the amount of space
- * needed to insert the new entry without exceeding the cache
- * size limit. The subsequent call to H5C__make_space_in_cache()
- * may evict the entries required to free more or less space
- * depending on conditions. It MAY be less if the cache is
- * currently undersized, or more if the cache is oversized.
- *
- * The cache can exceed its maximum size limit via the following
- * mechanisms:
- *
- * First, it is possible for the cache to grow without
- * bound as long as entries are protected and not unprotected.
- *
- * Second, when writes are not permitted it is also possible
- * for the cache to grow without bound.
- *
- * Third, the user may choose to disable evictions -- causing
- * the cache to grow without bound until evictions are
- * re-enabled.
- *
- * Finally, we usually don't check to see if the cache is
- * oversized at the end of an unprotect. As a result, it is
- * possible to have a vastly oversized cache with no protected
- * entries as long as all the protects precede the unprotects.
- */
- if (H5C__make_space_in_cache(f, space_needed, write_permitted) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__make_space_in_cache failed")
- } /* end if */
-
- /* Insert the entry in the hash table.
- *
- * *******************************************
- *
- * Set the flush_me_last field
- * of the newly loaded entry before inserting it into the
- * index. Must do this, as the index tracked the number of
- * entries with the flush_last field set, but assumes that
- * the field will not change after insertion into the index.
- *
- * Note that this means that the H5C__FLUSH_LAST_FLAG flag
- * is ignored if the entry is already in cache.
- */
- entry_ptr->flush_me_last = flush_last;
-
- H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, NULL)
- if (entry_ptr->is_dirty && !entry_ptr->in_slist)
- H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, NULL)
-
- /* insert the entry in the data structures used by the replacement
- * policy. We are just going to take it out again when we update
- * the replacement policy for a protect, but this simplifies the
- * code. If we do this often enough, we may want to optimize this.
- */
- H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, NULL)
-
- /* Record that the entry was loaded, to trigger a notify callback later */
- /* (After the entry is fully added to the cache) */
- was_loaded = TRUE;
- } /* end else */
-
- HDassert(entry_ptr->addr == addr);
- HDassert(entry_ptr->type == type);
-
- if (entry_ptr->is_protected) {
- if (read_only && entry_ptr->is_read_only) {
- HDassert(entry_ptr->ro_ref_count > 0);
- (entry_ptr->ro_ref_count)++;
- } /* end if */
- else
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Target already protected & not read only?!?")
- } /* end if */
- else {
- H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, NULL)
-
- entry_ptr->is_protected = TRUE;
- if (read_only) {
- entry_ptr->is_read_only = TRUE;
- entry_ptr->ro_ref_count = 1;
- } /* end if */
- entry_ptr->dirtied = FALSE;
- } /* end else */
-
- H5C__UPDATE_CACHE_HIT_RATE_STATS(cache_ptr, hit)
- H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit)
-
- ret_value = thing;
-
- if (cache_ptr->evictions_enabled &&
- (cache_ptr->size_decreased ||
- (cache_ptr->resize_enabled && (cache_ptr->cache_accesses >= cache_ptr->resize_ctl.epoch_length)))) {
-
- if (!have_write_permitted) {
- if (cache_ptr->check_write_permitted != NULL) {
- if ((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Can't get write_permitted")
- else
- have_write_permitted = TRUE;
- }
- else {
- write_permitted = cache_ptr->write_permitted;
- have_write_permitted = TRUE;
- }
- }
-
- if (cache_ptr->resize_enabled && (cache_ptr->cache_accesses >= cache_ptr->resize_ctl.epoch_length))
- if (H5C__auto_adjust_cache_size(f, write_permitted) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Cache auto-resize failed")
-
- if (cache_ptr->size_decreased) {
- cache_ptr->size_decreased = FALSE;
-
- /* check to see if the cache is now oversized due to the cache
- * size reduction. If it is, try to evict enough entries to
- * bring the cache size down to the current maximum cache size.
- *
- * Also, if the min_clean_size requirement is not met, we
- * should also call H5C__make_space_in_cache() to bring us
- * into compliance.
- */
- if (cache_ptr->index_size >= cache_ptr->max_cache_size)
- empty_space = 0;
- else
- empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
-
- if ((cache_ptr->index_size > cache_ptr->max_cache_size) ||
- ((empty_space + cache_ptr->clean_index_size) < cache_ptr->min_clean_size)) {
-
- if (cache_ptr->index_size > cache_ptr->max_cache_size)
- cache_ptr->cache_full = TRUE;
-
- if (H5C__make_space_in_cache(f, (size_t)0, write_permitted) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__make_space_in_cache failed")
- }
- } /* end if */
- }
-
- /* If we loaded the entry and the entry's type has a 'notify' callback,
- * send an 'after load' notice now that the entry is fully integrated
- * into the cache and protected. We must wait until it is protected so
- * it is not evicted during the notify callback.
- */
- if (was_loaded)
- if (entry_ptr->type->notify && (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_LOAD, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, NULL,
- "can't notify client about entry inserted into cache")
-
-#ifdef H5_HAVE_PARALLEL
- /* Make sure the size of the collective entries in the cache remain in check */
- if (coll_access) {
- if (H5P_USER_TRUE == H5F_COLL_MD_READ(f)) {
- if (cache_ptr->max_cache_size * 80 < cache_ptr->coll_list_size * 100)
- if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "can't clear collective metadata entries")
- } /* end if */
- else {
- if (cache_ptr->max_cache_size * 40 < cache_ptr->coll_list_size * 100)
- if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "can't clear collective metadata entries")
- } /* end else */
- } /* end if */
-#endif /* H5_HAVE_PARALLEL */
-
-done:
-#ifdef H5C_DO_EXTREME_SANITY_CHECKS
- if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
- H5C__validate_lru_list(cache_ptr) < 0)
- HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "an extreme sanity check failed on exit")
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_protect() */
-
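The make-space test above, which also reappears after an auto-resize, reduces to a single predicate: evict when the index exceeds the maximum cache size, or when clean bytes plus empty bytes fall below the minimum clean size. A minimal standalone sketch of that predicate (the function name is illustrative, not part of H5C):

    #include <stddef.h>

    /* Returns nonzero when the cache must evict entries to regain
     * compliance with its size limits.
     */
    static int
    cache_needs_space(size_t index_size, size_t clean_index_size,
                      size_t max_cache_size, size_t min_clean_size)
    {
        size_t empty_space;

        if (index_size >= max_cache_size)
            empty_space = 0;
        else
            empty_space = max_cache_size - index_size;

        return (index_size > max_cache_size) ||
               ((empty_space + clean_index_size) < min_clean_size);
    }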
-/*-------------------------------------------------------------------------
- *
* Function: H5C_reset_cache_hit_rate_stats()
*
* Purpose: Reset the cache hit rate computation fields.
@@ -2640,7 +1064,6 @@ done:
} /* H5C_set_evictions_enabled() */
/*-------------------------------------------------------------------------
- *
* Function: H5C_set_slist_enabled()
*
* Purpose: Enable or disable the slist as directed.
@@ -2766,426 +1189,6 @@ done:
} /* H5C_set_slist_enabled() */
/*-------------------------------------------------------------------------
- * Function: H5C_unpin_entry()
- *
- * Purpose: Unpin a cache entry. The entry can be either protected or
- * unprotected at the time of call, but must be pinned.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer
- * 3/22/06
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5C_unpin_entry(void *_entry_ptr)
-{
- H5C_t *cache_ptr;
- H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)_entry_ptr; /* Pointer to entry to unpin */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- /* Sanity check */
- HDassert(entry_ptr);
- cache_ptr = entry_ptr->cache_ptr;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
-#ifdef H5C_DO_EXTREME_SANITY_CHECKS
- if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
- H5C__validate_lru_list(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
- /* Unpin the entry */
- if (H5C__unpin_entry_from_client(cache_ptr, entry_ptr, TRUE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry from client")
-
-done:
-#ifdef H5C_DO_EXTREME_SANITY_CHECKS
- if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
- H5C__validate_lru_list(cache_ptr) < 0)
- HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_unpin_entry() */
-
-/*-------------------------------------------------------------------------
- * Function: H5C_unprotect
- *
- * Purpose: Undo an H5C_protect() call -- specifically, mark the
- * entry as unprotected, remove it from the protected list,
- * and give it back to the replacement policy.
- *
- * The TYPE and ADDR arguments must be the same as those in
- * the corresponding call to H5C_protect() and the THING
- * argument must be the value returned by that call to
- * H5C_protect().
- *
- * If the deleted flag is TRUE, simply remove the target entry
- * from the cache, clear it, and free it without writing it to
- * disk.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer
- * 6/2/04
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags)
-{
- H5C_t *cache_ptr;
- hbool_t deleted;
- hbool_t dirtied;
- hbool_t set_flush_marker;
- hbool_t pin_entry;
- hbool_t unpin_entry;
- hbool_t free_file_space;
- hbool_t take_ownership;
- hbool_t was_clean;
-#ifdef H5_HAVE_PARALLEL
- hbool_t clear_entry = FALSE;
-#endif /* H5_HAVE_PARALLEL */
- H5C_cache_entry_t *entry_ptr;
- H5C_cache_entry_t *test_entry_ptr;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- deleted = ((flags & H5C__DELETED_FLAG) != 0);
- dirtied = ((flags & H5C__DIRTIED_FLAG) != 0);
- set_flush_marker = ((flags & H5C__SET_FLUSH_MARKER_FLAG) != 0);
- pin_entry = ((flags & H5C__PIN_ENTRY_FLAG) != 0);
- unpin_entry = ((flags & H5C__UNPIN_ENTRY_FLAG) != 0);
- free_file_space = ((flags & H5C__FREE_FILE_SPACE_FLAG) != 0);
- take_ownership = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0);
-
- HDassert(f);
- HDassert(f->shared);
-
- cache_ptr = f->shared->cache;
-
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(H5F_addr_defined(addr));
- HDassert(thing);
- HDassert(!(pin_entry && unpin_entry));
-
- /* deleted flag must accompany free_file_space */
- HDassert((!free_file_space) || (deleted));
-
- /* deleted flag must accompany take_ownership */
- HDassert((!take_ownership) || (deleted));
-
- /* can't have both free_file_space & take_ownership */
- HDassert(!(free_file_space && take_ownership));
-
- entry_ptr = (H5C_cache_entry_t *)thing;
- HDassert(entry_ptr->addr == addr);
-
- /* also set the dirtied variable if the dirtied field is set in
- * the entry.
- */
- dirtied |= entry_ptr->dirtied;
- was_clean = !(entry_ptr->is_dirty);
-
-#ifdef H5C_DO_EXTREME_SANITY_CHECKS
- if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
- H5C__validate_lru_list(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
- /* If the entry has multiple read-only protects, just decrement
- * ro_ref_count. Don't actually unprotect until the ref count
- * drops to zero.
- */
- if (entry_ptr->ro_ref_count > 1) {
- /* Sanity check */
- HDassert(entry_ptr->is_protected);
- HDassert(entry_ptr->is_read_only);
-
- if (dirtied)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Read only entry modified??")
-
- /* Reduce the RO ref count */
- (entry_ptr->ro_ref_count)--;
-
- /* Pin or unpin the entry as requested. */
- if (pin_entry) {
- /* Pin the entry from a client */
- if (H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client")
- }
- else if (unpin_entry) {
- /* Unpin the entry from a client */
- if (H5C__unpin_entry_from_client(cache_ptr, entry_ptr, FALSE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry by client")
- } /* end if */
- }
- else {
- if (entry_ptr->is_read_only) {
- /* Sanity check */
- HDassert(entry_ptr->ro_ref_count == 1);
-
- if (dirtied)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Read only entry modified??")
-
- entry_ptr->is_read_only = FALSE;
- entry_ptr->ro_ref_count = 0;
- } /* end if */
-
-#ifdef H5_HAVE_PARALLEL
- /* When the H5C code is used to implement the metadata cache in the
- * PHDF5 case, only the cache on process 0 is allowed to write to file.
- * All the other metadata caches must hold dirty entries until they
- * are told that the entries are clean.
- *
- * The clear_on_unprotect flag in the H5C_cache_entry_t structure
- * exists to deal with the case in which an entry is protected when
- * its cache receives word that the entry is now clean. In this case,
- * the clear_on_unprotect flag is set, and the entry is flushed with
- * the H5C__FLUSH_CLEAR_ONLY_FLAG.
- *
- * All this is a bit awkward, but until the metadata cache entries
- * are contiguous, with only one dirty flag, we have to let the supplied
- * functions deal with resetting the is_dirty flag.
- */
- if (entry_ptr->clear_on_unprotect) {
- /* Sanity check */
- HDassert(entry_ptr->is_dirty);
-
- entry_ptr->clear_on_unprotect = FALSE;
- if (!dirtied)
- clear_entry = TRUE;
- } /* end if */
-#endif /* H5_HAVE_PARALLEL */
-
- if (!entry_ptr->is_protected)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Entry already unprotected??")
-
- /* Mark the entry as dirty if appropriate */
- entry_ptr->is_dirty = (entry_ptr->is_dirty || dirtied);
- if (dirtied && entry_ptr->image_up_to_date) {
- entry_ptr->image_up_to_date = FALSE;
- if (entry_ptr->flush_dep_nparents > 0)
- if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "Can't propagate serialization status to fd parents")
- } /* end if */
-
- /* Check for newly dirtied entry */
- if (was_clean && entry_ptr->is_dirty) {
- /* Update index for newly dirtied entry */
- H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr, FAIL)
-
- /* If the entry's type has a 'notify' callback send a
- * 'entry dirtied' notice now that the entry is fully
- * integrated into the cache.
- */
- if (entry_ptr->type->notify &&
- (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag set")
-
- /* Propagate the flush dep dirty flag up the flush dependency chain
- * if appropriate
- */
- if (entry_ptr->flush_dep_nparents > 0)
- if (H5C__mark_flush_dep_dirty(entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
- } /* end if */
- /* Check for newly clean entry */
- else if (!was_clean && !entry_ptr->is_dirty) {
-
- /* If the entry's type has a 'notify' callback send a
- * 'entry cleaned' notice now that the entry is fully
- * integrated into the cache.
- */
- if (entry_ptr->type->notify &&
- (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify client about entry dirty flag cleared")
-
- /* Propagate the flush dep clean flag up the flush dependency chain
- * if appropriate
- */
- if (entry_ptr->flush_dep_nparents > 0)
- if (H5C__mark_flush_dep_clean(entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep clean flag")
- } /* end else-if */
-
- /* Pin or unpin the entry as requested. */
- if (pin_entry) {
- /* Pin the entry from a client */
- if (H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client")
- }
- else if (unpin_entry) {
- /* Unpin the entry from a client */
- if (H5C__unpin_entry_from_client(cache_ptr, entry_ptr, FALSE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry by client")
- } /* end if */
-
- /* H5C__UPDATE_RP_FOR_UNPROTECT will place the unprotected entry on
- * the pinned entry list if entry_ptr->is_pinned is TRUE.
- */
- H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, FAIL)
-
- entry_ptr->is_protected = FALSE;
-
- /* if the entry is dirty, 'or' its flush_marker with the set flush flag,
- * and then add it to the skip list if it isn't there already.
- */
- if (entry_ptr->is_dirty) {
- entry_ptr->flush_marker |= set_flush_marker;
- if (!entry_ptr->in_slist)
- /* this is a no-op if cache_ptr->slist_enabled is FALSE */
- H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
- } /* end if */
-
- /* This implementation of the "deleted" option is a bit inefficient, as
- * we re-insert the entry to be deleted into the replacement policy
- * data structures, only to remove them again. Depending on how often
- * we do this, we may want to optimize a bit.
- */
- if (deleted) {
- unsigned flush_flags = (H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__FLUSH_INVALIDATE_FLAG);
-
- /* verify that the target entry is in the cache. */
- H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL)
-
- if (test_entry_ptr == NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?")
- else if (test_entry_ptr != entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL,
- "hash table contains multiple entries for addr?!?")
-
- /* Set the 'free file space' flag for the flush, if needed */
- if (free_file_space)
- flush_flags |= H5C__FREE_FILE_SPACE_FLAG;
-
- /* Set the "take ownership" flag for the flush, if needed */
- if (take_ownership)
- flush_flags |= H5C__TAKE_OWNERSHIP_FLAG;
-
- /* Delete the entry from the skip list on destroy */
- flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG;
-
- HDassert((!cache_ptr->slist_enabled) || (((!was_clean) || dirtied) == (entry_ptr->in_slist)));
-
- if (H5C__flush_single_entry(f, entry_ptr, flush_flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't flush entry")
- } /* end if */
-#ifdef H5_HAVE_PARALLEL
- else if (clear_entry) {
- /* Verify that the target entry is in the cache. */
- H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL)
-
- if (test_entry_ptr == NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?")
- else if (test_entry_ptr != entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL,
- "hash table contains multiple entries for addr?!?")
-
- if (H5C__flush_single_entry(f, entry_ptr,
- H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't clear entry")
- } /* end else if */
-#endif /* H5_HAVE_PARALLEL */
- }
-
- H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr)
-
-done:
-#ifdef H5C_DO_EXTREME_SANITY_CHECKS
- if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
- H5C__validate_lru_list(cache_ptr) < 0)
- HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_unprotect() */
-
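The flags decoded at the top of H5C_unprotect() are independent bits combined with bitwise OR; the sanity checks enforce the legal combinations (H5C__DELETED_FLAG must accompany H5C__FREE_FILE_SPACE_FLAG and H5C__TAKE_OWNERSHIP_FLAG, which are mutually exclusive). A hypothetical caller sketch, assuming the H5C private headers are in scope:

    /* Discard a modified entry and release its file space on unprotect.
     * discard_modified_entry() is illustrative, not an H5C routine.
     */
    static herr_t
    discard_modified_entry(H5F_t *f, haddr_t addr, void *thing)
    {
        unsigned flags = H5C__DIRTIED_FLAG | H5C__DELETED_FLAG | H5C__FREE_FILE_SPACE_FLAG;

        return H5C_unprotect(f, addr, thing, flags);
    }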
-/*-------------------------------------------------------------------------
- *
- * Function: H5C_unsettle_entry_ring
- *
- * Purpose: Advise the metadata cache that the specified entry's free space
- * manager ring is no longer settled (if it was on entry).
- *
- * If the target free space manager ring is already
- * unsettled, do nothing, and return SUCCEED.
- *
- * If the target free space manager ring is settled, and
- * we are not in the process of a file shutdown, mark
- * the ring as unsettled, and return SUCCEED.
- *
- * If the target free space manager is settled, and we
- * are in the process of a file shutdown, post an error
- * message, and return FAIL.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * January 3, 2017
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5C_unsettle_entry_ring(void *_entry)
-{
- H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_entry; /* Entry whose ring to unsettle */
- H5C_t *cache; /* Cache for file */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- /* Sanity checks */
- HDassert(entry);
- HDassert(entry->ring != H5C_RING_UNDEFINED);
- HDassert((H5C_RING_USER == entry->ring) || (H5C_RING_RDFSM == entry->ring) ||
- (H5C_RING_MDFSM == entry->ring));
- cache = entry->cache_ptr;
- HDassert(cache);
- HDassert(cache->magic == H5C__H5C_T_MAGIC);
-
- switch (entry->ring) {
- case H5C_RING_USER:
- /* Do nothing */
- break;
-
- case H5C_RING_RDFSM:
- if (cache->rdfsm_settled) {
- if (cache->flush_in_progress || cache->close_warning_received)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected rdfsm ring unsettle")
- cache->rdfsm_settled = FALSE;
- } /* end if */
- break;
-
- case H5C_RING_MDFSM:
- if (cache->mdfsm_settled) {
- if (cache->flush_in_progress || cache->close_warning_received)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected mdfsm ring unsettle")
- cache->mdfsm_settled = FALSE;
- } /* end if */
- break;
-
- default:
- HDassert(FALSE); /* this should be unreachable */
- break;
- } /* end switch */
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_unsettle_entry_ring() */
-
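The RDFSM and MDFSM cases above apply the same rule: a settled ring may only become unsettled while no flush or file close is in progress. A minimal sketch of that rule, with illustrative names:

    #include <stdbool.h>

    /* Returns 0 on success, -1 if a settled ring would be unsettled
     * during a flush or file close (the error case above).
     */
    static int
    unsettle_ring(bool *settled, bool flush_in_progress, bool close_warning_received)
    {
        if (*settled) {
            if (flush_in_progress || close_warning_received)
                return -1;
            *settled = false;
        }
        return 0;
    }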
-/*-------------------------------------------------------------------------
* Function: H5C_unsettle_ring()
*
* Purpose: Advise the metadata cache that the specified free space
@@ -3386,3764 +1389,6 @@ done:
} /* H5C_validate_resize_config() */
/*-------------------------------------------------------------------------
- * Function: H5C_create_flush_dependency()
- *
- * Purpose: Initiates a parent<->child entry flush dependency. The parent
- * entry must be pinned or protected at the time of call, and must
- * have all dependencies removed before the cache can shut down.
- *
- * Note: Flush dependencies in the cache indicate that a child entry
- * must be flushed to the file before its parent. (This is
- * currently used to implement Single-Writer/Multiple-Reader (SWMR)
- * I/O access for data structures in the file).
- *
- * Creating a flush dependency between two entries will also pin
- * the parent entry.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * 3/05/09
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5C_create_flush_dependency(void *parent_thing, void *child_thing)
-{
- H5C_t *cache_ptr;
- H5C_cache_entry_t *parent_entry = (H5C_cache_entry_t *)parent_thing; /* Ptr to parent thing's entry */
- H5C_cache_entry_t *child_entry = (H5C_cache_entry_t *)child_thing; /* Ptr to child thing's entry */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- /* Sanity checks */
- HDassert(parent_entry);
- HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(H5F_addr_defined(parent_entry->addr));
- HDassert(child_entry);
- HDassert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(H5F_addr_defined(child_entry->addr));
- cache_ptr = parent_entry->cache_ptr;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr == child_entry->cache_ptr);
-#ifndef NDEBUG
- /* Make sure the parent is not already a parent */
- {
- unsigned u;
-
- for (u = 0; u < child_entry->flush_dep_nparents; u++)
- HDassert(child_entry->flush_dep_parent[u] != parent_entry);
- } /* end block */
-#endif
-
- /* More sanity checks */
- if (child_entry == parent_entry)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Child entry flush dependency parent can't be itself")
- if (!(parent_entry->is_protected || parent_entry->is_pinned))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Parent entry isn't pinned or protected")
-
- /* Check for parent not pinned */
- if (!parent_entry->is_pinned) {
- /* Sanity check */
- HDassert(parent_entry->flush_dep_nchildren == 0);
- HDassert(!parent_entry->pinned_from_client);
- HDassert(!parent_entry->pinned_from_cache);
-
- /* Pin the parent entry */
- parent_entry->is_pinned = TRUE;
- H5C__UPDATE_STATS_FOR_PIN(cache_ptr, parent_entry)
- } /* end if */
-
- /* Mark the entry as pinned from the cache's action (possibly redundantly) */
- parent_entry->pinned_from_cache = TRUE;
-
- /* Check if we need to resize the child's parent array */
- if (child_entry->flush_dep_nparents >= child_entry->flush_dep_parent_nalloc) {
- if (child_entry->flush_dep_parent_nalloc == 0) {
- /* Array does not exist yet, allocate it */
- HDassert(!child_entry->flush_dep_parent);
-
- if (NULL == (child_entry->flush_dep_parent =
- H5FL_SEQ_MALLOC(H5C_cache_entry_ptr_t, H5C_FLUSH_DEP_PARENT_INIT)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
- "memory allocation failed for flush dependency parent list")
- child_entry->flush_dep_parent_nalloc = H5C_FLUSH_DEP_PARENT_INIT;
- } /* end if */
- else {
- /* Resize existing array */
- HDassert(child_entry->flush_dep_parent);
-
- if (NULL == (child_entry->flush_dep_parent =
- H5FL_SEQ_REALLOC(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent,
- 2 * child_entry->flush_dep_parent_nalloc)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
- "memory allocation failed for flush dependency parent list")
- child_entry->flush_dep_parent_nalloc *= 2;
- } /* end else */
- cache_ptr->entry_fd_height_change_counter++;
- } /* end if */
-
- /* Add the dependency to the child's parent array */
- child_entry->flush_dep_parent[child_entry->flush_dep_nparents] = parent_entry;
- child_entry->flush_dep_nparents++;
-
- /* Increment parent's number of children */
- parent_entry->flush_dep_nchildren++;
-
- /* Adjust the number of dirty children */
- if (child_entry->is_dirty) {
- /* Sanity check */
- HDassert(parent_entry->flush_dep_ndirty_children < parent_entry->flush_dep_nchildren);
-
- parent_entry->flush_dep_ndirty_children++;
-
- /* If the parent has a 'notify' callback, send a 'child entry dirtied' notice */
- if (parent_entry->type->notify &&
- (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_DIRTIED, parent_entry) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify parent about child entry dirty flag set")
- } /* end if */
-
- /* Adjust the parent's number of unserialized children. Note
- * that it is possible for an entry to be clean and unserialized.
- */
- if (!child_entry->image_up_to_date) {
- HDassert(parent_entry->flush_dep_nunser_children < parent_entry->flush_dep_nchildren);
-
- parent_entry->flush_dep_nunser_children++;
-
- /* If the parent has a 'notify' callback, send a 'child entry unserialized' notice */
- if (parent_entry->type->notify &&
- (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED, parent_entry) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify parent about child entry serialized flag reset")
- } /* end if */
-
- /* Post-conditions, for successful operation */
- HDassert(parent_entry->is_pinned);
- HDassert(parent_entry->flush_dep_nchildren > 0);
- HDassert(child_entry->flush_dep_parent);
- HDassert(child_entry->flush_dep_nparents > 0);
- HDassert(child_entry->flush_dep_parent_nalloc > 0);
-#ifndef NDEBUG
- H5C__assert_flush_dep_nocycle(parent_entry, child_entry);
-#endif
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_create_flush_dependency() */
-
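The parent-array management above is a standard geometric growth policy: allocate H5C_FLUSH_DEP_PARENT_INIT slots on first use, then double on overflow, giving O(n) amortized cost for n insertions. A self-contained sketch of the same policy using malloc/realloc in place of the H5FL free-list allocators (all names illustrative):

    #include <stdlib.h>

    #define PARENT_INIT 8 /* stand-in for H5C_FLUSH_DEP_PARENT_INIT */

    static int
    append_parent(void ***parents, unsigned *nparents, unsigned *nalloc, void *parent)
    {
        if (*nparents >= *nalloc) {
            unsigned new_nalloc = (0 == *nalloc) ? PARENT_INIT : (2 * *nalloc);
            void   **tmp        = realloc(*parents, new_nalloc * sizeof(void *));

            if (NULL == tmp)
                return -1;
            *parents = tmp;
            *nalloc  = new_nalloc;
        }
        (*parents)[(*nparents)++] = parent;
        return 0;
    }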
-/*-------------------------------------------------------------------------
- * Function: H5C_destroy_flush_dependency()
- *
- * Purpose: Terminates a parent<->child entry flush dependency. The
- * parent entry must be pinned.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * 3/05/09
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5C_destroy_flush_dependency(void *parent_thing, void *child_thing)
-{
- H5C_t *cache_ptr;
- H5C_cache_entry_t *parent_entry = (H5C_cache_entry_t *)parent_thing; /* Ptr to parent entry */
- H5C_cache_entry_t *child_entry = (H5C_cache_entry_t *)child_thing; /* Ptr to child entry */
- unsigned u; /* Local index variable */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- /* Sanity checks */
- HDassert(parent_entry);
- HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(H5F_addr_defined(parent_entry->addr));
- HDassert(child_entry);
- HDassert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(H5F_addr_defined(child_entry->addr));
- cache_ptr = parent_entry->cache_ptr;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr == child_entry->cache_ptr);
-
- /* Usage checks */
- if (!parent_entry->is_pinned)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry isn't pinned")
- if (NULL == child_entry->flush_dep_parent)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL,
- "Child entry doesn't have a flush dependency parent array")
- if (0 == parent_entry->flush_dep_nchildren)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL,
- "Parent entry flush dependency ref. count has no child dependencies")
-
- /* Search for parent in child's parent array. This is a linear search
- * because we do not expect large numbers of parents. If this changes, we
- * may wish to change the parent array to a skip list */
- for (u = 0; u < child_entry->flush_dep_nparents; u++)
- if (child_entry->flush_dep_parent[u] == parent_entry)
- break;
- if (u == child_entry->flush_dep_nparents)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL,
- "Parent entry isn't a flush dependency parent for child entry")
-
- /* Remove parent entry from child's parent array */
- if (u < (child_entry->flush_dep_nparents - 1))
- HDmemmove(&child_entry->flush_dep_parent[u], &child_entry->flush_dep_parent[u + 1],
- (child_entry->flush_dep_nparents - u - 1) * sizeof(child_entry->flush_dep_parent[0]));
- child_entry->flush_dep_nparents--;
-
- /* Adjust parent entry's nchildren and unpin parent if it goes to zero */
- parent_entry->flush_dep_nchildren--;
- if (0 == parent_entry->flush_dep_nchildren) {
- /* Sanity check */
- HDassert(parent_entry->pinned_from_cache);
-
- /* Check if we should unpin parent entry now */
- if (!parent_entry->pinned_from_client)
- if (H5C__unpin_entry_real(cache_ptr, parent_entry, TRUE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry")
-
- /* Mark the entry as unpinned from the cache's action */
- parent_entry->pinned_from_cache = FALSE;
- } /* end if */
-
- /* Adjust parent entry's ndirty_children */
- if (child_entry->is_dirty) {
- /* Sanity check */
- HDassert(parent_entry->flush_dep_ndirty_children > 0);
-
- parent_entry->flush_dep_ndirty_children--;
-
- /* If the parent has a 'notify' callback, send a 'child entry cleaned' notice */
- if (parent_entry->type->notify &&
- (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_CLEANED, parent_entry) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify parent about child entry dirty flag reset")
- } /* end if */
-
- /* adjust parent entry's number of unserialized children */
- if (!child_entry->image_up_to_date) {
- HDassert(parent_entry->flush_dep_nunser_children > 0);
-
- parent_entry->flush_dep_nunser_children--;
-
- /* If the parent has a 'notify' callback, send a 'child entry serialized' notice */
- if (parent_entry->type->notify &&
- (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_SERIALIZED, parent_entry) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify parent about child entry serialized flag set")
- } /* end if */
-
- /* Shrink or free the parent array if appropriate */
- if (child_entry->flush_dep_nparents == 0) {
- child_entry->flush_dep_parent = H5FL_SEQ_FREE(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent);
- child_entry->flush_dep_parent_nalloc = 0;
- } /* end if */
- else if (child_entry->flush_dep_parent_nalloc > H5C_FLUSH_DEP_PARENT_INIT &&
- child_entry->flush_dep_nparents <= (child_entry->flush_dep_parent_nalloc / 4)) {
- if (NULL == (child_entry->flush_dep_parent =
- H5FL_SEQ_REALLOC(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent,
- child_entry->flush_dep_parent_nalloc / 4)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
- "memory allocation failed for flush dependency parent list")
- child_entry->flush_dep_parent_nalloc /= 4;
- } /* end if */
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_destroy_flush_dependency() */
-
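Removal above is the mirror image of the growth policy: a linear search, an HDmemmove() over the removed slot, then free the array when it empties, or shrink it to a quarter of its allocation once it is no more than a quarter full. A companion sketch to the growth example after H5C_create_flush_dependency() (reuses the illustrative PARENT_INIT; shrink failure is tolerated here, whereas the real code reports it):

    #include <stdlib.h>
    #include <string.h>

    static int
    remove_parent(void ***parents, unsigned *nparents, unsigned *nalloc, void *parent)
    {
        unsigned u;

        /* Linear search; parent counts are expected to stay small */
        for (u = 0; u < *nparents; u++)
            if ((*parents)[u] == parent)
                break;
        if (u == *nparents)
            return -1; /* not a parent of this child */

        memmove(&(*parents)[u], &(*parents)[u + 1],
                (*nparents - u - 1) * sizeof(void *));
        (*nparents)--;

        if (0 == *nparents) {
            free(*parents);
            *parents = NULL;
            *nalloc  = 0;
        }
        else if (*nalloc > PARENT_INIT && *nparents <= (*nalloc / 4)) {
            void **tmp = realloc(*parents, (*nalloc / 4) * sizeof(void *));

            if (NULL != tmp) { /* shrinking is an optimization only */
                *parents = tmp;
                *nalloc /= 4;
            }
        }
        return 0;
    }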
-/*************************************************************************/
-/**************************** Private Functions: *************************/
-/*************************************************************************/
-
-/*-------------------------------------------------------------------------
- * Function: H5C__pin_entry_from_client()
- *
- * Purpose: Internal routine to pin a cache entry from a client action.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * 3/26/09
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__pin_entry_from_client(H5C_t
-#if !H5C_COLLECT_CACHE_STATS
- H5_ATTR_UNUSED
-#endif
- *cache_ptr,
- H5C_cache_entry_t *entry_ptr)
-{
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- /* Sanity checks */
- HDassert(cache_ptr);
- HDassert(entry_ptr);
- HDassert(entry_ptr->is_protected);
-
- /* Check if the entry is already pinned */
- if (entry_ptr->is_pinned) {
- /* Check if the entry was pinned through an explicit pin from a client */
- if (entry_ptr->pinned_from_client)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "entry is already pinned")
- } /* end if */
- else {
- entry_ptr->is_pinned = TRUE;
-
- H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr)
- } /* end else */
-
- /* Mark that the entry was pinned through an explicit pin from a client */
- entry_ptr->pinned_from_client = TRUE;
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__pin_entry_from_client() */
-
-/*-------------------------------------------------------------------------
- * Function: H5C__unpin_entry_real()
- *
- * Purpose: Internal routine to unpin a cache entry.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * 1/6/18
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__unpin_entry_real(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp)
-{
- herr_t ret_value = SUCCEED; /* Return value */
-
-#ifdef H5C_DO_SANITY_CHECKS
- FUNC_ENTER_PACKAGE
-#else
- FUNC_ENTER_PACKAGE_NOERR
-#endif
-
- /* Sanity checking */
- HDassert(cache_ptr);
- HDassert(entry_ptr);
- HDassert(entry_ptr->is_pinned);
-
- /* If requested, update the replacement policy if the entry is not protected */
- if (update_rp && !entry_ptr->is_protected)
- H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, FAIL)
-
- /* Unpin the entry now */
- entry_ptr->is_pinned = FALSE;
-
- /* Update the stats for an unpin operation */
- H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr)
-
-#ifdef H5C_DO_SANITY_CHECKS
-done:
-#endif
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__unpin_entry_real() */
-
-/*-------------------------------------------------------------------------
- * Function: H5C__unpin_entry_from_client()
- *
- * Purpose: Internal routine to unpin a cache entry from a client action.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * 3/24/09
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__unpin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp)
-{
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- /* Sanity checking */
- HDassert(cache_ptr);
- HDassert(entry_ptr);
-
- /* Error checking (should be sanity checks?) */
- if (!entry_ptr->is_pinned)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "entry isn't pinned")
- if (!entry_ptr->pinned_from_client)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "entry wasn't pinned by cache client")
-
- /* Check if the entry is not pinned from a flush dependency */
- if (!entry_ptr->pinned_from_cache)
- if (H5C__unpin_entry_real(cache_ptr, entry_ptr, update_rp) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "can't unpin entry")
-
- /* Mark the entry as explicitly unpinned by the client */
- entry_ptr->pinned_from_client = FALSE;
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__unpin_entry_from_client() */
-
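The pin/unpin helpers above track two independent pin sources: an explicit client pin (pinned_from_client) and an implicit cache pin held for flush dependencies (pinned_from_cache). An entry becomes unpinned only when neither source still holds it. A minimal sketch of that state rule (types and names illustrative):

    #include <stdbool.h>

    typedef struct {
        bool is_pinned;
        bool pinned_from_client;
        bool pinned_from_cache;
    } pin_state_t;

    /* Drop the client's pin; the entry stays pinned while the cache
     * (i.e., a flush dependency) still holds its own pin.
     */
    static void
    release_client_pin(pin_state_t *entry)
    {
        entry->pinned_from_client = false;
        if (!entry->pinned_from_cache)
            entry->is_pinned = false;
    }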
-/*-------------------------------------------------------------------------
- *
- * Function: H5C__auto_adjust_cache_size
- *
- * Purpose: Obtain the current full cache hit rate, and compare it
- * with the hit rate thresholds for modifying cache size.
- * If one of the thresholds has been crossed, adjust the
- * size of the cache accordingly.
- *
- * The function then resets the full cache hit rate
- * statistics, and exits.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * an attempt to flush a protected item.
- *
- * Programmer: John Mainzer, 10/7/04
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted)
-{
- H5C_t *cache_ptr = f->shared->cache;
- hbool_t reentrant_call = FALSE;
- hbool_t inserted_epoch_marker = FALSE;
- size_t new_max_cache_size = 0;
- size_t old_max_cache_size = 0;
- size_t new_min_clean_size = 0;
- size_t old_min_clean_size = 0;
- double hit_rate;
- enum H5C_resize_status status = in_spec; /* will change if needed */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- HDassert(f);
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->cache_accesses >= cache_ptr->resize_ctl.epoch_length);
- HDassert(0.0 <= cache_ptr->resize_ctl.min_clean_fraction);
- HDassert(cache_ptr->resize_ctl.min_clean_fraction <= 100.0);
-
- /* Check to see if cache_ptr->resize_in_progress is TRUE. If it is,
- * this is a re-entrant call via a client callback invoked during the
- * resize process. To avoid infinite recursion, set reentrant_call to
- * TRUE, and goto done.
- */
- if (cache_ptr->resize_in_progress) {
- reentrant_call = TRUE;
- HGOTO_DONE(SUCCEED)
- } /* end if */
-
- cache_ptr->resize_in_progress = TRUE;
-
- if (!cache_ptr->resize_enabled)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Auto cache resize disabled")
-
- HDassert((cache_ptr->resize_ctl.incr_mode != H5C_incr__off) ||
- (cache_ptr->resize_ctl.decr_mode != H5C_decr__off));
-
- if (H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate")
-
- HDassert((0.0 <= hit_rate) && (hit_rate <= 1.0));
-
- switch (cache_ptr->resize_ctl.incr_mode) {
- case H5C_incr__off:
- if (cache_ptr->size_increase_possible)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "size_increase_possible but H5C_incr__off?!?!?")
- break;
-
- case H5C_incr__threshold:
- if (hit_rate < cache_ptr->resize_ctl.lower_hr_threshold) {
- if (!cache_ptr->size_increase_possible)
- status = increase_disabled;
- else if (cache_ptr->max_cache_size >= cache_ptr->resize_ctl.max_size) {
- HDassert(cache_ptr->max_cache_size == cache_ptr->resize_ctl.max_size);
- status = at_max_size;
- }
- else if (!cache_ptr->cache_full)
- status = not_full;
- else {
- new_max_cache_size =
- (size_t)(((double)(cache_ptr->max_cache_size)) * cache_ptr->resize_ctl.increment);
-
- /* clip to max size if necessary */
- if (new_max_cache_size > cache_ptr->resize_ctl.max_size)
- new_max_cache_size = cache_ptr->resize_ctl.max_size;
-
- /* clip to max increment if necessary */
- if (cache_ptr->resize_ctl.apply_max_increment &&
- ((cache_ptr->max_cache_size + cache_ptr->resize_ctl.max_increment) <
- new_max_cache_size))
- new_max_cache_size = cache_ptr->max_cache_size + cache_ptr->resize_ctl.max_increment;
-
- status = increase;
- }
- }
- break;
-
- default:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode")
- }
-
- /* If the decr_mode is either age out or age out with threshold, we
- * must run the marker maintenance code, whether we run the size
- * reduction code or not. We do this in two places -- here we
- * insert a new marker if the number of active epoch markers
- * is less than the current epochs before eviction, and after
- * the ageout call, we cycle the markers.
- *
- * However, we can't call the ageout code or cycle the markers
- * unless there was a full complement of markers in place on
- * entry. The inserted_epoch_marker flag is used to track this.
- */
-
- if (((cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out) ||
- (cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out_with_threshold)) &&
- (cache_ptr->epoch_markers_active < cache_ptr->resize_ctl.epochs_before_eviction)) {
-
- if (H5C__autoadjust__ageout__insert_new_marker(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't insert new epoch marker")
-
- inserted_epoch_marker = TRUE;
- }
-
- /* don't run the cache size decrease code unless the cache size
- * increase code is disabled, or the size increase code sees no need
- * for action. In either case, status == in_spec at this point.
- */
-
- if (status == in_spec) {
- switch (cache_ptr->resize_ctl.decr_mode) {
- case H5C_decr__off:
- break;
-
- case H5C_decr__threshold:
- if (hit_rate > cache_ptr->resize_ctl.upper_hr_threshold) {
- if (!cache_ptr->size_decrease_possible)
- status = decrease_disabled;
- else if (cache_ptr->max_cache_size <= cache_ptr->resize_ctl.min_size) {
- HDassert(cache_ptr->max_cache_size == cache_ptr->resize_ctl.min_size);
- status = at_min_size;
- }
- else {
- new_max_cache_size =
- (size_t)(((double)(cache_ptr->max_cache_size)) * cache_ptr->resize_ctl.decrement);
-
- /* clip to min size if necessary */
- if (new_max_cache_size < cache_ptr->resize_ctl.min_size)
- new_max_cache_size = cache_ptr->resize_ctl.min_size;
-
- /* clip to max decrement if necessary */
- if (cache_ptr->resize_ctl.apply_max_decrement &&
- ((cache_ptr->resize_ctl.max_decrement + new_max_cache_size) <
- cache_ptr->max_cache_size))
- new_max_cache_size =
- cache_ptr->max_cache_size - cache_ptr->resize_ctl.max_decrement;
-
- status = decrease;
- }
- }
- break;
-
- case H5C_decr__age_out_with_threshold:
- case H5C_decr__age_out:
- if (!inserted_epoch_marker) {
- if (!cache_ptr->size_decrease_possible)
- status = decrease_disabled;
- else {
- if (H5C__autoadjust__ageout(f, hit_rate, &status, &new_max_cache_size,
- write_permitted) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ageout code failed")
- } /* end else */
- } /* end if */
- break;
-
- default:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode")
- }
- }
-
- /* cycle the epoch markers here if appropriate */
- if (((cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out) ||
- (cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out_with_threshold)) &&
- !inserted_epoch_marker)
- /* move last epoch marker to the head of the LRU list */
- if (H5C__autoadjust__ageout__cycle_epoch_marker(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error cycling epoch marker")
-
- if ((status == increase) || (status == decrease)) {
- old_max_cache_size = cache_ptr->max_cache_size;
- old_min_clean_size = cache_ptr->min_clean_size;
-
- new_min_clean_size =
- (size_t)((double)new_max_cache_size * (cache_ptr->resize_ctl.min_clean_fraction));
-
- /* new_min_clean_size is a size_t, and thus is non-negative by
- * definition, so no lower-bound check is needed.
- */
- HDassert(new_min_clean_size <= new_max_cache_size);
- HDassert(cache_ptr->resize_ctl.min_size <= new_max_cache_size);
- HDassert(new_max_cache_size <= cache_ptr->resize_ctl.max_size);
-
- cache_ptr->max_cache_size = new_max_cache_size;
- cache_ptr->min_clean_size = new_min_clean_size;
-
- if (status == increase)
- cache_ptr->cache_full = FALSE;
- else if (status == decrease)
- cache_ptr->size_decreased = TRUE;
-
- /* update flash cache size increase fields as appropriate */
- if (cache_ptr->flash_size_increase_possible) {
- switch (cache_ptr->resize_ctl.flash_incr_mode) {
- case H5C_flash_incr__off:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
- "flash_size_increase_possible but H5C_flash_incr__off?!")
- break;
-
- case H5C_flash_incr__add_space:
- cache_ptr->flash_size_increase_threshold =
- (size_t)(((double)(cache_ptr->max_cache_size)) *
- (cache_ptr->resize_ctl.flash_threshold));
- break;
-
- default: /* should be unreachable */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
- break;
- }
- }
- }
-
- if (cache_ptr->resize_ctl.rpt_fcn != NULL)
- (cache_ptr->resize_ctl.rpt_fcn)(cache_ptr, H5C__CURR_AUTO_RESIZE_RPT_FCN_VER, hit_rate, status,
- old_max_cache_size, new_max_cache_size, old_min_clean_size,
- new_min_clean_size);
-
- if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
- /* this should be impossible... */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed")
-
-done:
- /* Sanity checks */
- HDassert(cache_ptr->resize_in_progress);
- if (!reentrant_call)
- cache_ptr->resize_in_progress = FALSE;
- HDassert((!reentrant_call) || (cache_ptr->resize_in_progress));
-
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__auto_adjust_cache_size() */
-
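For the H5C_incr__threshold case above, the new maximum is the old maximum scaled by the configured increment, clipped first to the absolute maximum size and then to the maximum per-epoch increment. A standalone sketch of that computation (names illustrative); e.g., cur_max = 4 MiB with increment = 2.0 yields 8 MiB unless a clip applies:

    #include <stddef.h>

    static size_t
    threshold_increase(size_t cur_max, double increment, size_t max_size,
                       size_t max_increment, int apply_max_increment)
    {
        size_t new_max = (size_t)((double)cur_max * increment);

        /* clip to max size if necessary */
        if (new_max > max_size)
            new_max = max_size;

        /* clip to max increment if necessary */
        if (apply_max_increment && (cur_max + max_increment) < new_max)
            new_max = cur_max + max_increment;

        return new_max;
    }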
-/*-------------------------------------------------------------------------
- *
- * Function: H5C__autoadjust__ageout
- *
- * Purpose: Implement the ageout automatic cache size decrement
- * algorithm. Note that while this code evicts aged out
- * entries, the code does not change the maximum cache size.
- * Instead, the function simply computes the new value (if
- * any change is indicated) and reports this value in
- * *new_max_cache_size_ptr.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * an attempt to flush a protected item.
- *
- * Programmer: John Mainzer, 11/18/04
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__autoadjust__ageout(H5F_t *f, double hit_rate, enum H5C_resize_status *status_ptr,
- size_t *new_max_cache_size_ptr, hbool_t write_permitted)
-{
- H5C_t *cache_ptr = f->shared->cache;
- size_t test_size;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- HDassert(f);
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert((status_ptr) && (*status_ptr == in_spec));
- HDassert((new_max_cache_size_ptr) && (*new_max_cache_size_ptr == 0));
-
- /* remove excess epoch markers if any */
- if (cache_ptr->epoch_markers_active > cache_ptr->resize_ctl.epochs_before_eviction)
- if (H5C__autoadjust__ageout__remove_excess_markers(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't remove excess epoch markers")
-
- if ((cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out) ||
- ((cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out_with_threshold) &&
- (hit_rate >= cache_ptr->resize_ctl.upper_hr_threshold))) {
-
- if (cache_ptr->max_cache_size > cache_ptr->resize_ctl.min_size) {
- /* evict aged out cache entries if appropriate... */
- if (H5C__autoadjust__ageout__evict_aged_out_entries(f, write_permitted) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error flushing aged out entries")
-
- /* ... and then reduce cache size if appropriate */
- if (cache_ptr->index_size < cache_ptr->max_cache_size) {
- if (cache_ptr->resize_ctl.apply_empty_reserve) {
- test_size =
- (size_t)(((double)cache_ptr->index_size) / (1 - cache_ptr->resize_ctl.empty_reserve));
- if (test_size < cache_ptr->max_cache_size) {
- *status_ptr = decrease;
- *new_max_cache_size_ptr = test_size;
- }
- }
- else {
- *status_ptr = decrease;
- *new_max_cache_size_ptr = cache_ptr->index_size;
- }
-
- if (*status_ptr == decrease) {
- /* clip to min size if necessary */
- if (*new_max_cache_size_ptr < cache_ptr->resize_ctl.min_size)
- *new_max_cache_size_ptr = cache_ptr->resize_ctl.min_size;
-
- /* clip to max decrement if necessary */
- if ((cache_ptr->resize_ctl.apply_max_decrement) &&
- ((cache_ptr->resize_ctl.max_decrement + *new_max_cache_size_ptr) <
- cache_ptr->max_cache_size))
- *new_max_cache_size_ptr =
- cache_ptr->max_cache_size - cache_ptr->resize_ctl.max_decrement;
- }
- }
- }
- else
- *status_ptr = at_min_size;
- }
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__autoadjust__ageout() */
-
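With apply_empty_reserve set, the candidate size computed above is the smallest maximum that leaves a fraction empty_reserve (in [0, 1)) of the cache unused at the current index size, i.e. index_size / (1 - empty_reserve). A sketch with a worked example in the comment:

    #include <stddef.h>

    /* E.g. index_size = 9 MiB and empty_reserve = 0.1 gives a 10 MiB
     * target, leaving 10% of the cache empty at the current population.
     */
    static size_t
    ageout_target_size(size_t index_size, double empty_reserve)
    {
        return (size_t)((double)index_size / (1.0 - empty_reserve));
    }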
-/*-------------------------------------------------------------------------
- *
- * Function: H5C__autoadjust__ageout__cycle_epoch_marker
- *
- * Purpose: Remove the oldest epoch marker from the LRU list,
- * and reinsert it at the head of the LRU list. Also
- * remove the epoch marker's index from the head of the
- * ring buffer, and re-insert it at the tail of the ring
- * buffer.
- *
- * Return: SUCCEED on success/FAIL on failure.
- *
- * Programmer: John Mainzer, 11/22/04
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t *cache_ptr)
-{
- int i;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
- if (cache_ptr->epoch_markers_active <= 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "No active epoch markers on entry?!?!?")
-
- /* remove the last marker from both the ring buffer and the LRU list */
- i = cache_ptr->epoch_marker_ringbuf[cache_ptr->epoch_marker_ringbuf_first];
- cache_ptr->epoch_marker_ringbuf_first =
- (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
- if (cache_ptr->epoch_marker_ringbuf_size <= 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow")
-
- cache_ptr->epoch_marker_ringbuf_size -= 1;
- if (cache_ptr->epoch_marker_active[i] != TRUE)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
-
- H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr, (cache_ptr)->LRU_tail_ptr,
- (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size, (FAIL))
-
- /* now, re-insert it at the head of the LRU list, and at the tail of
- * the ring buffer.
- */
- HDassert(cache_ptr->epoch_markers[i].addr == (haddr_t)i);
- HDassert(cache_ptr->epoch_markers[i].next == NULL);
- HDassert(cache_ptr->epoch_markers[i].prev == NULL);
-
- cache_ptr->epoch_marker_ringbuf_last =
- (cache_ptr->epoch_marker_ringbuf_last + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
- cache_ptr->epoch_marker_ringbuf[cache_ptr->epoch_marker_ringbuf_last] = i;
- if (cache_ptr->epoch_marker_ringbuf_size >= H5C__MAX_EPOCH_MARKERS)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow")
-
- cache_ptr->epoch_marker_ringbuf_size += 1;
-
- H5C__DLL_PREPEND(&(cache_ptr->epoch_markers[i]), cache_ptr->LRU_head_ptr, cache_ptr->LRU_tail_ptr,
- cache_ptr->LRU_list_len, cache_ptr->LRU_list_size, FAIL)
-done:
-
- FUNC_LEAVE_NOAPI(ret_value)
-
-} /* H5C__autoadjust__ageout__cycle_epoch_marker() */
-
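The epoch-marker bookkeeping above is a fixed-capacity ring buffer of H5C__MAX_EPOCH_MARKERS + 1 slots: marker indices are consumed at *_first and appended at *_last, both advancing modulo the capacity. A self-contained sketch of the same push/pop discipline (names and the capacity constant are illustrative):

    #define MAX_MARKERS 10          /* stand-in for H5C__MAX_EPOCH_MARKERS */
    #define RB_CAP      (MAX_MARKERS + 1)

    typedef struct {
        int rb[RB_CAP];
        int first; /* slot of the oldest marker index */
        int last;  /* slot of the newest marker index */
        int size;
    } marker_ringbuf_t;

    /* Pop the oldest marker index; returns -1 on underflow */
    static int
    rb_pop_oldest(marker_ringbuf_t *r)
    {
        int i;

        if (r->size <= 0)
            return -1;
        i        = r->rb[r->first];
        r->first = (r->first + 1) % RB_CAP;
        r->size -= 1;
        return i;
    }

    /* Push a marker index at the tail; returns -1 on overflow */
    static int
    rb_push_newest(marker_ringbuf_t *r, int i)
    {
        if (r->size >= MAX_MARKERS)
            return -1;
        r->last        = (r->last + 1) % RB_CAP;
        r->rb[r->last] = i;
        r->size += 1;
        return 0;
    }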
-/*-------------------------------------------------------------------------
- *
- * Function: H5C__autoadjust__ageout__evict_aged_out_entries
- *
- * Purpose: Evict clean entries in the cache that haven't
- * been accessed for at least
- * cache_ptr->resize_ctl.epochs_before_eviction epochs,
- * and flush dirty entries that haven't been accessed for
- * that amount of time.
- *
- * Depending on configuration, the function will either
- * flush or evict all such entries, or all such entries it
- * encounters until it has freed the maximum amount of space
- * allowed under the maximum decrement.
- *
- * If we are running in parallel mode, writes may not be
- * permitted. If so, the function simply skips any dirty
- * entries it may encounter.
- *
- * The function makes no attempt to maintain the minimum
- * clean size, as there is no guarantee that the cache size
- * will be changed.
- *
- * If there is no cache size change, the minimum clean size
- * constraint will be met through a combination of clean
- * entries and free space in the cache.
- *
- * If there is a cache size reduction, the minimum clean size
- * will be re-calculated, and will be enforced the next time
- * we have to make space in the cache.
- *
- * Return: Non-negative on success/Negative on failure.
- *
- * Programmer: John Mainzer, 11/22/04
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t *f, hbool_t write_permitted)
-{
- H5C_t *cache_ptr = f->shared->cache;
- size_t eviction_size_limit;
- size_t bytes_evicted = 0;
- hbool_t prev_is_dirty = FALSE;
- hbool_t restart_scan;
- H5C_cache_entry_t *entry_ptr;
- H5C_cache_entry_t *next_ptr;
- H5C_cache_entry_t *prev_ptr;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- HDassert(f);
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
- /* If there is a limit on the amount that the cache size can be decreased
- * in any one round of the cache size reduction algorithm, load that
- * limit into eviction_size_limit. Otherwise, set eviction_size_limit
- * to the equivalent of infinity. The current size of the index will
- * do nicely.
- */
- if (cache_ptr->resize_ctl.apply_max_decrement)
- eviction_size_limit = cache_ptr->resize_ctl.max_decrement;
- else
- eviction_size_limit = cache_ptr->index_size; /* i.e. infinity */
-
- if (write_permitted) {
- restart_scan = FALSE;
- entry_ptr = cache_ptr->LRU_tail_ptr;
- while (entry_ptr != NULL && entry_ptr->type->id != H5AC_EPOCH_MARKER_ID &&
- bytes_evicted < eviction_size_limit) {
- hbool_t skipping_entry = FALSE;
-
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(!(entry_ptr->is_protected));
- HDassert(!(entry_ptr->is_read_only));
- HDassert((entry_ptr->ro_ref_count) == 0);
-
- next_ptr = entry_ptr->next;
- prev_ptr = entry_ptr->prev;
-
- if (prev_ptr != NULL)
- prev_is_dirty = prev_ptr->is_dirty;
-
- if (entry_ptr->is_dirty) {
- HDassert(!entry_ptr->prefetched_dirty);
-
- /* dirty corked entry is skipped */
- if (entry_ptr->tag_info && entry_ptr->tag_info->corked)
- skipping_entry = TRUE;
- else {
- /* reset entries_removed_counter and
- * last_entry_removed_ptr prior to the call to
- * H5C__flush_single_entry() so that we can spot
- * unexpected removals of entries from the cache,
- * and set the restart_scan flag if proceeding
- * would be likely to cause us to scan an entry
- * that is no longer in the cache.
- */
- cache_ptr->entries_removed_counter = 0;
- cache_ptr->last_entry_removed_ptr = NULL;
-
- if (H5C__flush_single_entry(f, entry_ptr, H5C__NO_FLAGS_SET) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
-
- if (cache_ptr->entries_removed_counter > 1 ||
- cache_ptr->last_entry_removed_ptr == prev_ptr)
- restart_scan = TRUE;
- } /* end else */
- } /* end if */
- else if (!entry_ptr->prefetched_dirty) {
- bytes_evicted += entry_ptr->size;
-
- if (H5C__flush_single_entry(
- f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
- } /* end else-if */
- else {
- HDassert(!entry_ptr->is_dirty);
- HDassert(entry_ptr->prefetched_dirty);
-
- skipping_entry = TRUE;
- } /* end else */
-
- if (prev_ptr != NULL) {
- if (skipping_entry)
- entry_ptr = prev_ptr;
- else if (restart_scan || (prev_ptr->is_dirty != prev_is_dirty) ||
- (prev_ptr->next != next_ptr) || (prev_ptr->is_protected) || (prev_ptr->is_pinned)) {
- /* Something has happened to the LRU -- start over
- * from the tail.
- */
- restart_scan = FALSE;
- entry_ptr = cache_ptr->LRU_tail_ptr;
-
- H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
- } /* end else-if */
- else
- entry_ptr = prev_ptr;
- } /* end if */
- else
- entry_ptr = NULL;
- } /* end while */
-
- /* For now at least, don't bother to maintain the minimum clean size,
- * as the cache should now be less than its maximum size. Due to
- * the vagaries of the cache size reduction algorithm, we may not
- * reduce the size of the cache.
- *
- * If we do, we will calculate a new minimum clean size, which will
- * be enforced the next time we try to make space in the cache.
- *
- * If we don't, no action is necessary, as we have just evicted
- * and/or flushed a bunch of entries and therefore the sum of the clean
- * and free space in the cache must be greater than or equal to the
- * min clean space requirement (assuming that requirement was met on
- * entry).
- */
- } /* end if */
- else /* ! write_permitted */ {
- /* Since we are not allowed to write, all we can do is evict
- * any clean entries that we may encounter before we either
- * hit the eviction size limit, or encounter the epoch marker.
- *
- * If we are operating read only, this isn't an issue, as there
- * will not be any dirty entries.
- *
- * If we are operating in R/W mode, all the dirty entries we
- * skip will be flushed the next time we attempt to make space
- * when writes are permitted. This may have some local
- * performance implications, but it shouldn't cause any net
- * slowdown.
- */
- HDassert(H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS);
- entry_ptr = cache_ptr->LRU_tail_ptr;
- while (entry_ptr != NULL && ((entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID) &&
- (bytes_evicted < eviction_size_limit)) {
- HDassert(!(entry_ptr->is_protected));
-
- prev_ptr = entry_ptr->prev;
-
- if (!(entry_ptr->is_dirty) && !(entry_ptr->prefetched_dirty))
- if (H5C__flush_single_entry(
- f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush clean entry")
-
- /* Just skip the entry if it is dirty, as we can't do
- * anything with it now since we can't write.
- *
- * Since only clean entries are flushed here, serialize() will
- * not be called, and thus we needn't test to see if the LRU
- * has been changed out from under us.
- */
- entry_ptr = prev_ptr;
- } /* end while */
- } /* end else */
-
- if (cache_ptr->index_size < cache_ptr->max_cache_size)
- cache_ptr->cache_full = FALSE;
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__autoadjust__ageout__evict_aged_out_entries() */
-
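Stripped of the flush machinery and restart-scan bookkeeping, the loop above walks from the LRU tail toward the head, stopping at the first epoch marker (entries beyond it have not aged out) or once the eviction byte limit is reached; in the no-write case only clean entries are evicted. A minimal sketch of that traversal (types illustrative; a real cache would unlink and free each victim):

    #include <stddef.h>

    typedef struct lru_node {
        struct lru_node *prev;      /* toward the LRU head (more recent) */
        size_t           size;
        int              is_marker; /* epoch marker sentinel */
        int              is_dirty;
    } lru_node;

    static size_t
    evict_clean_from_tail(lru_node *tail, size_t byte_limit)
    {
        size_t    evicted = 0;
        lru_node *entry   = tail;

        while (entry != NULL && !entry->is_marker && evicted < byte_limit) {
            lru_node *prev = entry->prev;

            if (!entry->is_dirty)
                evicted += entry->size; /* eviction point in a real cache */
            entry = prev;
        }
        return evicted;
    }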
-/*-------------------------------------------------------------------------
- *
- * Function: H5C__autoadjust__ageout__insert_new_marker
- *
- * Purpose: Find an unused marker cache entry, mark it as used, and
- * insert it at the head of the LRU list. Also add the
- * marker's index in the epoch_markers array to the tail
- * of the ring buffer.
- *
- * Return: SUCCEED on success/FAIL on failure.
- *
- * Programmer: John Mainzer, 11/19/04
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__autoadjust__ageout__insert_new_marker(H5C_t *cache_ptr)
-{
- int i;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
- if (cache_ptr->epoch_markers_active >= cache_ptr->resize_ctl.epochs_before_eviction)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Already have a full complement of markers")
-
- /* find an unused marker */
- i = 0;
- while (i < H5C__MAX_EPOCH_MARKERS && (cache_ptr->epoch_marker_active)[i])
- i++;
- if (i >= H5C__MAX_EPOCH_MARKERS)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't find unused marker")
-
- HDassert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i);
- HDassert(((cache_ptr->epoch_markers)[i]).next == NULL);
- HDassert(((cache_ptr->epoch_markers)[i]).prev == NULL);
-
- (cache_ptr->epoch_marker_active)[i] = TRUE;
-
- cache_ptr->epoch_marker_ringbuf_last =
- (cache_ptr->epoch_marker_ringbuf_last + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
- (cache_ptr->epoch_marker_ringbuf)[cache_ptr->epoch_marker_ringbuf_last] = i;
- if (cache_ptr->epoch_marker_ringbuf_size >= H5C__MAX_EPOCH_MARKERS)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow")
-
- cache_ptr->epoch_marker_ringbuf_size += 1;
-
- H5C__DLL_PREPEND(&(cache_ptr->epoch_markers[i]), cache_ptr->LRU_head_ptr, cache_ptr->LRU_tail_ptr,
- cache_ptr->LRU_list_len, cache_ptr->LRU_list_size, FAIL)
-
- cache_ptr->epoch_markers_active += 1;
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__autoadjust__ageout__insert_new_marker() */
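-
-/* A standalone model of the epoch marker ring buffer arithmetic above, with
- * hypothetical names, not HDF5 API.  The buffer is declared with one slot
- * more than its capacity and tracks an explicit size, so a full buffer can
- * be distinguished from an empty one.  Enqueueing advances the tail index
- * modulo the buffer length, exactly as in the code above:
- */
-#define N_MARKERS 10 /* stand-in for H5C__MAX_EPOCH_MARKERS */
-
-static int rb[N_MARKERS + 1]; /* marker indices, oldest first */
-static int rb_first = 0;      /* position of the oldest entry */
-static int rb_last  = -1;     /* position of the newest entry */
-static int rb_size  = 0;      /* current number of entries    */
-
-static int
-rb_enqueue(int marker_idx)
-{
-    if (rb_size >= N_MARKERS)
-        return -1; /* overflow, as checked above */
-    rb_last     = (rb_last + 1) % (N_MARKERS + 1);
-    rb[rb_last] = marker_idx;
-    rb_size += 1;
-    return 0;
-}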
-
-/*-------------------------------------------------------------------------
- *
- * Function: H5C__autoadjust__ageout__remove_all_markers
- *
- * Purpose: Remove all epoch markers from the LRU list and mark them
- * as inactive.
- *
- * Return: SUCCEED on success/FAIL on failure.
- *
- * Programmer: John Mainzer, 11/22/04
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__autoadjust__ageout__remove_all_markers(H5C_t *cache_ptr)
-{
- int ring_buf_index;
- int i;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
- while (cache_ptr->epoch_markers_active > 0) {
- /* get the index of the last epoch marker in the LRU list
- * and remove it from the ring buffer.
- */
-
- ring_buf_index = cache_ptr->epoch_marker_ringbuf_first;
- i = (cache_ptr->epoch_marker_ringbuf)[ring_buf_index];
-
- cache_ptr->epoch_marker_ringbuf_first =
- (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
-
- if (cache_ptr->epoch_marker_ringbuf_size <= 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow")
- cache_ptr->epoch_marker_ringbuf_size -= 1;
-
- if (cache_ptr->epoch_marker_active[i] != TRUE)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
-
- /* remove the epoch marker from the LRU list */
- H5C__DLL_REMOVE(&(cache_ptr->epoch_markers[i]), cache_ptr->LRU_head_ptr, cache_ptr->LRU_tail_ptr,
- cache_ptr->LRU_list_len, cache_ptr->LRU_list_size, FAIL)
-
- /* mark the epoch marker as unused. */
- cache_ptr->epoch_marker_active[i] = FALSE;
-
- HDassert(cache_ptr->epoch_markers[i].addr == (haddr_t)i);
- HDassert(cache_ptr->epoch_markers[i].next == NULL);
- HDassert(cache_ptr->epoch_markers[i].prev == NULL);
-
- /* decrement the number of active epoch markers */
- cache_ptr->epoch_markers_active -= 1;
-
- HDassert(cache_ptr->epoch_markers_active == cache_ptr->epoch_marker_ringbuf_size);
- }
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__autoadjust__ageout__remove_all_markers() */
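-
-/* Continuing the hypothetical ring buffer model above: removing the oldest
- * marker index, as done once per loop iteration above (and in the excess
- * marker removal below), advances the head index modulo the buffer length:
- */
-static int
-rb_dequeue(int *marker_idx_out)
-{
-    if (rb_size <= 0)
-        return -1; /* underflow, as checked above */
-    *marker_idx_out = rb[rb_first];
-    rb_first        = (rb_first + 1) % (N_MARKERS + 1);
-    rb_size -= 1;
-    return 0;
-}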
-
-/*-------------------------------------------------------------------------
- *
- * Function: H5C__autoadjust__ageout__remove_excess_markers
- *
- * Purpose: Remove epoch markers from the end of the LRU list and
- * mark them as inactive until the number of active markers
- * equals the current value of
- * cache_ptr->resize_ctl.epochs_before_eviction.
- *
- * Return: SUCCEED on success/FAIL on failure.
- *
- * Programmer: John Mainzer, 11/19/04
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__autoadjust__ageout__remove_excess_markers(H5C_t *cache_ptr)
-{
- int ring_buf_index;
- int i;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
- if (cache_ptr->epoch_markers_active <= cache_ptr->resize_ctl.epochs_before_eviction)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "no excess markers on entry")
-
- while (cache_ptr->epoch_markers_active > cache_ptr->resize_ctl.epochs_before_eviction) {
- /* get the index of the last epoch marker in the LRU list
- * and remove it from the ring buffer.
- */
- ring_buf_index = cache_ptr->epoch_marker_ringbuf_first;
- i = (cache_ptr->epoch_marker_ringbuf)[ring_buf_index];
-
- cache_ptr->epoch_marker_ringbuf_first =
- (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
-
- if (cache_ptr->epoch_marker_ringbuf_size <= 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow")
- cache_ptr->epoch_marker_ringbuf_size -= 1;
-
- if (cache_ptr->epoch_marker_active[i] != TRUE)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
-
- /* remove the epoch marker from the LRU list */
- H5C__DLL_REMOVE(&(cache_ptr->epoch_markers[i]), cache_ptr->LRU_head_ptr, cache_ptr->LRU_tail_ptr,
- cache_ptr->LRU_list_len, cache_ptr->LRU_list_size, FAIL)
-
- /* mark the epoch marker as unused. */
- cache_ptr->epoch_marker_active[i] = FALSE;
-
- HDassert(cache_ptr->epoch_markers[i].addr == (haddr_t)i);
- HDassert(cache_ptr->epoch_markers[i].next == NULL);
- HDassert(cache_ptr->epoch_markers[i].prev == NULL);
-
- /* decrement the number of active epoch markers */
- cache_ptr->epoch_markers_active -= 1;
-
- HDassert(cache_ptr->epoch_markers_active == cache_ptr->epoch_marker_ringbuf_size);
- }
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__autoadjust__ageout__remove_excess_markers() */
-
-/*-------------------------------------------------------------------------
- *
- * Function: H5C__flash_increase_cache_size
- *
- * Purpose: If there are not at least new_entry_size - old_entry_size
- * bytes of free space in the cache and the current
- * max_cache_size is less than cache_ptr->resize_ctl.max_size,
- * perform a flash increase in the cache size, reset the
- * full cache hit rate statistics, and exit.
- *
- * Return: Non-negative on success/Negative on failure.
- *
- * Programmer: John Mainzer, 12/31/07
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__flash_increase_cache_size(H5C_t *cache_ptr, size_t old_entry_size, size_t new_entry_size)
-{
- size_t new_max_cache_size = 0;
- size_t old_max_cache_size = 0;
- size_t new_min_clean_size = 0;
- size_t old_min_clean_size = 0;
- size_t space_needed;
- enum H5C_resize_status status = flash_increase; /* may change */
- double hit_rate;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->flash_size_increase_possible);
- HDassert(new_entry_size > cache_ptr->flash_size_increase_threshold);
- HDassert(old_entry_size < new_entry_size);
-
- if (old_entry_size >= new_entry_size)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "old_entry_size >= new_entry_size")
-
- space_needed = new_entry_size - old_entry_size;
- if (((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) &&
- (cache_ptr->max_cache_size < cache_ptr->resize_ctl.max_size)) {
- switch (cache_ptr->resize_ctl.flash_incr_mode) {
- case H5C_flash_incr__off:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
- "flash_size_increase_possible but H5C_flash_incr__off?!")
- break;
-
- case H5C_flash_incr__add_space:
- if (cache_ptr->index_size < cache_ptr->max_cache_size) {
- HDassert((cache_ptr->max_cache_size - cache_ptr->index_size) < space_needed);
- space_needed -= cache_ptr->max_cache_size - cache_ptr->index_size;
- }
- space_needed = (size_t)(((double)space_needed) * cache_ptr->resize_ctl.flash_multiple);
- new_max_cache_size = cache_ptr->max_cache_size + space_needed;
- break;
-
- default: /* should be unreachable */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
- break;
- }
-
- if (new_max_cache_size > cache_ptr->resize_ctl.max_size)
- new_max_cache_size = cache_ptr->resize_ctl.max_size;
- HDassert(new_max_cache_size > cache_ptr->max_cache_size);
-
- new_min_clean_size = (size_t)((double)new_max_cache_size * cache_ptr->resize_ctl.min_clean_fraction);
- HDassert(new_min_clean_size <= new_max_cache_size);
-
- old_max_cache_size = cache_ptr->max_cache_size;
- old_min_clean_size = cache_ptr->min_clean_size;
-
- cache_ptr->max_cache_size = new_max_cache_size;
- cache_ptr->min_clean_size = new_min_clean_size;
-
- /* update flash cache size increase fields as appropriate */
- HDassert(cache_ptr->flash_size_increase_possible);
-
- switch (cache_ptr->resize_ctl.flash_incr_mode) {
- case H5C_flash_incr__off:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
- "flash_size_increase_possible but H5C_flash_incr__off?!")
- break;
-
- case H5C_flash_incr__add_space:
- cache_ptr->flash_size_increase_threshold =
- (size_t)((double)cache_ptr->max_cache_size * cache_ptr->resize_ctl.flash_threshold);
- break;
-
- default: /* should be unreachable */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
- break;
- }
-
- /* note that we don't cycle the epoch markers. We can
- * argue either way as to whether we should, but for now
- * we don't.
- */
-
- if (cache_ptr->resize_ctl.rpt_fcn != NULL) {
- /* get the hit rate for the reporting function. Should still
- * be good as we haven't reset the hit rate statistics.
- */
- if (H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate")
-
- (cache_ptr->resize_ctl.rpt_fcn)(cache_ptr, H5C__CURR_AUTO_RESIZE_RPT_FCN_VER, hit_rate, status,
- old_max_cache_size, new_max_cache_size, old_min_clean_size,
- new_min_clean_size);
- }
-
- if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
- /* this should be impossible... */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed")
- }
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__flash_increase_cache_size() */
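-
-/* A worked example of the add_space arithmetic above, under assumed values:
- * max_cache_size = 4.0 MiB, index_size = 3.5 MiB, flash_multiple = 1.0,
- * min_clean_fraction = 0.5, and an entry growing by 1.0 MiB.  The 0.5 MiB of
- * existing free space is deducted before the multiplier is applied:
- *
- *     space_needed       = 1.0 MiB - (4.0 MiB - 3.5 MiB) = 0.5 MiB
- *     space_needed      *= flash_multiple               -> 0.5 MiB
- *     new_max_cache_size = 4.0 MiB + 0.5 MiB            = 4.5 MiB
- *     new_min_clean_size = 4.5 MiB * 0.5                = 2.25 MiB
- *     new threshold      = 4.5 MiB * flash_threshold
- *
- * with new_max_cache_size capped at resize_ctl.max_size if the sum exceeds it.
- */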
-
-/*-------------------------------------------------------------------------
- *
- * Function: H5C__flush_invalidate_cache
- *
- * Purpose: Flush and destroy the entries contained in the target
- * cache.
- *
- * If the cache contains protected entries, the function will
- * fail, as protected entries cannot be either flushed or
- * destroyed. However, all unprotected entries should be
- * flushed and destroyed before the function returns failure.
- *
- * While pinned entries can usually be flushed, they cannot
- * be destroyed. However, they should be unpinned when all
- * the entries that reference them have been destroyed (thus
- * reducing the pinned entry's reference count to 0, allowing
- * it to be unpinned).
- *
- * If pinned entries are present, the function makes repeated
- * passes through the cache, flushing all dirty entries
- * (including the pinned dirty entries where permitted) and
- * destroying all unpinned entries. This process is repeated
- * until either the cache is empty, or the number of pinned
- * entries stops decreasing on each pass.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * a request to flush all items and something was protected.
- *
- * Programmer: John Mainzer
- * 3/24/05
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__flush_invalidate_cache(H5F_t *f, unsigned flags)
-{
- H5C_t *cache_ptr;
- H5C_ring_t ring;
- herr_t ret_value = SUCCEED;
-
- FUNC_ENTER_PACKAGE
-
- HDassert(f);
- HDassert(f->shared);
- cache_ptr = f->shared->cache;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->slist_ptr);
- HDassert(cache_ptr->slist_enabled);
-
-#ifdef H5C_DO_SANITY_CHECKS
- {
- int32_t i;
- uint32_t index_len = 0;
- uint32_t slist_len = 0;
- size_t index_size = (size_t)0;
- size_t clean_index_size = (size_t)0;
- size_t dirty_index_size = (size_t)0;
- size_t slist_size = (size_t)0;
-
- HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
- HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
- HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
-
- for (i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
- index_len += cache_ptr->index_ring_len[i];
- index_size += cache_ptr->index_ring_size[i];
- clean_index_size += cache_ptr->clean_index_ring_size[i];
- dirty_index_size += cache_ptr->dirty_index_ring_size[i];
-
- slist_len += cache_ptr->slist_ring_len[i];
- slist_size += cache_ptr->slist_ring_size[i];
- } /* end for */
-
- HDassert(cache_ptr->index_len == index_len);
- HDassert(cache_ptr->index_size == index_size);
- HDassert(cache_ptr->clean_index_size == clean_index_size);
- HDassert(cache_ptr->dirty_index_size == dirty_index_size);
- HDassert(cache_ptr->slist_len == slist_len);
- HDassert(cache_ptr->slist_size == slist_size);
- }
-#endif /* H5C_DO_SANITY_CHECKS */
-
- /* remove ageout markers if present */
- if (cache_ptr->epoch_markers_active > 0)
- if (H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers")
-
- /* flush invalidate each ring, starting from the outermost ring and
- * working inward.
- */
- ring = H5C_RING_USER;
- while (ring < H5C_RING_NTYPES) {
- if (H5C__flush_invalidate_ring(f, ring, flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate ring failed")
- ring++;
- } /* end while */
-
-#ifndef NDEBUG
- /* Invariants, after destroying all entries in the hash table */
- if (!(flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG)) {
- HDassert(cache_ptr->index_size == 0);
- HDassert(cache_ptr->clean_index_size == 0);
- HDassert(cache_ptr->pel_len == 0);
- HDassert(cache_ptr->pel_size == 0);
- } /* end if */
- else {
- H5C_cache_entry_t *entry_ptr; /* Cache entry */
- unsigned u; /* Local index variable */
-
- /* All rings except ring 4 should be empty now */
- /* (Ring 4 has the superblock) */
- for (u = H5C_RING_USER; u < H5C_RING_SB; u++) {
- HDassert(cache_ptr->index_ring_len[u] == 0);
- HDassert(cache_ptr->index_ring_size[u] == 0);
- HDassert(cache_ptr->clean_index_ring_size[u] == 0);
- } /* end for */
-
- /* Check that any remaining pinned entries are in the superblock ring */
- entry_ptr = cache_ptr->pel_head_ptr;
- while (entry_ptr) {
- /* Check ring */
- HDassert(entry_ptr->ring == H5C_RING_SB);
-
- /* Advance to next entry in pinned entry list */
- entry_ptr = entry_ptr->next;
- } /* end while */
- } /* end else */
-
- HDassert(cache_ptr->dirty_index_size == 0);
- HDassert(cache_ptr->slist_len == 0);
- HDassert(cache_ptr->slist_size == 0);
- HDassert(cache_ptr->pl_len == 0);
- HDassert(cache_ptr->pl_size == 0);
- HDassert(cache_ptr->LRU_list_len == 0);
- HDassert(cache_ptr->LRU_list_size == 0);
-#endif
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__flush_invalidate_cache() */
-
-/*-------------------------------------------------------------------------
- * Function: H5C__flush_invalidate_ring
- *
- * Purpose: Flush and destroy the entries contained in the target
- * cache and ring.
- *
- * If the ring contains protected entries, the function will
- * fail, as protected entries cannot be either flushed or
- * destroyed. However, all unprotected entries should be
- * flushed and destroyed before the function returns failure.
- *
- * While pinned entries can usually be flushed, they cannot
- * be destroyed. However, they should be unpinned when all
- * the entries that reference them have been destroyed (thus
- * reducing the pinned entry's reference count to 0, allowing
- * it to be unpinned).
- *
- * If pinned entries are present, the function makes repeated
- * passes through the cache, flushing all dirty entries
- * (including the pinned dirty entries where permitted) and
- * destroying all unpinned entries. This process is repeated
- * until either the cache is empty, or the number of pinned
- * entries stops decreasing on each pass.
- *
- * If flush dependencies appear in the target ring, the
- * function makes repeated passes through the cache flushing
- * entries in flush dependency order.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * a request to flush all items and something was protected.
- *
- * Programmer: John Mainzer
- * 9/1/15
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
-{
- H5C_t *cache_ptr;
- hbool_t restart_slist_scan;
- uint32_t protected_entries = 0;
- int32_t i;
- uint32_t cur_ring_pel_len;
- uint32_t old_ring_pel_len;
- unsigned cooked_flags;
- unsigned evict_flags;
- H5SL_node_t *node_ptr = NULL;
- H5C_cache_entry_t *entry_ptr = NULL;
- H5C_cache_entry_t *next_entry_ptr = NULL;
-#ifdef H5C_DO_SANITY_CHECKS
- uint32_t initial_slist_len = 0;
- size_t initial_slist_size = 0;
-#endif /* H5C_DO_SANITY_CHECKS */
- herr_t ret_value = SUCCEED;
-
- FUNC_ENTER_PACKAGE
-
- HDassert(f);
- HDassert(f->shared);
-
- cache_ptr = f->shared->cache;
-
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->slist_enabled);
- HDassert(cache_ptr->slist_ptr);
- HDassert(ring > H5C_RING_UNDEFINED);
- HDassert(ring < H5C_RING_NTYPES);
-
- HDassert(cache_ptr->epoch_markers_active == 0);
-
- /* Filter out the flags that are not relevant to the flush/invalidate.
- */
- cooked_flags = flags & H5C__FLUSH_CLEAR_ONLY_FLAG;
- evict_flags = flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG;
-
- /* The flush procedure here is a bit strange.
- *
- * In the outer while loop we make at least one pass through the
- * cache, and then repeat until either all the pinned entries in
- * the ring unpin themselves, or until the number of pinned entries
- * in the ring stops declining. In this latter case, we scream and die.
- *
- * Since the fractal heap can dirty, resize, and/or move entries
- * in its flush callback, it is possible that the cache will still
- * contain dirty entries at this point. If so, we must make more
- * passes through the skip list to allow it to empty.
- *
- * Further, since clean entries can be dirtied, resized, and/or moved
- * as the result of a flush callback (either the entry's own, or that
- * of some other cache entry), we can no longer promise to flush
- * the cache entries in increasing address order.
- *
- * Instead, we make a pass through
- * the skip list, and then a pass through the "clean" entries,
- * repeating as needed. Thus it is quite possible that an
- * entry will be evicted from the cache only to be re-loaded later
- * in the flush process.
- *
- * The bottom line is that entries will probably be flushed in close
- * to increasing address order, but there are no guarantees.
- */
-
- /* compute the number of pinned entries in this ring */
- entry_ptr = cache_ptr->pel_head_ptr;
- cur_ring_pel_len = 0;
- while (entry_ptr != NULL) {
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->ring >= ring);
- if (entry_ptr->ring == ring)
- cur_ring_pel_len++;
-
- entry_ptr = entry_ptr->next;
- } /* end while */
- old_ring_pel_len = cur_ring_pel_len;
-
- while (cache_ptr->index_ring_len[ring] > 0) {
- /* first, try to flush-destroy any dirty entries. Do this by
- * making a scan through the slist. Note that new dirty entries
- * may be created by the flush callbacks. Thus it is possible
- * that the slist will not be empty after we finish the scan.
- */
-
-#ifdef H5C_DO_SANITY_CHECKS
- /* Depending on circumstances, H5C__flush_single_entry() will
- * remove dirty entries from the slist as it flushes them.
- * Thus for sanity checks we must make note of the initial
- * slist length and size before we do any flushes.
- */
- initial_slist_len = cache_ptr->slist_len;
- initial_slist_size = cache_ptr->slist_size;
-
- /* There is also the possibility that entries will be
- * dirtied, resized, moved, and/or removed from the cache
- * as the result of calls to the flush callbacks. We use
- * the slist_len_increase and slist_size_increase
- * fields in struct H5C_t to track these changes for purposes
- * of sanity checking.
- *
- * To this end, we must zero these fields before we start
- * the pass through the slist.
- */
- cache_ptr->slist_len_increase = 0;
- cache_ptr->slist_size_increase = 0;
-#endif /* H5C_DO_SANITY_CHECKS */
-
- /* Set the cache_ptr->slist_changed to false.
- *
- * This flag is set to TRUE by H5C__flush_single_entry if the slist
- * is modified by a pre_serialize, serialize, or notify callback.
- *
- * H5C__flush_invalidate_ring() uses this flag to detect any
- * modifications to the slist that might corrupt the scan of
- * the slist -- and restart the scan in this event.
- */
- cache_ptr->slist_changed = FALSE;
-
- /* this done, start the scan of the slist */
- restart_slist_scan = TRUE;
- while (restart_slist_scan || (node_ptr != NULL)) {
- if (restart_slist_scan) {
- restart_slist_scan = FALSE;
-
- /* Start at beginning of skip list */
- node_ptr = H5SL_first(cache_ptr->slist_ptr);
- if (node_ptr == NULL)
- /* the slist is empty -- break out of inner loop */
- break;
-
- /* Get cache entry for this node */
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- if (NULL == next_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
-
- HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(next_entry_ptr->is_dirty);
- HDassert(next_entry_ptr->in_slist);
- HDassert(next_entry_ptr->ring >= ring);
- } /* end if */
-
- entry_ptr = next_entry_ptr;
-
- /* It is possible that entries will be dirtied, resized,
- * flushed, or removed from the cache via the take ownership
- * flag as the result of pre_serialize or serialize callbacks.
- *
- * This in turn can corrupt the scan through the slist.
- *
- * We test for slist modifications in the pre_serialize
- * and serialize callbacks, and restart the scan of the
- * slist if we find them. However, it is best to do some extra
- * sanity checking just in case.
- */
- HDassert(entry_ptr != NULL);
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->in_slist);
- HDassert(entry_ptr->is_dirty);
- HDassert(entry_ptr->ring >= ring);
-
- /* increment node pointer now, before we delete its target
- * from the slist.
- */
- node_ptr = H5SL_next(node_ptr);
- if (node_ptr != NULL) {
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- if (NULL == next_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
-
- HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(next_entry_ptr->is_dirty);
- HDassert(next_entry_ptr->in_slist);
- HDassert(next_entry_ptr->ring >= ring);
- HDassert(entry_ptr != next_entry_ptr);
- } /* end if */
- else
- next_entry_ptr = NULL;
-
- /* Note that we now remove nodes from the slist as we flush
- * the associated entries, instead of leaving them there
- * until we are done, and then destroying all nodes in
- * the slist.
- *
- * While this optimization used to be easy, with the possibility
- * of new entries being added to the slist in the midst of the
- * flush, we must keep the slist in canonical form at all
- * times.
- */
- if (((!entry_ptr->flush_me_last) ||
- ((entry_ptr->flush_me_last) && (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
- (entry_ptr->flush_dep_nchildren == 0) && (entry_ptr->ring == ring)) {
- if (entry_ptr->is_protected) {
- /* We have major problems -- but let's flush
- * everything we can before we flag an error.
- */
- protected_entries++;
- } /* end if */
- else if (entry_ptr->is_pinned) {
- if (H5C__flush_single_entry(f, entry_ptr, H5C__DURING_FLUSH_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed")
-
- if (cache_ptr->slist_changed) {
- /* The slist has been modified by something
- * other than the simple removal of the
- * flushed entry after the flush.
- *
- * This has the potential to corrupt the
- * scan through the slist, so restart it.
- */
- restart_slist_scan = TRUE;
- cache_ptr->slist_changed = FALSE;
- H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr);
- } /* end if */
- } /* end else-if */
- else {
- if (H5C__flush_single_entry(f, entry_ptr,
- (cooked_flags | H5C__DURING_FLUSH_FLAG |
- H5C__FLUSH_INVALIDATE_FLAG |
- H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry flush destroy failed")
-
- if (cache_ptr->slist_changed) {
- /* The slist has been modified by something
- * other than the simple removal of the
- * flushed entry after the flush.
- *
- * This has the potential to corrupt the
- * scan through the slist, so restart it.
- */
- restart_slist_scan = TRUE;
- cache_ptr->slist_changed = FALSE;
- H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
- } /* end if */
- } /* end else */
- } /* end if */
- } /* end while loop scanning skip list */
-
-#ifdef H5C_DO_SANITY_CHECKS
- /* It is possible that entries were added to the slist during
- * the scan, either before or after the scan pointer. The following
- * asserts take this into account.
- *
- * Don't bother with the sanity checks if node_ptr != NULL, as
- * in this case we broke out of the loop because it got changed
- * out from under us.
- */
-
- if (node_ptr == NULL) {
- HDassert(cache_ptr->slist_len ==
- (uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase));
- HDassert(cache_ptr->slist_size ==
- (size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase));
- } /* end if */
-#endif /* H5C_DO_SANITY_CHECKS */
-
- /* Since we are doing a destroy, we must make a pass through
- * the hash table and try to flush-destroy all entries that
- * remain.
- *
- * It used to be that all entries remaining in the cache at
- * this point had to be clean, but with the fractal heap mods
- * this may not be the case. If so, we will flush entries out
- * in increasing address order.
- *
- * Writes to disk are possible here.
- */
-
- /* Reset the counters so that we can detect insertions, loads,
- * and moves caused by the pre_serialize and serialize calls.
- */
- cache_ptr->entries_loaded_counter = 0;
- cache_ptr->entries_inserted_counter = 0;
- cache_ptr->entries_relocated_counter = 0;
-
- next_entry_ptr = cache_ptr->il_head;
- while (next_entry_ptr != NULL) {
- entry_ptr = next_entry_ptr;
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->ring >= ring);
-
- next_entry_ptr = entry_ptr->il_next;
- HDassert((next_entry_ptr == NULL) || (next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC));
-
- if (((!entry_ptr->flush_me_last) ||
- (entry_ptr->flush_me_last && (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
- (entry_ptr->flush_dep_nchildren == 0) && (entry_ptr->ring == ring)) {
-
- if (entry_ptr->is_protected) {
- /* we have major problems -- but let's flush and
- * destroy everything we can before we flag an
- * error.
- */
- protected_entries++;
-
- if (!entry_ptr->in_slist)
- HDassert(!(entry_ptr->is_dirty));
- } /* end if */
- else if (!entry_ptr->is_pinned) {
- /* if *entry_ptr is dirty, it is possible
- * that one or more other entries may be
- * either removed from the cache, loaded
- * into the cache, or moved to a new location
- * in the file as a side effect of the flush.
- *
- * It's also possible that removing a clean
- * entry will remove the last child of a proxy
- * entry, allowing it to be removed also and
- * invalidating the next_entry_ptr.
- *
- * If either of these happen, and one of the target
- * or proxy entries happens to be the next entry in
- * the hash bucket, we could find ourselves
- * scanning a non-existent entry, scanning
- * through a different bucket, or skipping an entry.
- *
- * None of these are good, so restart the
- * scan at the head of the hash bucket
- * after the flush if we detect that the next_entry_ptr
- * becomes invalid.
- *
- * This is not as inefficient as it might seem,
- * as hash buckets typically have at most two
- * or three entries.
- */
- cache_ptr->entry_watched_for_removal = next_entry_ptr;
- if (H5C__flush_single_entry(f, entry_ptr,
- (cooked_flags | H5C__DURING_FLUSH_FLAG |
- H5C__FLUSH_INVALIDATE_FLAG |
- H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed")
-
- /* Restart the index list scan if necessary. Must
- * do this if the next entry is evicted, and also if
- * one or more entries are inserted, loaded, or moved
- * as these operations can result in part of the scan
- * being skipped -- which can cause a spurious failure
- * if this results in the size of the pinned entry
- * list failing to decline during the pass.
- */
- if (((NULL != next_entry_ptr) && (NULL == cache_ptr->entry_watched_for_removal)) ||
- (cache_ptr->entries_loaded_counter > 0) ||
- (cache_ptr->entries_inserted_counter > 0) ||
- (cache_ptr->entries_relocated_counter > 0)) {
-
- next_entry_ptr = cache_ptr->il_head;
-
- cache_ptr->entries_loaded_counter = 0;
- cache_ptr->entries_inserted_counter = 0;
- cache_ptr->entries_relocated_counter = 0;
-
- H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr)
- } /* end if */
- else
- cache_ptr->entry_watched_for_removal = NULL;
- } /* end if */
- } /* end if */
- } /* end while loop scanning index list */
-
- /* We can't do anything if entries are pinned. The
- * hope is that the entries will be unpinned as the
- * result of destroys of entries that reference them.
- *
- * We detect this by noting the change in the number
- * of pinned entries from pass to pass. If it stops
- * shrinking before it hits zero, we scream and die.
- */
- old_ring_pel_len = cur_ring_pel_len;
- entry_ptr = cache_ptr->pel_head_ptr;
- cur_ring_pel_len = 0;
-
- while (entry_ptr != NULL) {
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->ring >= ring);
-
- if (entry_ptr->ring == ring)
- cur_ring_pel_len++;
-
- entry_ptr = entry_ptr->next;
- } /* end while */
-
- /* Check if the number of pinned entries in the ring is positive, and
- * it is not declining. Scream and die if so.
- */
- if ((cur_ring_pel_len > 0) && (cur_ring_pel_len >= old_ring_pel_len)) {
- /* Don't error if allowed to have pinned entries remaining */
- if (evict_flags)
- HGOTO_DONE(TRUE)
-
- HGOTO_ERROR(
- H5E_CACHE, H5E_CANTFLUSH, FAIL,
- "Pinned entry count not decreasing, cur_ring_pel_len = %d, old_ring_pel_len = %d, ring = %d",
- (int)cur_ring_pel_len, (int)old_ring_pel_len, (int)ring)
- } /* end if */
-
- HDassert(protected_entries == cache_ptr->pl_len);
-
- if ((protected_entries > 0) && (protected_entries == cache_ptr->index_len))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL,
- "Only protected entries left in cache, protected_entries = %d",
- (int)protected_entries)
- } /* main while loop */
-
- /* Invariants, after destroying all entries in the ring */
- for (i = (int)H5C_RING_UNDEFINED; i <= (int)ring; i++) {
- HDassert(cache_ptr->index_ring_len[i] == 0);
- HDassert(cache_ptr->index_ring_size[i] == (size_t)0);
- HDassert(cache_ptr->clean_index_ring_size[i] == (size_t)0);
- HDassert(cache_ptr->dirty_index_ring_size[i] == (size_t)0);
-
- HDassert(cache_ptr->slist_ring_len[i] == 0);
- HDassert(cache_ptr->slist_ring_size[i] == (size_t)0);
- } /* end for */
-
- HDassert(protected_entries <= cache_ptr->pl_len);
-
- if (protected_entries > 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Cache has protected entries")
- else if (cur_ring_pel_len > 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't unpin all pinned entries in ring")
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__flush_invalidate_ring() */
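-
-/* The convergence logic of the main loop above, reduced to a skeleton with
- * hypothetical helper names:
- *
- *     while (entries_remain_in_ring()) {
- *         flush_destroy_slist_pass();    -- dirty entries, dependency order
- *         flush_destroy_index_pass();    -- whatever survives, clean included
- *
- *         old_pel_len = cur_pel_len;
- *         cur_pel_len = count_pinned_in_ring();
- *         if (cur_pel_len > 0 && cur_pel_len >= old_pel_len)
- *             fail();                    -- pinned count stopped shrinking
- *     }
- */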
-
-/*-------------------------------------------------------------------------
- *
- * Function: H5C__flush_ring
- *
- * Purpose: Flush the entries contained in the specified cache and
- * ring. All entries in rings outside the specified ring
- * must have been flushed on entry.
- *
- * If the cache contains protected entries in the specified
- * ring, the function will fail, as protected entries cannot
- * be flushed. However, all unprotected entries in the target
- * ring should be flushed before the function returns failure.
- *
- * If flush dependencies appear in the target ring, the
- * function makes repeated passes through the slist flushing
- * entries in flush dependency order.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * a request to flush all items and something was protected.
- *
- * Programmer: John Mainzer
- * 9/1/15
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
-{
- H5C_t *cache_ptr = f->shared->cache;
- hbool_t flushed_entries_last_pass;
- hbool_t flush_marked_entries;
- hbool_t ignore_protected;
- hbool_t tried_to_flush_protected_entry = FALSE;
- hbool_t restart_slist_scan;
- uint32_t protected_entries = 0;
- H5SL_node_t *node_ptr = NULL;
- H5C_cache_entry_t *entry_ptr = NULL;
- H5C_cache_entry_t *next_entry_ptr = NULL;
-#ifdef H5C_DO_SANITY_CHECKS
- uint32_t initial_slist_len = 0;
- size_t initial_slist_size = 0;
-#endif /* H5C_DO_SANITY_CHECKS */
- int i;
- herr_t ret_value = SUCCEED;
-
- FUNC_ENTER_PACKAGE
-
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->slist_enabled);
- HDassert(cache_ptr->slist_ptr);
- HDassert((flags & H5C__FLUSH_INVALIDATE_FLAG) == 0);
- HDassert(ring > H5C_RING_UNDEFINED);
- HDassert(ring < H5C_RING_NTYPES);
-
-#ifdef H5C_DO_EXTREME_SANITY_CHECKS
- if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
- H5C__validate_lru_list(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
- ignore_protected = ((flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0);
- flush_marked_entries = ((flags & H5C__FLUSH_MARKED_ENTRIES_FLAG) != 0);
-
- if (!flush_marked_entries)
- for (i = (int)H5C_RING_UNDEFINED; i < (int)ring; i++)
- HDassert(cache_ptr->slist_ring_len[i] == 0);
-
- HDassert(cache_ptr->flush_in_progress);
-
- /* When we are only flushing marked entries, the slist will usually
- * still contain entries when we have flushed everything we should.
- * Thus we track whether we have flushed any entries in the last
- * pass, and terminate if we haven't.
- */
- flushed_entries_last_pass = TRUE;
-
- /* Set the cache_ptr->slist_changed to false.
- *
- * This flag is set to TRUE by H5C__flush_single_entry if the
- * slist is modified by a pre_serialize, serialize, or notify callback.
- * H5C_flush_cache uses this flag to detect any modifications
- * to the slist that might corrupt the scan of the slist -- and
- * restart the scan in this event.
- */
- cache_ptr->slist_changed = FALSE;
-
- while ((cache_ptr->slist_ring_len[ring] > 0) && (protected_entries == 0) && (flushed_entries_last_pass)) {
- flushed_entries_last_pass = FALSE;
-
-#ifdef H5C_DO_SANITY_CHECKS
- /* For sanity checking, try to verify that the skip list has
- * the expected size and number of entries at the end of each
- * internal while loop (see below).
- *
- * Doing this gets a bit tricky, as depending on flags, we may
- * or may not flush all the entries in the slist.
- *
- * To make things more entertaining, with the advent of the
- * fractal heap, the entry serialize callback can cause entries
- * to be dirtied, resized, and/or moved. Also, the
- * pre_serialize callback can result in an entry being
- * removed from the cache via the take ownership flag.
- *
- * To deal with this, we first make note of the initial
- * skip list length and size:
- */
- initial_slist_len = cache_ptr->slist_len;
- initial_slist_size = cache_ptr->slist_size;
-
- /* As mentioned above, there is the possibility that
- * entries will be dirtied, resized, flushed, or removed
- * from the cache via the take ownership flag during
- * our pass through the skip list. To capture the number
- * of entries added, and the skip list size delta,
- * zero the slist_len_increase and slist_size_increase of
- * the cache's instance of H5C_t. These fields will be
- * updated elsewhere to account for slist insertions and/or
- * dirty entry size changes.
- */
- cache_ptr->slist_len_increase = 0;
- cache_ptr->slist_size_increase = 0;
-
- /* at the end of the loop, use these values to compute the
- * expected slist length and size and compare them with the
- * values recorded in the cache's instance of H5C_t.
- */
-#endif /* H5C_DO_SANITY_CHECKS */
-
- restart_slist_scan = TRUE;
- while ((restart_slist_scan) || (node_ptr != NULL)) {
- if (restart_slist_scan) {
- restart_slist_scan = FALSE;
-
- /* Start at beginning of skip list */
- node_ptr = H5SL_first(cache_ptr->slist_ptr);
- if (node_ptr == NULL)
- /* the slist is empty -- break out of inner loop */
- break;
-
- /* Get cache entry for this node */
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- if (NULL == next_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
-
- HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(next_entry_ptr->is_dirty);
- HDassert(next_entry_ptr->in_slist);
- } /* end if */
-
- entry_ptr = next_entry_ptr;
-
- /* With the advent of the fractal heap, the free space
- * manager, and the version 3 cache, it is possible
- * that the pre-serialize or serialize callback will
- * dirty, resize, or take ownership of other entries
- * in the cache.
- *
- * To deal with this, there is code to detect any
- * change in the skip list not directly under the control
- * of this function. If such modifications are detected,
- * we must re-start the scan of the skip list to avoid
- * the possibility that the target of the next_entry_ptr
- * may have been flushed or deleted from the cache.
- *
- * To verify that all such possibilities have been dealt
- * with, we do a bit of extra sanity checking on
- * entry_ptr.
- */
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->in_slist);
- HDassert(entry_ptr->is_dirty);
-
- if (!flush_marked_entries || entry_ptr->flush_marker)
- HDassert(entry_ptr->ring >= ring);
-
- /* Advance node pointer now, before we delete its target
- * from the slist.
- */
- node_ptr = H5SL_next(node_ptr);
- if (node_ptr != NULL) {
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- if (NULL == next_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
-
- HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(next_entry_ptr->is_dirty);
- HDassert(next_entry_ptr->in_slist);
-
- if (!flush_marked_entries || next_entry_ptr->flush_marker)
- HDassert(next_entry_ptr->ring >= ring);
-
- HDassert(entry_ptr != next_entry_ptr);
- } /* end if */
- else
- next_entry_ptr = NULL;
-
- if ((!flush_marked_entries || entry_ptr->flush_marker) &&
- ((!entry_ptr->flush_me_last) ||
- ((entry_ptr->flush_me_last) && ((cache_ptr->num_last_entries >= cache_ptr->slist_len) ||
- (flush_marked_entries && entry_ptr->flush_marker)))) &&
- ((entry_ptr->flush_dep_nchildren == 0) || (entry_ptr->flush_dep_ndirty_children == 0)) &&
- (entry_ptr->ring == ring)) {
-
- HDassert(entry_ptr->flush_dep_nunser_children == 0);
-
- if (entry_ptr->is_protected) {
- /* we probably have major problems -- but let's
- * flush everything we can before we decide
- * whether to flag an error.
- */
- tried_to_flush_protected_entry = TRUE;
- protected_entries++;
- } /* end if */
- else {
- if (H5C__flush_single_entry(f, entry_ptr, (flags | H5C__DURING_FLUSH_FLAG)) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry")
-
- if (cache_ptr->slist_changed) {
- /* The slist has been modified by something
- * other than the simple removal of the
- * flushed entry after the flush.
- *
- * This has the potential to corrupt the
- * scan through the slist, so restart it.
- */
- restart_slist_scan = TRUE;
- cache_ptr->slist_changed = FALSE;
- H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
- } /* end if */
-
- flushed_entries_last_pass = TRUE;
- } /* end else */
- } /* end if */
- } /* while ( ( restart_slist_scan ) || ( node_ptr != NULL ) ) */
-
-#ifdef H5C_DO_SANITY_CHECKS
- /* Verify that the slist size and length are as expected. */
- HDassert((uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase) ==
- cache_ptr->slist_len);
- HDassert((size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase) ==
- cache_ptr->slist_size);
-#endif /* H5C_DO_SANITY_CHECKS */
- } /* while */
-
- HDassert(protected_entries <= cache_ptr->pl_len);
-
- if (((cache_ptr->pl_len > 0) && !ignore_protected) || tried_to_flush_protected_entry)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "cache has protected items")
-
-#ifdef H5C_DO_SANITY_CHECKS
- if (!flush_marked_entries) {
- HDassert(cache_ptr->slist_ring_len[ring] == 0);
- HDassert(cache_ptr->slist_ring_size[ring] == 0);
- } /* end if */
-#endif /* H5C_DO_SANITY_CHECKS */
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__flush_ring() */
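-
-/* The restartable slist scan used above (and in H5C__flush_invalidate_ring()),
- * as a skeleton with hypothetical helper names.  The next node is fetched
- * before the current entry is flushed, since the flush removes that entry's
- * slist node, and the whole scan restarts whenever a callback modifies the
- * slist behind the scanner's back:
- *
- *     restart = TRUE;
- *     while (restart || node != NULL) {
- *         if (restart) {
- *             restart = FALSE;
- *             node    = slist_first();
- *         }
- *         entry = slist_item(node);
- *         node  = slist_next(node);   -- advance before flushing
- *         flush(entry);
- *         if (slist_changed) {        -- set by flush callbacks
- *             slist_changed = FALSE;
- *             restart       = TRUE;
- *         }
- *     }
- */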
-
-/*-------------------------------------------------------------------------
- *
- * Function: H5C__flush_single_entry
- *
- * Purpose: Flush or clear (and evict if requested) the cache entry
- * with the specified address and type. If the type is NULL,
- * any unprotected entry at the specified address will be
- * flushed (and possibly evicted).
- *
- * Attempts to flush a protected entry will result in an
- * error.
- *
- * If the H5C__FLUSH_INVALIDATE_FLAG flag is set, the entry will
- * be cleared and not flushed, and the call can't be part of a
- * sequence of flushes.
- *
- * The function silently does nothing if there is no entry
- * at the supplied address, or if the entry found has the
- * wrong type.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * an attempt to flush a protected item.
- *
- * Programmer: John Mainzer, 5/5/04
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
-{
- H5C_t *cache_ptr; /* Cache for file */
- hbool_t destroy; /* external flag */
- hbool_t clear_only; /* external flag */
- hbool_t free_file_space; /* external flag */
- hbool_t take_ownership; /* external flag */
- hbool_t del_from_slist_on_destroy; /* external flag */
- hbool_t during_flush; /* external flag */
- hbool_t write_entry; /* internal flag */
- hbool_t destroy_entry; /* internal flag */
- hbool_t generate_image; /* internal flag */
- hbool_t update_page_buffer; /* internal flag */
- hbool_t was_dirty;
- hbool_t suppress_image_entry_writes = FALSE;
- hbool_t suppress_image_entry_frees = FALSE;
- haddr_t entry_addr = HADDR_UNDEF;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- HDassert(f);
- cache_ptr = f->shared->cache;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(entry_ptr);
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->ring != H5C_RING_UNDEFINED);
- HDassert(entry_ptr->type);
-
- /* setup external flags from the flags parameter */
- destroy = ((flags & H5C__FLUSH_INVALIDATE_FLAG) != 0);
- clear_only = ((flags & H5C__FLUSH_CLEAR_ONLY_FLAG) != 0);
- free_file_space = ((flags & H5C__FREE_FILE_SPACE_FLAG) != 0);
- take_ownership = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0);
- del_from_slist_on_destroy = ((flags & H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) != 0);
- during_flush = ((flags & H5C__DURING_FLUSH_FLAG) != 0);
- generate_image = ((flags & H5C__GENERATE_IMAGE_FLAG) != 0);
- update_page_buffer = ((flags & H5C__UPDATE_PAGE_BUFFER_FLAG) != 0);
-
- /* Set the flag for destroying the entry, based on the 'take ownership'
- * and 'destroy' flags
- */
- if (take_ownership)
- destroy_entry = FALSE;
- else
- destroy_entry = destroy;
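-
- /* The interplay of the two flags above, summarized as a truth table:
- *
- *     destroy   take_ownership   =>  destroy_entry
- *     FALSE     FALSE                FALSE   (flush or clear in place)
- *     FALSE     TRUE                 FALSE
- *     TRUE      FALSE                TRUE    (evict and free the entry)
- *     TRUE      TRUE                 FALSE   (evict; caller keeps the entry)
- */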
-
- /* we will write the entry to disk if it exists, is dirty, and if the
- * clear only flag is not set.
- */
- if (entry_ptr->is_dirty && !clear_only)
- write_entry = TRUE;
- else
- write_entry = FALSE;
-
- /* if we have received a close warning, and we have been instructed to
- * generate a metadata cache image, and we have actually constructed
- * the entry images, set suppress_image_entry_frees to TRUE.
- *
- * Set suppress_image_entry_writes to TRUE if indicated by the
- * image_ctl flags.
- */
- if (cache_ptr->close_warning_received && cache_ptr->image_ctl.generate_image &&
- cache_ptr->num_entries_in_image > 0 && cache_ptr->image_entries != NULL) {
-
- /* Sanity checks */
- HDassert(entry_ptr->image_up_to_date || !(entry_ptr->include_in_image));
- HDassert(entry_ptr->image_ptr || !(entry_ptr->include_in_image));
- HDassert((!clear_only) || !(entry_ptr->include_in_image));
- HDassert((!take_ownership) || !(entry_ptr->include_in_image));
- HDassert((!free_file_space) || !(entry_ptr->include_in_image));
-
- suppress_image_entry_frees = TRUE;
-
- if (cache_ptr->image_ctl.flags & H5C_CI__SUPRESS_ENTRY_WRITES)
- suppress_image_entry_writes = TRUE;
- } /* end if */
-
- /* run initial sanity checks */
-#ifdef H5C_DO_SANITY_CHECKS
- if (cache_ptr->slist_enabled) {
- if (entry_ptr->in_slist) {
- HDassert(entry_ptr->is_dirty);
- if (entry_ptr->flush_marker && !entry_ptr->is_dirty)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry in slist failed sanity checks")
- } /* end if */
- else {
- HDassert(!entry_ptr->is_dirty);
- HDassert(!entry_ptr->flush_marker);
- if (entry_ptr->is_dirty || entry_ptr->flush_marker)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry failed sanity checks")
- } /* end else */
- }
- else { /* slist is disabled */
- HDassert(!entry_ptr->in_slist);
- if (!entry_ptr->is_dirty)
- if (entry_ptr->flush_marker)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flush marked clean entry?")
- }
-#endif /* H5C_DO_SANITY_CHECKS */
-
- if (entry_ptr->is_protected)
- /* Attempt to flush a protected entry -- scream and die. */
- HGOTO_ERROR(H5E_CACHE, H5E_PROTECT, FAIL, "Attempt to flush a protected entry")
-
- /* Set entry_ptr->flush_in_progress = TRUE and set
- * entry_ptr->flush_marker = FALSE
- *
- * We will set flush_in_progress back to FALSE at the end if the
- * entry still exists at that point.
- */
- entry_ptr->flush_in_progress = TRUE;
- entry_ptr->flush_marker = FALSE;
-
- /* Preserve current dirty state for later */
- was_dirty = entry_ptr->is_dirty;
-
- /* The entry is dirty, and we are doing a flush, a flush destroy, or have
- * been requested to generate an image. In those cases, serialize the
- * entry.
- */
- if (write_entry || generate_image) {
- HDassert(entry_ptr->is_dirty);
- if (NULL == entry_ptr->image_ptr) {
- if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL,
- "memory allocation failed for on disk image buffer")
-
-#if H5C_DO_MEMORY_SANITY_CHECKS
- H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE,
- H5C_IMAGE_EXTRA_SPACE);
-#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
-
- } /* end if */
-
- if (!entry_ptr->image_up_to_date) {
- /* Sanity check */
- HDassert(!entry_ptr->prefetched);
-
- /* Generate the entry's image */
- if (H5C__generate_image(f, cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't generate entry's image")
- } /* end if ( ! (entry_ptr->image_up_to_date) ) */
- } /* end if */
-
- /* Finally, write the image to disk.
- *
- * Note that if the H5AC__CLASS_SKIP_WRITES flag is set
- * in the entry's type, we silently skip the write. This
- * flag should only be used in test code.
- */
- if (write_entry) {
- HDassert(entry_ptr->is_dirty);
-
-#ifdef H5C_DO_SANITY_CHECKS
- if (cache_ptr->check_write_permitted && !cache_ptr->write_permitted)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Write when writes are always forbidden!?!?!")
-#endif /* H5C_DO_SANITY_CHECKS */
-
- /* Write the image to disk unless the write is suppressed.
- *
- * This happens if both suppress_image_entry_writes and
- * entry_ptr->include_in_image are TRUE, or if the
- * H5AC__CLASS_SKIP_WRITES flag is set in the entry's type. This
- * flag should only be used in test code.
- */
- if ((!suppress_image_entry_writes || !entry_ptr->include_in_image) &&
- ((entry_ptr->type->flags & H5C__CLASS_SKIP_WRITES) == 0)) {
- H5FD_mem_t mem_type = H5FD_MEM_DEFAULT;
-
-#ifdef H5_HAVE_PARALLEL
- if (cache_ptr->coll_write_list) {
- if (H5SL_insert(cache_ptr->coll_write_list, entry_ptr, &entry_ptr->addr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "unable to insert skip list item")
- } /* end if */
- else {
-#endif /* H5_HAVE_PARALLEL */
- if (entry_ptr->prefetched) {
- HDassert(entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID);
- mem_type = cache_ptr->class_table_ptr[entry_ptr->prefetch_type_id]->mem_type;
- } /* end if */
- else
- mem_type = entry_ptr->type->mem_type;
-
- if (H5F_block_write(f, mem_type, entry_ptr->addr, entry_ptr->size, entry_ptr->image_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't write image to file")
-#ifdef H5_HAVE_PARALLEL
- }
-#endif /* H5_HAVE_PARALLEL */
- } /* end if */
-
- /* if the entry has a notify callback, notify it that we have
- * just flushed the entry.
- */
- if (entry_ptr->type->notify &&
- (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_FLUSH, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client of entry flush")
- } /* if ( write_entry ) */
-
- /* At this point, all pre-serialize and serialize calls have been
- * made if it was appropriate to make them. Similarly, the entry
- * has been written to disk if desired.
- *
- * Thus it is now safe to update the cache data structures for the
- * flush.
- */
-
- /* start by updating the statistics */
- if (clear_only) {
- /* only log a clear if the entry was dirty */
- if (was_dirty)
- H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr)
- }
- else if (write_entry) {
- HDassert(was_dirty);
-
- /* only log a flush if we actually wrote to disk */
- H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr)
- } /* end else if */
-
- /* Note that the algorithm below is (very) similar to the set of operations
- * in H5C_remove_entry() and should be kept in sync with changes
- * to that code. - QAK, 2016/11/30
- */
-
- /* Update the cache internal data structures. */
- if (destroy) {
- /* Sanity checks */
- if (take_ownership)
- HDassert(!destroy_entry);
- else
- HDassert(destroy_entry);
-
- HDassert(!entry_ptr->is_pinned);
-
- /* Update stats, while entry is still in the cache */
- H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership)
-
- /* If the entry's type has a 'notify' callback and the entry is about
- * to be removed from the cache, send a 'before eviction' notice while
- * the entry is still fully integrated in the cache.
- */
- if (entry_ptr->type->notify &&
- (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry to evict")
-
- /* Update the cache internal data structures as appropriate
- * for a destroy. Specifically:
- *
- * 1) Delete it from the index
- *
- * 2) Delete it from the skip list if requested.
- *
- * 3) Delete it from the collective read access list.
- *
- * 4) Update the replacement policy for eviction
- *
- * 5) Remove it from the tag list for this object
- *
- * Finally, if the destroy_entry flag is set, discard the
- * entry.
- */
- H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL)
-
- if (entry_ptr->in_slist && del_from_slist_on_destroy)
- H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush, FAIL)
-
-#ifdef H5_HAVE_PARALLEL
- /* Check for collective read access flag */
- if (entry_ptr->coll_access) {
- entry_ptr->coll_access = FALSE;
- H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
- } /* end if */
-#endif /* H5_HAVE_PARALLEL */
-
- H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, FAIL)
-
- /* Remove entry from tag list */
- if (H5C__untag_entry(cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list")
-
- /* verify that the entry is no longer part of any flush dependencies */
- HDassert(entry_ptr->flush_dep_nparents == 0);
- HDassert(entry_ptr->flush_dep_nchildren == 0);
- } /* end if */
- else {
- HDassert(clear_only || write_entry);
- HDassert(entry_ptr->is_dirty);
- HDassert((!cache_ptr->slist_enabled) || (entry_ptr->in_slist));
-
- /* We are either doing a flush or a clear.
- *
- * A clear and a flush are the same from the point of
- * view of the replacement policy and the slist.
- * Hence no differentiation between them.
- */
- H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, FAIL)
- H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush, FAIL)
-
- /* mark the entry as clean and update the index for
- * entry clean. Also, call the clear callback
- * if defined.
- */
- entry_ptr->is_dirty = FALSE;
-
- H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr, FAIL);
-
- /* Check for entry changing status and do notifications, etc. */
- if (was_dirty) {
- /* If the entry's type has a 'notify' callback, send an
- * 'entry cleaned' notice now that the entry is fully
- * integrated into the cache.
- */
- if (entry_ptr->type->notify &&
- (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify client about entry dirty flag cleared")
-
- /* Propagate the clean flag up the flush dependency chain
- * if appropriate
- */
- if (entry_ptr->flush_dep_ndirty_children != 0)
- HDassert(entry_ptr->flush_dep_ndirty_children == 0);
- if (entry_ptr->flush_dep_nparents > 0)
- if (H5C__mark_flush_dep_clean(entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Can't propagate flush dep clean flag")
- } /* end if */
- } /* end else */
-
- /* reset the flush_in progress flag */
- entry_ptr->flush_in_progress = FALSE;
-
- /* capture the cache entry address for the log_flush call at the
- * end before the entry_ptr gets freed
- */
- entry_addr = entry_ptr->addr;
-
- /* Internal cache data structures should now be up to date, and
- * consistent with the status of the entry.
- *
- * Now discard the entry if appropriate.
- */
- if (destroy) {
- /* Sanity check */
- HDassert(0 == entry_ptr->flush_dep_nparents);
-
- /* if both suppress_image_entry_frees and entry_ptr->include_in_image
- * are true, simply set entry_ptr->image_ptr to NULL, as we have
- * another pointer to the buffer in an instance of H5C_image_entry_t
- * in cache_ptr->image_entries.
- *
- * Otherwise, free the buffer if it exists.
- */
- if (suppress_image_entry_frees && entry_ptr->include_in_image)
- entry_ptr->image_ptr = NULL;
- else if (entry_ptr->image_ptr != NULL)
- entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
-
- /* If the entry is not a prefetched entry, verify that the flush
- * dependency parents addresses array has been transferred.
- *
- * If the entry is prefetched, the free_icr routine will dispose of
- * the flush dependency parents addresses array if necessary.
- */
- if (!entry_ptr->prefetched) {
- HDassert(0 == entry_ptr->fd_parent_count);
- HDassert(NULL == entry_ptr->fd_parent_addrs);
- } /* end if */
-
- /* Check whether we should free the space in the file that
- * the entry occupies
- */
- if (free_file_space) {
- hsize_t fsf_size;
-
- /* Sanity checks */
- HDassert(H5F_addr_defined(entry_ptr->addr));
- HDassert(!H5F_IS_TMP_ADDR(f, entry_ptr->addr));
-#ifndef NDEBUG
- {
- size_t curr_len;
-
- /* Get the actual image size for the thing again */
- entry_ptr->type->image_len((void *)entry_ptr, &curr_len);
- HDassert(curr_len == entry_ptr->size);
- }
-#endif
-
- /* If the file space free size callback is defined, use
- * it to get the size of the block of file space to free.
- * Otherwise use entry_ptr->size.
- */
- if (entry_ptr->type->fsf_size) {
- if ((entry_ptr->type->fsf_size)((void *)entry_ptr, &fsf_size) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to get file space free size")
- } /* end if */
- else /* no file space free size callback -- use entry size */
- fsf_size = entry_ptr->size;
-
- /* Release the space on disk */
- if (H5MF_xfree(f, entry_ptr->type->mem_type, entry_ptr->addr, fsf_size) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to free file space for cache entry")
- } /* end if ( free_file_space ) */
-
- /* Reset the pointer to the cache the entry is within. -QAK */
- entry_ptr->cache_ptr = NULL;
-
- /* increment entries_removed_counter and set
- * last_entry_removed_ptr. As we are likely about to
- * free the entry, recall that last_entry_removed_ptr
- * must NEVER be dereferenced.
- *
- * Recall that these fields are maintained to allow functions
- * that perform scans of lists of entries to detect the
- * unexpected removal of entries (via expunge, eviction,
- * or take ownership at present), so that they can re-start
- * their scans if necessary.
- *
- * Also check if the entry we are watching for removal is being
- * removed (usually the 'next' entry for an iteration) and reset
- * it to indicate that it was removed.
- */
- cache_ptr->entries_removed_counter++;
- cache_ptr->last_entry_removed_ptr = entry_ptr;
-
- if (entry_ptr == cache_ptr->entry_watched_for_removal)
- cache_ptr->entry_watched_for_removal = NULL;
-
- /* Check for actually destroying the entry in memory */
- /* (As opposed to taking ownership of it) */
- if (destroy_entry) {
- if (entry_ptr->is_dirty) {
- /* Reset dirty flag */
- entry_ptr->is_dirty = FALSE;
-
- /* If the entry's type has a 'notify' callback send a
- * 'entry cleaned' notice now that the entry is fully
- * integrated into the cache.
- */
- if (entry_ptr->type->notify &&
- (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify client about entry dirty flag cleared")
- } /* end if */
-
- /* we are about to discard the in core representation --
- * set the magic field to bad magic so we can detect a
- * freed entry if we see one.
- */
- entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
-
- /* verify that the image has been freed */
- HDassert(entry_ptr->image_ptr == NULL);
-
- if (entry_ptr->type->free_icr((void *)entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "free_icr callback failed")
- } /* end if */
- else {
- HDassert(take_ownership);
-
- /* Client is taking ownership of the entry. Set bad magic here too
- * so the cache will choke unless the entry is re-inserted properly
- */
- entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
- } /* end else */
- } /* if (destroy) */
-
- /* Check if we have to update the page buffer with cleared entries
- * so it doesn't go out of date
- */
- if (update_page_buffer) {
- /* Sanity check */
- HDassert(!destroy);
- HDassert(entry_ptr->image_ptr);
-
- if (f->shared->page_buf && (f->shared->page_buf->page_size >= entry_ptr->size))
- if (H5PB_update_entry(f->shared->page_buf, entry_ptr->addr, entry_ptr->size,
- entry_ptr->image_ptr) > 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Failed to update PB with metadata cache")
- } /* end if */
-
- if (cache_ptr->log_flush)
- if ((cache_ptr->log_flush)(cache_ptr, entry_addr, was_dirty, flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "log_flush callback failed")
-
-done:
- HDassert((ret_value != SUCCEED) || (destroy_entry) || (!entry_ptr->flush_in_progress));
- HDassert((ret_value != SUCCEED) || (destroy_entry) || (take_ownership) || (!entry_ptr->is_dirty));
-
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__flush_single_entry() */
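/*
 * Illustrative sketch (hypothetical, not from the HDF5 sources): the
 * "bad magic" poisoning used above before free_icr() or a take-ownership
 * hand-off.  Overwriting the magic field before the entry leaves the
 * cache makes any later use of a stale pointer trip an assertion instead
 * of silently reading freed memory.  All names below are stand-ins.
 */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define GOOD_MAGIC 0x01234567u /* hypothetical live-entry marker */
#define BAD_MAGIC  0xDEADBEEFu /* hypothetical freed-entry marker */

struct entry_sketch {
    uint32_t magic;
    /* ... payload ... */
};

static void
discard_entry_sketch(struct entry_sketch *e)
{
    assert(e->magic == GOOD_MAGIC); /* entry must still be live here */
    e->magic = BAD_MAGIC;           /* poison: later use is detectable */
    free(e);
}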
-
-/*-------------------------------------------------------------------------
- *
- * Function: H5C__verify_len_eoa
- *
- * Purpose: Verify that 'len' does not exceed eoa when 'actual' is
- * false i.e. 'len' is the initial speculative length from
- * the get_initial_load_size callback with a null image pointer.
- * If it does, adjust 'len' accordingly.
- *
- * Verify that 'len' does not exceed eoa when 'actual' is
- * true i.e. 'len' is the actual length from the get_final_load_size
- * callback with a non-null image pointer.
- * If it does, return an error.
- *
- * Return: FAIL if error is detected, SUCCEED otherwise.
- *
- * Programmer: Vailin Choi
- * 9/6/15
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__verify_len_eoa(H5F_t *f, const H5C_class_t *type, haddr_t addr, size_t *len, hbool_t actual)
-{
- H5FD_mem_t cooked_type; /* Modified type, accounting for switching global heaps */
- haddr_t eoa; /* End-of-allocation in the file */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- /* if type == H5FD_MEM_GHEAP, H5F_block_read() forces
- * type to H5FD_MEM_DRAW via its call to H5F__accum_read().
- * Thus we do the same for purposes of computing the EOA
- * for sanity checks.
- */
- cooked_type = (type->mem_type == H5FD_MEM_GHEAP) ? H5FD_MEM_DRAW : type->mem_type;
-
- /* Get the file's end-of-allocation value */
- eoa = H5F_get_eoa(f, cooked_type);
- if (!H5F_addr_defined(eoa))
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid EOA address for file")
-
- /* Check for bad address in general */
- if (H5F_addr_gt(addr, eoa))
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "address of object past end of allocation")
-
- /* Check if the amount of data to read will be past the EOA */
- if (H5F_addr_gt((addr + *len), eoa)) {
- if (actual)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "actual len exceeds EOA")
- else
- /* Trim down the length of the metadata */
- *len = (size_t)(eoa - addr);
- } /* end if */
-
- if (*len <= 0)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "len not positive after adjustment for EOA")
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__verify_len_eoa() */
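/*
 * Illustrative sketch (hypothetical, not from the HDF5 sources): the core
 * of the EOA check above as a self-contained helper, with 'eoa' standing
 * in for H5F_get_eoa() and the H5F_addr_* comparison macros.
 */
#include <stddef.h>
#include <stdint.h>

/* Returns 0 on success, -1 on error; may shrink *len when !actual */
static int
verify_len_eoa_sketch(uint64_t eoa, uint64_t addr, size_t *len, int actual)
{
    if (addr > eoa)
        return -1;                   /* object starts past end of allocation */
    if (addr + *len > eoa) {
        if (actual)
            return -1;               /* a known length must fit below the EOA */
        *len = (size_t)(eoa - addr); /* speculative length: clamp to the EOA */
    }
    return (*len > 0) ? 0 : -1;      /* zero-length reads are rejected */
}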
-
-/*-------------------------------------------------------------------------
- *
- * Function: H5C__load_entry
- *
- * Purpose: Attempt to load the entry at the specified disk address
- * and with the specified type into memory. If successful,
- * return the in-memory address of the entry. Return NULL
- * on failure.
- *
- * Note that this function simply loads the entry into
- * core. It does not insert it into the cache.
- *
- * Return: Non-NULL on success / NULL on failure.
- *
- * Programmer: John Mainzer, 5/18/04
- *
- *-------------------------------------------------------------------------
- */
-static void *
-H5C__load_entry(H5F_t *f,
-#ifdef H5_HAVE_PARALLEL
- hbool_t coll_access,
-#endif /* H5_HAVE_PARALLEL */
- const H5C_class_t *type, haddr_t addr, void *udata)
-{
- hbool_t dirty = FALSE; /* Flag indicating whether thing was dirtied during deserialize */
- uint8_t *image = NULL; /* Buffer for disk image */
- void *thing = NULL; /* Pointer to thing loaded */
- H5C_cache_entry_t *entry = NULL; /* Alias for thing loaded, as cache entry */
- size_t len; /* Size of image in file */
-#ifdef H5_HAVE_PARALLEL
- int mpi_rank = 0; /* MPI process rank */
- MPI_Comm comm = MPI_COMM_NULL; /* File MPI Communicator */
- int mpi_code; /* MPI error code */
-#endif /* H5_HAVE_PARALLEL */
- void *ret_value = NULL; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- /* Sanity checks */
- HDassert(f);
- HDassert(f->shared);
- HDassert(f->shared->cache);
- HDassert(type);
- HDassert(H5F_addr_defined(addr));
- HDassert(type->get_initial_load_size);
- if (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)
- HDassert(type->get_final_load_size);
- else
- HDassert(NULL == type->get_final_load_size);
- HDassert(type->deserialize);
-
- /* Can't see how skip reads could be usefully combined with
- * the speculative read flag. Hence disallow.
- */
- HDassert(!((type->flags & H5C__CLASS_SKIP_READS) && (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)));
-
- /* Call the get_initial_load_size callback, to retrieve the initial size of image */
- if (type->get_initial_load_size(udata, &len) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "can't retrieve image size")
- HDassert(len > 0);
-
- /* Check for possible speculative read off the end of the file */
- if (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)
- if (H5C__verify_len_eoa(f, type, addr, &len, FALSE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid len with respect to EOA")
-
- /* Allocate the buffer for reading the on-disk entry image */
- if (NULL == (image = (uint8_t *)H5MM_malloc(len + H5C_IMAGE_EXTRA_SPACE)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for on disk image buffer")
-#if H5C_DO_MEMORY_SANITY_CHECKS
- H5MM_memcpy(image + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
-#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
-
-#ifdef H5_HAVE_PARALLEL
- if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) {
- if ((mpi_rank = H5F_mpi_get_rank(f)) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "Can't get MPI rank")
- if ((comm = H5F_mpi_get_comm(f)) == MPI_COMM_NULL)
- HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "get_comm request failed")
- } /* end if */
-#endif /* H5_HAVE_PARALLEL */
-
- /* Get the on-disk entry image */
- if (0 == (type->flags & H5C__CLASS_SKIP_READS)) {
- unsigned tries, max_tries; /* The # of read attempts */
- unsigned retries; /* The # of retries */
- htri_t chk_ret; /* return from verify_chksum callback */
- size_t actual_len = len; /* The actual length, after speculative reads have been resolved */
- uint64_t nanosec = 1; /* # of nanoseconds to sleep between retries */
- void *new_image; /* Pointer to image */
- hbool_t len_changed = TRUE; /* Whether to re-check speculative entries */
-
- /* Get the # of read attempts */
- max_tries = tries = H5F_GET_READ_ATTEMPTS(f);
-
- /*
- * This do/while loop performs the following till the metadata checksum
- * is correct or the file's number of allowed read attempts are reached.
- * --read the metadata
- * --determine the actual size of the metadata
- * --perform checksum verification
- */
- do {
- if (actual_len != len) {
- if (NULL == (new_image = H5MM_realloc(image, len + H5C_IMAGE_EXTRA_SPACE)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()")
- image = (uint8_t *)new_image;
-#if H5C_DO_MEMORY_SANITY_CHECKS
- H5MM_memcpy(image + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
-#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
- } /* end if */
-
-#ifdef H5_HAVE_PARALLEL
- if (!coll_access || 0 == mpi_rank) {
-#endif /* H5_HAVE_PARALLEL */
- if (H5F_block_read(f, type->mem_type, addr, len, image) < 0) {
-#ifdef H5_HAVE_PARALLEL
- if (coll_access) {
- /* Push an error, but still participate in following MPI_Bcast */
- HDmemset(image, 0, len);
- HDONE_ERROR(H5E_CACHE, H5E_READERROR, NULL, "Can't read image")
- }
- else
-#endif
- HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL, "Can't read image")
- }
-
-#ifdef H5_HAVE_PARALLEL
- } /* end if */
- /* if the collective metadata read optimization is turned on,
- * bcast the metadata read from process 0 to all ranks in the file
- * communicator
- */
- if (coll_access) {
- int buf_size;
-
- H5_CHECKED_ASSIGN(buf_size, int, len, size_t);
- if (MPI_SUCCESS != (mpi_code = MPI_Bcast(image, buf_size, MPI_BYTE, 0, comm)))
- HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code)
- } /* end if */
-#endif /* H5_HAVE_PARALLEL */
-
- /* If the entry could be read speculatively and the length is still
- * changing, check for updating the actual size
- */
- if ((type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG) && len_changed) {
- /* Retrieve the actual length */
- actual_len = len;
- if (type->get_final_load_size(image, len, udata, &actual_len) < 0)
- continue; /* Transfer control to while() and count towards retries */
-
- /* Check for the length changing */
- if (actual_len != len) {
- /* Verify that the length isn't past the EOA for the file */
- if (H5C__verify_len_eoa(f, type, addr, &actual_len, TRUE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "actual_len exceeds EOA")
-
- /* Expand buffer to new size */
- if (NULL == (new_image = H5MM_realloc(image, actual_len + H5C_IMAGE_EXTRA_SPACE)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()")
- image = (uint8_t *)new_image;
-#if H5C_DO_MEMORY_SANITY_CHECKS
- H5MM_memcpy(image + actual_len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
-#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
-
- if (actual_len > len) {
-#ifdef H5_HAVE_PARALLEL
- if (!coll_access || 0 == mpi_rank) {
-#endif /* H5_HAVE_PARALLEL */
- /* If the thing's image needs to be bigger for a speculatively
- * loaded thing, go get the on-disk image again (the extra portion).
- */
- if (H5F_block_read(f, type->mem_type, addr + len, actual_len - len, image + len) <
- 0) {
-#ifdef H5_HAVE_PARALLEL
- if (coll_access) {
- /* Push an error, but still participate in following MPI_Bcast */
- HDmemset(image + len, 0, actual_len - len);
- HDONE_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't read image")
- }
- else
-#endif
- HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't read image")
- }
-
-#ifdef H5_HAVE_PARALLEL
- }
- /* If the collective metadata read optimization is turned on,
- * Bcast the metadata read from process 0 to all ranks in the file
- * communicator */
- if (coll_access) {
- int buf_size;
-
- H5_CHECKED_ASSIGN(buf_size, int, actual_len - len, size_t);
- if (MPI_SUCCESS !=
- (mpi_code = MPI_Bcast(image + len, buf_size, MPI_BYTE, 0, comm)))
- HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code)
- } /* end if */
-#endif /* H5_HAVE_PARALLEL */
- } /* end if */
- } /* end if (actual_len != len) */
- else {
- /* The length has stabilized */
- len_changed = FALSE;
-
- /* Set the final length */
- len = actual_len;
- } /* else */
- } /* end if */
-
- /* If there's no way to verify the checksum for a piece of metadata
- * (usually because there's no checksum in the file), leave now
- */
- if (type->verify_chksum == NULL)
- break;
-
- /* Verify the checksum for the metadata image */
- if ((chk_ret = type->verify_chksum(image, actual_len, udata)) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "failure from verify_chksum callback")
- if (chk_ret == TRUE)
- break;
-
- /* Sleep for some time */
- H5_nanosleep(nanosec);
- nanosec *= 2; /* Double the sleep time next time */
- } while (--tries);
-
- /* Check for too many tries */
- if (tries == 0)
- HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL, "incorrect metadata checksum after all read attempts")
-
- /* Calculate and track the # of retries */
- retries = max_tries - tries;
- if (retries) /* Does not track 0 retry */
- if (H5F_track_metadata_read_retries(f, (unsigned)type->mem_type, retries) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "cannot track read tries = %u ", retries)
-
- /* Set the final length (in case it wasn't set earlier) */
- len = actual_len;
- } /* end if !H5C__CLASS_SKIP_READS */
-
- /* Deserialize the on-disk image into the native memory form */
- if (NULL == (thing = type->deserialize(image, len, udata, &dirty)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "Can't deserialize image")
-
- entry = (H5C_cache_entry_t *)thing;
-
- /* In general, an entry should be clean just after it is loaded.
- *
- * However, when this code is used in the metadata cache, it is
- * possible that object headers will be dirty at this point, as
- * the deserialize function will alter object headers if necessary to
- * fix an old bug.
- *
- * In the following assert:
- *
- * HDassert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6 ) );
- *
- * note that type ids 5 & 6 are associated with object headers in the
- * metadata cache.
- *
- * When we get to using H5C for other purposes, we may wish to
- * tighten up the assert so that the loophole only applies to the
- * metadata cache.
- */
-
- HDassert((dirty == FALSE) || (type->id == 5 || type->id == 6));
-
- entry->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
- entry->cache_ptr = f->shared->cache;
- entry->addr = addr;
- entry->size = len;
- HDassert(entry->size < H5C_MAX_ENTRY_SIZE);
- entry->image_ptr = image;
- entry->image_up_to_date = !dirty;
- entry->type = type;
- entry->is_dirty = dirty;
- entry->dirtied = FALSE;
- entry->is_protected = FALSE;
- entry->is_read_only = FALSE;
- entry->ro_ref_count = 0;
- entry->is_pinned = FALSE;
- entry->in_slist = FALSE;
- entry->flush_marker = FALSE;
-#ifdef H5_HAVE_PARALLEL
- entry->clear_on_unprotect = FALSE;
- entry->flush_immediately = FALSE;
- entry->coll_access = coll_access;
-#endif /* H5_HAVE_PARALLEL */
- entry->flush_in_progress = FALSE;
- entry->destroy_in_progress = FALSE;
-
- entry->ring = H5C_RING_UNDEFINED;
-
- /* Initialize flush dependency fields */
- entry->flush_dep_parent = NULL;
- entry->flush_dep_nparents = 0;
- entry->flush_dep_parent_nalloc = 0;
- entry->flush_dep_nchildren = 0;
- entry->flush_dep_ndirty_children = 0;
- entry->flush_dep_nunser_children = 0;
- entry->ht_next = NULL;
- entry->ht_prev = NULL;
- entry->il_next = NULL;
- entry->il_prev = NULL;
-
- entry->next = NULL;
- entry->prev = NULL;
-
-#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
- entry->aux_next = NULL;
- entry->aux_prev = NULL;
-#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
-
-#ifdef H5_HAVE_PARALLEL
- entry->coll_next = NULL;
- entry->coll_prev = NULL;
-#endif /* H5_HAVE_PARALLEL */
-
- /* initialize cache image related fields */
- entry->include_in_image = FALSE;
- entry->lru_rank = 0;
- entry->image_dirty = FALSE;
- entry->fd_parent_count = 0;
- entry->fd_parent_addrs = NULL;
- entry->fd_child_count = 0;
- entry->fd_dirty_child_count = 0;
- entry->image_fd_height = 0;
- entry->prefetched = FALSE;
- entry->prefetch_type_id = 0;
- entry->age = 0;
- entry->prefetched_dirty = FALSE;
-#ifndef NDEBUG /* debugging field */
- entry->serialization_count = 0;
-#endif
-
- /* initialize tag list fields */
- entry->tl_next = NULL;
- entry->tl_prev = NULL;
- entry->tag_info = NULL;
-
- H5C__RESET_CACHE_ENTRY_STATS(entry);
-
- ret_value = thing;
-
-done:
- /* Cleanup on error */
- if (NULL == ret_value) {
- /* Release resources */
- if (thing && type->free_icr(thing) < 0)
- HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "free_icr callback failed")
- if (image)
- image = (uint8_t *)H5MM_xfree(image);
- } /* end if */
-
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__load_entry() */
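/*
 * Illustrative sketch (hypothetical, not from the HDF5 sources): the
 * checksum-retry pattern from H5C__load_entry() above, isolated.
 * read_block() and checksum_ok() are stand-ins for H5F_block_read() and
 * the client's verify_chksum callback; nanosleep() is POSIX.
 */
#include <time.h>

/* Returns the # of retries consumed on success, -1 if every attempt fails */
static int
read_with_retries_sketch(unsigned max_tries, void *buf, int (*read_block)(void *),
                         int (*checksum_ok)(const void *))
{
    long     nanosec = 1; /* sleep time doubles after each failed attempt */
    unsigned tries   = max_tries;

    do {
        struct timespec ts = {0, 0};

        if (read_block(buf) < 0)
            return -1;
        if (checksum_ok(buf))
            return (int)(max_tries - tries);

        ts.tv_nsec = nanosec;
        (void)nanosleep(&ts, NULL);
        nanosec *= 2;
    } while (--tries);

    return -1; /* checksum still wrong after all read attempts */
}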
-
-/*-------------------------------------------------------------------------
- *
- * Function: H5C__make_space_in_cache
- *
- * Purpose: Attempt to evict cache entries until the index_size
- * is at least needed_space below max_cache_size.
- *
- * In passing, also attempt to bring cLRU_list_size to a
- * value greater than min_clean_size.
- *
- * Depending on circumstances, both of these goals may
- * be impossible, as in parallel mode, we must avoid generating
- * a write as part of a read (to avoid deadlock in collective
- * I/O), and in all cases, it is possible (though hopefully
- * highly unlikely) that the protected list may exceed the
- * maximum size of the cache.
- *
- * Thus the function simply does its best, returning success
- * unless an error is encountered.
- *
- * Observe that this function cannot occasion a read.
- *
- * Return: Non-negative on success/Negative on failure.
- *
- * Programmer: John Mainzer, 5/14/04
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted)
-{
- H5C_t *cache_ptr = f->shared->cache;
-#if H5C_COLLECT_CACHE_STATS
- int32_t clean_entries_skipped = 0;
- int32_t dirty_pf_entries_skipped = 0;
- int32_t total_entries_scanned = 0;
-#endif /* H5C_COLLECT_CACHE_STATS */
- uint32_t entries_examined = 0;
- uint32_t initial_list_len;
- size_t empty_space;
- hbool_t reentrant_call = FALSE;
- hbool_t prev_is_dirty = FALSE;
- hbool_t didnt_flush_entry = FALSE;
- hbool_t restart_scan;
- H5C_cache_entry_t *entry_ptr;
- H5C_cache_entry_t *prev_ptr;
- H5C_cache_entry_t *next_ptr;
-#ifndef NDEBUG
- uint32_t num_corked_entries = 0;
-#endif
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- /* Sanity checks */
- HDassert(f);
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size));
-
- /* check to see if cache_ptr->msic_in_progress is TRUE. If it is, this
- * is a re-entrant call via a client callback called in the make
- * space in cache process. To avoid an infinite recursion, set
- * reentrant_call to TRUE, and goto done.
- */
- if (cache_ptr->msic_in_progress) {
- reentrant_call = TRUE;
- HGOTO_DONE(SUCCEED);
- } /* end if */
-
- cache_ptr->msic_in_progress = TRUE;
-
- if (write_permitted) {
- restart_scan = FALSE;
- initial_list_len = cache_ptr->LRU_list_len;
- entry_ptr = cache_ptr->LRU_tail_ptr;
-
- if (cache_ptr->index_size >= cache_ptr->max_cache_size)
- empty_space = 0;
- else
- empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
-
- while ((((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) ||
- ((empty_space + cache_ptr->clean_index_size) < (cache_ptr->min_clean_size))) &&
- (entries_examined <= (2 * initial_list_len)) && (entry_ptr != NULL)) {
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(!(entry_ptr->is_protected));
- HDassert(!(entry_ptr->is_read_only));
- HDassert((entry_ptr->ro_ref_count) == 0);
-
- next_ptr = entry_ptr->next;
- prev_ptr = entry_ptr->prev;
-
- if (prev_ptr != NULL)
- prev_is_dirty = prev_ptr->is_dirty;
-
- if (entry_ptr->is_dirty && (entry_ptr->tag_info && entry_ptr->tag_info->corked)) {
- /* Skip "dirty" corked entries. */
-#ifndef NDEBUG
- ++num_corked_entries;
-#endif
- didnt_flush_entry = TRUE;
- }
- else if ((entry_ptr->type->id != H5AC_EPOCH_MARKER_ID) && !entry_ptr->flush_in_progress &&
- !entry_ptr->prefetched_dirty) {
- didnt_flush_entry = FALSE;
- if (entry_ptr->is_dirty) {
-#if H5C_COLLECT_CACHE_STATS
- if ((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size)
- cache_ptr->entries_scanned_to_make_space++;
-#endif /* H5C_COLLECT_CACHE_STATS */
-
- /* reset entries_removed_counter and
- * last_entry_removed_ptr prior to the call to
- * H5C__flush_single_entry() so that we can spot
- * unexpected removals of entries from the cache,
- * and set the restart_scan flag if proceeding
- * would be likely to cause us to scan an entry
- * that is no longer in the cache.
- */
- cache_ptr->entries_removed_counter = 0;
- cache_ptr->last_entry_removed_ptr = NULL;
-
- if (H5C__flush_single_entry(f, entry_ptr, H5C__NO_FLAGS_SET) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
-
- if ((cache_ptr->entries_removed_counter > 1) ||
- (cache_ptr->last_entry_removed_ptr == prev_ptr))
-
- restart_scan = TRUE;
- }
- else if ((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size
-#ifdef H5_HAVE_PARALLEL
- && !(entry_ptr->coll_access)
-#endif /* H5_HAVE_PARALLEL */
- ) {
-#if H5C_COLLECT_CACHE_STATS
- cache_ptr->entries_scanned_to_make_space++;
-#endif /* H5C_COLLECT_CACHE_STATS */
-
- if (H5C__flush_single_entry(f, entry_ptr,
- H5C__FLUSH_INVALIDATE_FLAG |
- H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
- }
- else {
- /* We have enough space so don't flush clean entry. */
-#if H5C_COLLECT_CACHE_STATS
- clean_entries_skipped++;
-#endif /* H5C_COLLECT_CACHE_STATS */
- didnt_flush_entry = TRUE;
- }
-
-#if H5C_COLLECT_CACHE_STATS
- total_entries_scanned++;
-#endif /* H5C_COLLECT_CACHE_STATS */
- }
- else {
-
- /* Skip epoch markers, entries that are in the process
- * of being flushed, and entries marked as prefetched_dirty
- * (occurs in the R/O case only).
- */
- didnt_flush_entry = TRUE;
-
-#if H5C_COLLECT_CACHE_STATS
- if (entry_ptr->prefetched_dirty)
- dirty_pf_entries_skipped++;
-#endif /* H5C_COLLECT_CACHE_STATS */
- }
-
- if (prev_ptr != NULL) {
- if (didnt_flush_entry)
- /* epoch markers don't get flushed, and we don't touch
- * entries that are in the process of being flushed.
- * Hence no need for sanity checks, as we haven't
- * flushed anything. Thus just set entry_ptr to prev_ptr
- * and go on.
- */
- entry_ptr = prev_ptr;
- else if (restart_scan || prev_ptr->is_dirty != prev_is_dirty || prev_ptr->next != next_ptr ||
- prev_ptr->is_protected || prev_ptr->is_pinned) {
- /* something has happened to the LRU -- start over
- * from the tail.
- */
- restart_scan = FALSE;
- entry_ptr = cache_ptr->LRU_tail_ptr;
- H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
- }
- else
- entry_ptr = prev_ptr;
- }
- else
- entry_ptr = NULL;
-
- entries_examined++;
-
- if (cache_ptr->index_size >= cache_ptr->max_cache_size)
- empty_space = 0;
- else
- empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
-
- HDassert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size));
- }
-
-#if H5C_COLLECT_CACHE_STATS
- cache_ptr->calls_to_msic++;
-
- cache_ptr->total_entries_skipped_in_msic += clean_entries_skipped;
- cache_ptr->total_dirty_pf_entries_skipped_in_msic += dirty_pf_entries_skipped;
- cache_ptr->total_entries_scanned_in_msic += total_entries_scanned;
-
- if (clean_entries_skipped > cache_ptr->max_entries_skipped_in_msic)
- cache_ptr->max_entries_skipped_in_msic = clean_entries_skipped;
-
- if (dirty_pf_entries_skipped > cache_ptr->max_dirty_pf_entries_skipped_in_msic)
- cache_ptr->max_dirty_pf_entries_skipped_in_msic = dirty_pf_entries_skipped;
-
- if (total_entries_scanned > cache_ptr->max_entries_scanned_in_msic)
- cache_ptr->max_entries_scanned_in_msic = total_entries_scanned;
-#endif /* H5C_COLLECT_CACHE_STATS */
-
- /* NEED: work on a better assert for corked entries */
- HDassert((entries_examined > (2 * initial_list_len)) ||
- ((cache_ptr->pl_size + cache_ptr->pel_size + cache_ptr->min_clean_size) >
- cache_ptr->max_cache_size) ||
- ((cache_ptr->clean_index_size + empty_space) >= cache_ptr->min_clean_size) ||
- ((num_corked_entries)));
-#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
-
- HDassert((entries_examined > (2 * initial_list_len)) ||
- (cache_ptr->cLRU_list_size <= cache_ptr->clean_index_size));
- HDassert((entries_examined > (2 * initial_list_len)) ||
- (cache_ptr->dLRU_list_size <= cache_ptr->dirty_index_size));
-
-#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
- }
- else {
- HDassert(H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS);
-
-#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
- initial_list_len = cache_ptr->cLRU_list_len;
- entry_ptr = cache_ptr->cLRU_tail_ptr;
-
- while (((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) &&
- (entries_examined <= initial_list_len) && (entry_ptr != NULL)) {
- HDassert(!(entry_ptr->is_protected));
- HDassert(!(entry_ptr->is_read_only));
- HDassert((entry_ptr->ro_ref_count) == 0);
- HDassert(!(entry_ptr->is_dirty));
-
- prev_ptr = entry_ptr->aux_prev;
-
- if (!entry_ptr->prefetched_dirty
-#ifdef H5_HAVE_PARALLEL
- && !entry_ptr->coll_access
-#endif /* H5_HAVE_PARALLEL */
- ) {
- if (H5C__flush_single_entry(
- f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
- } /* end if */
-
- /* we are scanning the clean LRU, so the serialize function
- * will not be called on any entry -- thus there is no
- * concern about the list being modified out from under
- * this function.
- */
-
- entry_ptr = prev_ptr;
- entries_examined++;
- }
-#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
- }
-
-done:
- /* Sanity checks */
- HDassert(cache_ptr->msic_in_progress);
- if (!reentrant_call)
- cache_ptr->msic_in_progress = FALSE;
- HDassert((!reentrant_call) || (cache_ptr->msic_in_progress));
-
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__make_space_in_cache() */
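/*
 * Illustrative sketch (hypothetical, not from the HDF5 sources): the
 * shape of the clean-LRU eviction scan above.  Walk from the tail (least
 * recently used) toward the head, saving the prev pointer before any
 * unlink so the scan can continue after an eviction.
 */
#include <stdlib.h>

struct lru_node {
    struct lru_node *prev, *next;
    size_t           size;
    int              dirty;
};

static void
evict_from_tail_sketch(struct lru_node **tail, size_t *total, size_t budget)
{
    struct lru_node *cur = *tail;

    while (cur != NULL && *total > budget) {
        struct lru_node *prev = cur->prev; /* save before unlinking cur */

        if (!cur->dirty) {
            /* unlink and release the clean entry */
            if (prev != NULL)
                prev->next = cur->next;
            if (cur->next != NULL)
                cur->next->prev = prev;
            else
                *tail = prev; /* cur was the tail */
            *total -= cur->size;
            free(cur);
        }
        cur = prev;
    }
}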
-
-/*-------------------------------------------------------------------------
- *
- * Function: H5C__validate_lru_list
- *
- * Purpose: Debugging function that scans the LRU list for errors.
- *
- * If an error is detected, the function generates a
- * diagnostic and returns FAIL. If no error is detected,
- * the function returns SUCCEED.
- *
- * Return: FAIL if error is detected, SUCCEED otherwise.
- *
- * Programmer: John Mainzer, 7/14/05
- *
- *-------------------------------------------------------------------------
- */
-#ifdef H5C_DO_EXTREME_SANITY_CHECKS
-herr_t
-H5C__validate_lru_list(H5C_t *cache_ptr)
-{
- int32_t len = 0;
- size_t size = 0;
- H5C_cache_entry_t *entry_ptr = NULL;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
- if (((cache_ptr->LRU_head_ptr == NULL) || (cache_ptr->LRU_tail_ptr == NULL)) &&
- (cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU list head/tail check failed")
-
- if ((cache_ptr->LRU_list_len == 1) &&
- ((cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr) || (cache_ptr->LRU_head_ptr == NULL) ||
- (cache_ptr->LRU_head_ptr->size != cache_ptr->LRU_list_size)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU list sanity check failed")
-
- if ((cache_ptr->LRU_list_len >= 1) &&
- ((cache_ptr->LRU_head_ptr == NULL) || (cache_ptr->LRU_head_ptr->prev != NULL) ||
- (cache_ptr->LRU_tail_ptr == NULL) || (cache_ptr->LRU_tail_ptr->next != NULL)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU list sanity check failed")
-
- entry_ptr = cache_ptr->LRU_head_ptr;
- while (entry_ptr != NULL) {
- if ((entry_ptr != cache_ptr->LRU_head_ptr) &&
- ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers")
-
- if ((entry_ptr != cache_ptr->LRU_tail_ptr) &&
- ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers")
-
- if (entry_ptr->is_pinned || entry_ptr->pinned_from_client || entry_ptr->pinned_from_cache)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "invalid entry 'pin origin' fields")
-
- len++;
- size += entry_ptr->size;
- entry_ptr = entry_ptr->next;
- }
-
- if ((cache_ptr->LRU_list_len != (uint32_t)len) || (cache_ptr->LRU_list_size != size))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU list length/size check failed")
-
-done:
- if (ret_value != SUCCEED)
- HDassert(0);
-
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__validate_lru_list() */
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
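/*
 * Illustrative sketch (hypothetical, not from the HDF5 sources): the
 * invariant shared by this validator and the two that follow, over a
 * generic node type -- head/tail consistency, sane prev/next links, and
 * running length/size totals that match the cached bookkeeping.
 */
#include <stddef.h>
#include <stdint.h>

struct dl_node {
    struct dl_node *prev, *next;
    size_t          size;
};

static int
validate_dlist_sketch(const struct dl_node *head, const struct dl_node *tail,
                      uint32_t exp_len, size_t exp_size)
{
    uint32_t len  = 0;
    size_t   size = 0;

    if ((head == NULL) != (tail == NULL))
        return -1; /* head and tail must be NULL together */
    if (head != NULL && (head->prev != NULL || tail->next != NULL))
        return -1; /* list must be terminated at both ends */

    for (const struct dl_node *n = head; n != NULL; n = n->next) {
        if (n != head && (n->prev == NULL || n->prev->next != n))
            return -1; /* bad backward link */
        if (n != tail && (n->next == NULL || n->next->prev != n))
            return -1; /* bad forward link */
        len++;
        size += n->size;
    }

    return (len == exp_len && size == exp_size) ? 0 : -1;
}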
-
-/*-------------------------------------------------------------------------
- *
- * Function: H5C__validate_pinned_entry_list
- *
- * Purpose: Debugging function that scans the pinned entry list for
- * errors.
- *
- * If an error is detected, the function generates a
- * diagnostic and returns FAIL. If no error is detected,
- * the function returns SUCCEED.
- *
- * Return: FAIL if error is detected, SUCCEED otherwise.
- *
- * Programmer: John Mainzer, 4/25/14
- *
- *-------------------------------------------------------------------------
- */
-#ifdef H5C_DO_EXTREME_SANITY_CHECKS
-herr_t
-H5C__validate_pinned_entry_list(H5C_t *cache_ptr)
-{
- int32_t len = 0;
- size_t size = 0;
- H5C_cache_entry_t *entry_ptr = NULL;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
- if (((cache_ptr->pel_head_ptr == NULL) || (cache_ptr->pel_tail_ptr == NULL)) &&
- (cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list head/tail check failed")
-
- if ((cache_ptr->pel_len == 1) &&
- ((cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr) || (cache_ptr->pel_head_ptr == NULL) ||
- (cache_ptr->pel_head_ptr->size != cache_ptr->pel_size)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list sanity check failed")
-
- if ((cache_ptr->pel_len >= 1) &&
- ((cache_ptr->pel_head_ptr == NULL) || (cache_ptr->pel_head_ptr->prev != NULL) ||
- (cache_ptr->pel_tail_ptr == NULL) || (cache_ptr->pel_tail_ptr->next != NULL)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list sanity check failed")
-
- entry_ptr = cache_ptr->pel_head_ptr;
- while (entry_ptr != NULL) {
- if ((entry_ptr != cache_ptr->pel_head_ptr) &&
- ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers")
-
- if ((entry_ptr != cache_ptr->pel_tail_ptr) &&
- ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers")
-
- if (!entry_ptr->is_pinned)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list contains unpinned entry")
-
- if (!(entry_ptr->pinned_from_client || entry_ptr->pinned_from_cache))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "invalid entry 'pin origin' fields")
-
- len++;
- size += entry_ptr->size;
- entry_ptr = entry_ptr->next;
- }
-
- if ((cache_ptr->pel_len != (uint32_t)len) || (cache_ptr->pel_size != size))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list length/size check failed")
-
-done:
- if (ret_value != SUCCEED)
- HDassert(0);
-
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__validate_pinned_entry_list() */
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
-/*-------------------------------------------------------------------------
- *
- * Function: H5C__validate_protected_entry_list
- *
- * Purpose: Debugging function that scans the protected entry list for
- * errors.
- *
- * If an error is detected, the function generates a
- * diagnostic and returns FAIL. If no error is detected,
- * the function returns SUCCEED.
- *
- * Return: FAIL if error is detected, SUCCEED otherwise.
- *
- * Programmer: John Mainzer, 4/25/14
- *
- *-------------------------------------------------------------------------
- */
-#ifdef H5C_DO_EXTREME_SANITY_CHECKS
-herr_t
-H5C__validate_protected_entry_list(H5C_t *cache_ptr)
-{
- int32_t len = 0;
- size_t size = 0;
- H5C_cache_entry_t *entry_ptr = NULL;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
- if (((cache_ptr->pl_head_ptr == NULL) || (cache_ptr->pl_tail_ptr == NULL)) &&
- (cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list head/tail check failed")
-
- if ((cache_ptr->pl_len == 1) &&
- ((cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr) || (cache_ptr->pl_head_ptr == NULL) ||
- (cache_ptr->pl_head_ptr->size != cache_ptr->pl_size)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list sanity check failed")
-
- if ((cache_ptr->pl_len >= 1) &&
- ((cache_ptr->pl_head_ptr == NULL) || (cache_ptr->pl_head_ptr->prev != NULL) ||
- (cache_ptr->pl_tail_ptr == NULL) || (cache_ptr->pl_tail_ptr->next != NULL)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list sanity check failed")
-
- entry_ptr = cache_ptr->pl_head_ptr;
- while (entry_ptr != NULL) {
- if ((entry_ptr != cache_ptr->pl_head_ptr) &&
- ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers")
-
- if ((entry_ptr != cache_ptr->pl_tail_ptr) &&
- ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers")
-
- if (!entry_ptr->is_protected)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list contains unprotected entry")
-
- if (entry_ptr->is_read_only && (entry_ptr->ro_ref_count <= 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "read-only entry has non-positive ref count")
-
- len++;
- size += entry_ptr->size;
- entry_ptr = entry_ptr->next;
- }
-
- if ((cache_ptr->pl_len != (uint32_t)len) || (cache_ptr->pl_size != size))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list length/size check failed")
-
-done:
- if (ret_value != SUCCEED)
- HDassert(0);
-
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__validate_protected_entry_list() */
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
-/*-------------------------------------------------------------------------
- *
- * Function: H5C__entry_in_skip_list
- *
- * Purpose: Debugging function that scans skip list to see if it
- * is in present. We need this, as it is possible for
- * an entry to be in the skip list twice.
- *
- * Return: FALSE if the entry is not in the skip list, and TRUE
- * if it is.
- *
- * Programmer: John Mainzer, 11/1/14
- *
- *-------------------------------------------------------------------------
- */
-#ifdef H5C_DO_SLIST_SANITY_CHECKS
-hbool_t
-H5C__entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr)
-{
- H5SL_node_t *node_ptr;
- hbool_t in_slist;
- hbool_t ret_value;
-
- FUNC_ENTER_PACKAGE
-
- /* Assertions */
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->slist_ptr);
-
- node_ptr = H5SL_first(cache_ptr->slist_ptr);
- in_slist = FALSE;
- while ((node_ptr != NULL) && (!in_slist)) {
- H5C_cache_entry_t *entry_ptr;
-
- entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
-
- HDassert(entry_ptr);
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->is_dirty);
- HDassert(entry_ptr->in_slist);
-
- if (entry_ptr == target_ptr)
- in_slist = TRUE;
- else
- node_ptr = H5SL_next(node_ptr);
- }
-
- /* Set return value */
- ret_value = in_slist;
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__entry_in_skip_list() */
-#endif /* H5C_DO_SLIST_SANITY_CHECKS */
-
-/*-------------------------------------------------------------------------
- *
- * Function: H5C__flush_marked_entries
- *
- * Purpose: Flushes all marked entries in the cache.
- *
- * Return: FAIL if error is detected, SUCCEED otherwise.
- *
- * Programmer: Mike McGreevy
- * November 3, 2010
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5C__flush_marked_entries(H5F_t *f)
-{
- herr_t ret_value = SUCCEED;
-
- FUNC_ENTER_PACKAGE
-
- /* Assertions */
- HDassert(f != NULL);
-
- /* Enable the slist, as it is needed in the flush */
- if (H5C_set_slist_enabled(f->shared->cache, TRUE, FALSE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist enabled failed")
-
- /* Flush all marked entries */
- if (H5C_flush_cache(f, H5C__FLUSH_MARKED_ENTRIES_FLAG | H5C__FLUSH_IGNORE_PROTECTED_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush cache")
-
- /* Disable the slist. Set the clear_slist parameter to TRUE
- * since we called H5C_flush_cache() with the
- * H5C__FLUSH_MARKED_ENTRIES_FLAG.
- */
- if (H5C_set_slist_enabled(f->shared->cache, FALSE, TRUE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "disable slist failed")
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__flush_marked_entries */
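/*
 * Illustrative sketch (hypothetical, not from the HDF5 sources): the
 * bracketing pattern above -- enable the skip list, run the flush that
 * needs it, then disable it and clear whatever the flush left behind.
 * Both function pointers are stand-ins.
 */
static int
with_slist_sketch(int (*set_enabled)(int enable, int clear), int (*do_flush)(void))
{
    if (set_enabled(1 /* enable */, 0 /* nothing to clear */) < 0)
        return -1;
    if (do_flush() < 0)
        return -1;
    return set_enabled(0 /* disable */, 1 /* clear leftovers */);
}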
-
-/*-------------------------------------------------------------------------
- *
* Function: H5C_cork
*
* Purpose: To cork/uncork/get cork status of an object depending on "action":
@@ -7248,1051 +1493,3 @@ H5C_cork(H5C_t *cache_ptr, haddr_t obj_addr, unsigned action, hbool_t *corked)
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_cork() */
-
-/*-------------------------------------------------------------------------
- * Function: H5C__mark_flush_dep_dirty()
- *
- * Purpose: Recursively propagate the flush_dep_ndirty_children flag
- * up the dependency chain in response to entry either
- * becoming dirty or having its flush_dep_ndirty_children
- * increased from 0.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Neil Fortner
- * 11/13/12
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__mark_flush_dep_dirty(H5C_cache_entry_t *entry)
-{
- unsigned u; /* Local index variable */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- /* Sanity checks */
- HDassert(entry);
-
- /* Iterate over the parent entries, if any */
- for (u = 0; u < entry->flush_dep_nparents; u++) {
- /* Sanity check */
- HDassert(entry->flush_dep_parent[u]->flush_dep_ndirty_children <
- entry->flush_dep_parent[u]->flush_dep_nchildren);
-
- /* Adjust the parent's number of dirty children */
- entry->flush_dep_parent[u]->flush_dep_ndirty_children++;
-
- /* If the parent has a 'notify' callback, send a 'child entry dirtied' notice */
- if (entry->flush_dep_parent[u]->type->notify &&
- (entry->flush_dep_parent[u]->type->notify)(H5C_NOTIFY_ACTION_CHILD_DIRTIED,
- entry->flush_dep_parent[u]) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify parent about child entry dirty flag set")
- } /* end for */
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__mark_flush_dep_dirty() */
-
-/*-------------------------------------------------------------------------
- * Function: H5C__mark_flush_dep_clean()
- *
- * Purpose: Recursively propagate the flush_dep_ndirty_children flag
- * up the dependency chain in response to entry either
- * becoming clean or having its flush_dep_ndirty_children
- * reduced to 0.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Neil Fortner
- * 11/13/12
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__mark_flush_dep_clean(H5C_cache_entry_t *entry)
-{
- int i; /* Local index variable */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- /* Sanity checks */
- HDassert(entry);
-
- /* Iterate over the parent entries, if any */
- /* Note reverse iteration order, in case the callback removes the flush
- * dependency - QAK, 2017/08/12
- */
- for (i = ((int)entry->flush_dep_nparents) - 1; i >= 0; i--) {
- /* Sanity check */
- HDassert(entry->flush_dep_parent[i]->flush_dep_ndirty_children > 0);
-
- /* Adjust the parent's number of dirty children */
- entry->flush_dep_parent[i]->flush_dep_ndirty_children--;
-
- /* If the parent has a 'notify' callback, send a 'child entry cleaned' notice */
- if (entry->flush_dep_parent[i]->type->notify &&
- (entry->flush_dep_parent[i]->type->notify)(H5C_NOTIFY_ACTION_CHILD_CLEANED,
- entry->flush_dep_parent[i]) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify parent about child entry dirty flag reset")
- } /* end for */
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__mark_flush_dep_clean() */
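/*
 * Illustrative sketch (hypothetical, not from the HDF5 sources): the
 * counter discipline maintained by the two routines above.  Dirtying a
 * child increments every parent's dirty-child count exactly once, and
 * cleaning decrements it exactly once, so the count always equals the
 * number of currently dirty children.
 */
struct dep_node {
    struct dep_node **parents;
    unsigned          nparents;
    unsigned          ndirty_children;
};

static void
child_dirtied_sketch(struct dep_node *child)
{
    for (unsigned u = 0; u < child->nparents; u++)
        child->parents[u]->ndirty_children++;
}

static void
child_cleaned_sketch(struct dep_node *child)
{
    /* reverse order mirrors the real code, which must tolerate a notify
     * callback removing a flush dependency mid-loop
     */
    for (int i = (int)child->nparents - 1; i >= 0; i--)
        child->parents[i]->ndirty_children--;
}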
-
-/*-------------------------------------------------------------------------
- * Function: H5C__mark_flush_dep_serialized()
- *
- * Purpose: Decrement the flush_dep_nunser_children fields of all the
- * target entry's flush dependency parents in response to
- * the target entry becoming serialized.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer
- * 8/30/16
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__mark_flush_dep_serialized(H5C_cache_entry_t *entry_ptr)
-{
- int i; /* Local index variable */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- /* Sanity checks */
- HDassert(entry_ptr);
-
- /* Iterate over the parent entries, if any */
- /* Note reverse iteration order, in case the callback removes the flush
- * dependency - QAK, 2017/08/12
- */
- for (i = ((int)entry_ptr->flush_dep_nparents) - 1; i >= 0; i--) {
- /* Sanity checks */
- HDassert(entry_ptr->flush_dep_parent);
- HDassert(entry_ptr->flush_dep_parent[i]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->flush_dep_parent[i]->flush_dep_nunser_children > 0);
-
- /* decrement the parent's number of unserialized children */
- entry_ptr->flush_dep_parent[i]->flush_dep_nunser_children--;
-
- /* If the parent has a 'notify' callback, send a 'child entry serialized' notice */
- if (entry_ptr->flush_dep_parent[i]->type->notify &&
- (entry_ptr->flush_dep_parent[i]->type->notify)(H5C_NOTIFY_ACTION_CHILD_SERIALIZED,
- entry_ptr->flush_dep_parent[i]) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify parent about child entry serialized flag set")
- } /* end for */
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__mark_flush_dep_serialized() */
-
-/*-------------------------------------------------------------------------
- * Function: H5C__mark_flush_dep_unserialized()
- *
- * Purpose: Increment the flush_dep_nunser_children fields of all the
- * target entry's flush dependency parents in response to
- * the target entry becoming unserialized.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer
- * 8/30/16
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__mark_flush_dep_unserialized(H5C_cache_entry_t *entry_ptr)
-{
- unsigned u; /* Local index variable */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- /* Sanity checks */
- HDassert(entry_ptr);
-
- /* Iterate over the parent entries, if any */
- for (u = 0; u < entry_ptr->flush_dep_nparents; u++) {
- /* Sanity check */
- HDassert(entry_ptr->flush_dep_parent);
- HDassert(entry_ptr->flush_dep_parent[u]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children <
- entry_ptr->flush_dep_parent[u]->flush_dep_nchildren);
-
- /* increment the parent's number of unserialized children */
- entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children++;
-
- /* If the parent has a 'notify' callback, send a 'child entry unserialized' notice */
- if (entry_ptr->flush_dep_parent[u]->type->notify &&
- (entry_ptr->flush_dep_parent[u]->type->notify)(H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED,
- entry_ptr->flush_dep_parent[u]) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify parent about child entry serialized flag reset")
- } /* end for */
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__mark_flush_dep_unserialized() */
-
-#ifndef NDEBUG
-/*-------------------------------------------------------------------------
- * Function: H5C__assert_flush_dep_nocycle()
- *
- * Purpose: Assert recursively that base_entry is not the same as
- * entry, and perform the same assertion on all of entry's
- * flush dependency parents. This is used to detect cycles
- * created by flush dependencies.
- *
- * Return: void
- *
- * Programmer: Neil Fortner
- * 12/10/12
- *
- *-------------------------------------------------------------------------
- */
-static void
-H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t *entry, const H5C_cache_entry_t *base_entry)
-{
- unsigned u; /* Local index variable */
-
- FUNC_ENTER_PACKAGE_NOERR
-
- /* Sanity checks */
- HDassert(entry);
- HDassert(base_entry);
-
- /* Make sure the entries are not the same */
- HDassert(base_entry != entry);
-
- /* Iterate over entry's parents (if any) */
- for (u = 0; u < entry->flush_dep_nparents; u++)
- H5C__assert_flush_dep_nocycle(entry->flush_dep_parent[u], base_entry);
-
- FUNC_LEAVE_NOAPI_VOID
-} /* H5C__assert_flush_dep_nocycle() */
-#endif
-
-/*-------------------------------------------------------------------------
- * Function: H5C__serialize_cache
- *
- * Purpose: Serialize (i.e. construct an on-disk image for) all entries
- * in the metadata cache, including clean entries.
- *
- * Note that flush dependencies and "flush me last" flags
- * must be observed in the serialization process.
- *
- * Note also that entries may be loaded, flushed, evicted,
- * expunged, relocated, resized, or removed from the cache
- * during this process, just as these actions may occur during
- * a regular flush.
- *
- * However, we are given that the cache will contain no protected
- * entries on entry to this routine (although entries may be
- * briefly protected and then unprotected during the serialize
- * process).
- *
- * The objective of this routine is to serialize all entries and
- * to force all entries into their actual locations on disk.
- *
- * The initial need for this routine is to settle all entries
- * in the cache prior to construction of the metadata cache
- * image so that the size of the cache image can be calculated.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * a request to flush all items and something was protected.
- *
- * Programmer: John Mainzer
- * 7/22/15
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5C__serialize_cache(H5F_t *f)
-{
-#ifdef H5C_DO_SANITY_CHECKS
- int i;
- uint32_t index_len = 0;
- size_t index_size = (size_t)0;
- size_t clean_index_size = (size_t)0;
- size_t dirty_index_size = (size_t)0;
- size_t slist_size = (size_t)0;
- uint32_t slist_len = 0;
-#endif /* H5C_DO_SANITY_CHECKS */
- H5C_ring_t ring;
- H5C_t *cache_ptr;
- herr_t ret_value = SUCCEED;
-
- FUNC_ENTER_PACKAGE
-
- /* Sanity checks */
- HDassert(f);
- HDassert(f->shared);
- cache_ptr = f->shared->cache;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->slist_ptr);
-
-#ifdef H5C_DO_SANITY_CHECKS
- HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
- HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
- HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
-
- for (i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
- index_len += cache_ptr->index_ring_len[i];
- index_size += cache_ptr->index_ring_size[i];
- clean_index_size += cache_ptr->clean_index_ring_size[i];
- dirty_index_size += cache_ptr->dirty_index_ring_size[i];
-
- slist_len += cache_ptr->slist_ring_len[i];
- slist_size += cache_ptr->slist_ring_size[i];
- } /* end for */
-
- HDassert(cache_ptr->index_len == index_len);
- HDassert(cache_ptr->index_size == index_size);
- HDassert(cache_ptr->clean_index_size == clean_index_size);
- HDassert(cache_ptr->dirty_index_size == dirty_index_size);
- HDassert(cache_ptr->slist_len == slist_len);
- HDassert(cache_ptr->slist_size == slist_size);
-#endif /* H5C_DO_SANITY_CHECKS */
-
-#ifdef H5C_DO_EXTREME_SANITY_CHECKS
- if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
- H5C__validate_lru_list(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
-#ifndef NDEBUG
- /* if this is a debug build, set the serialization_count field of
- * each entry in the cache to zero before we start the serialization.
- * This allows us to detect the case in which any entry is serialized
- * more than once (a performance issue), and more importantly, the
- * case in which any flush dependency parent is serialized more than
- * once (a correctness issue).
- */
- {
- H5C_cache_entry_t *scan_ptr = NULL;
-
- scan_ptr = cache_ptr->il_head;
- while (scan_ptr != NULL) {
- HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- scan_ptr->serialization_count = 0;
- scan_ptr = scan_ptr->il_next;
- } /* end while */
- } /* end block */
-#endif
-
- /* set cache_ptr->serialization_in_progress to TRUE, and back
- * to FALSE at the end of the function. Must maintain this flag
- * to support H5C_get_serialization_in_progress(), which is in
- * turn required to support sanity checking in some cache
- * clients.
- */
- HDassert(!cache_ptr->serialization_in_progress);
- cache_ptr->serialization_in_progress = TRUE;
-
- /* Serialize each ring, starting from the outermost ring and
- * working inward.
- */
- ring = H5C_RING_USER;
- while (ring < H5C_RING_NTYPES) {
- HDassert(cache_ptr->close_warning_received);
- switch (ring) {
- case H5C_RING_USER:
- break;
-
- case H5C_RING_RDFSM:
- /* Settle raw data FSM */
- if (!cache_ptr->rdfsm_settled)
- if (H5MF_settle_raw_data_fsm(f, &cache_ptr->rdfsm_settled) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "RD FSM settle failed")
- break;
-
- case H5C_RING_MDFSM:
- /* Settle metadata FSM */
- if (!cache_ptr->mdfsm_settled)
- if (H5MF_settle_meta_data_fsm(f, &cache_ptr->mdfsm_settled) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "MD FSM settle failed")
- break;
-
- case H5C_RING_SBE:
- case H5C_RING_SB:
- break;
-
- default:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown ring?!?!")
- break;
- } /* end switch */
-
- if (H5C__serialize_ring(f, ring) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "serialize ring failed")
-
- ring++;
- } /* end while */
-
-#ifndef NDEBUG
- /* Verify that no entry has been serialized more than once.
- * FD parents with multiple serializations should have been caught
- * elsewhere, so no specific check for them here.
- */
- {
- H5C_cache_entry_t *scan_ptr = NULL;
-
- scan_ptr = cache_ptr->il_head;
- while (scan_ptr != NULL) {
- HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(scan_ptr->serialization_count <= 1);
-
- scan_ptr = scan_ptr->il_next;
- } /* end while */
- } /* end block */
-#endif
-
-done:
- cache_ptr->serialization_in_progress = FALSE;
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__serialize_cache() */
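/*
 * Illustrative sketch (hypothetical, not from the HDF5 sources): the
 * fixed-point scan used by H5C__serialize_ring() below, reduced to its
 * control flow.  Callbacks may insert, load, or relocate items mid-scan;
 * the real code detects this with counters and restarts from the head.
 * Termination assumes do_work() makes progress on each item it visits.
 */
struct scan_item {
    struct scan_item *next;
};

static void
serialize_fixed_point_sketch(struct scan_item *head, int *perturbed,
                             int (*needs_work)(struct scan_item *),
                             void (*do_work)(struct scan_item *))
{
    int done = 0;

    while (!done) {
        done = 1; /* cleared if any item still needed work this pass */

        for (struct scan_item *it = head; it != NULL;) {
            if (needs_work(it)) {
                done = 0;
                do_work(it); /* may set *perturbed via callbacks */
            }
            if (*perturbed) {
                *perturbed = 0;
                it = head; /* list may have changed -- restart the scan */
            }
            else
                it = it->next;
        }
    }
}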
-
-/*-------------------------------------------------------------------------
- * Function: H5C__serialize_ring
- *
- * Purpose: Serialize the entries contained in the specified cache and
- * ring. All entries in rings outside the specified ring
- * must have been serialized on entry.
- *
- * If the cache contains protected entries in the specified
- * ring, the function will fail, as protected entries cannot
- * be serialized. However all unprotected entries in the
- * target ring should be serialized before the function
- * returns failure.
- *
- * If flush dependencies appear in the target ring, the
- * function makes repeated passes through the index list
- * serializing entries in flush dependency order.
- *
- * All entries outside the H5C_RING_SBE are marked for
- * inclusion in the cache image. Entries in H5C_RING_SBE
- * and below are marked for exclusion from the image.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * a request to flush all items and something was protected.
- *
- * Programmer: John Mainzer
- * 9/11/15
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__serialize_ring(H5F_t *f, H5C_ring_t ring)
-{
- hbool_t done = FALSE;
- H5C_t *cache_ptr;
- H5C_cache_entry_t *entry_ptr;
- herr_t ret_value = SUCCEED;
-
- FUNC_ENTER_PACKAGE
-
- /* Sanity checks */
- HDassert(f);
- HDassert(f->shared);
- cache_ptr = f->shared->cache;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(ring > H5C_RING_UNDEFINED);
- HDassert(ring < H5C_RING_NTYPES);
-
- HDassert(cache_ptr->serialization_in_progress);
-
- /* The objective here is to serialize all entries in the cache ring
- * in flush dependency order.
- *
- * The basic algorithm is to scan the cache index list looking for
- * unserialized entries that are either not in a flush dependency
- * relationship, or which have no unserialized children. Any such
- * entry is serialized and its flush dependency parents (if any) are
- * informed -- allowing them to decrement their unserialized child counts.
- *
- * However, this algorithm is complicated by the ability
- * of client serialization callbacks to perform operations
- * on the cache which can result in the insertion, deletion,
- * relocation, resize, dirty, flush, eviction, or removal (via the
- * take ownership flag) of entries. Changes in the flush dependency
- * structure are also possible.
- *
- * On the other hand, the algorithm is simplified by the fact that
- * we are serializing, not flushing. Thus, as long as all entries
- * are serialized correctly, it doesn't matter if we have to go back
- * and serialize an entry a second time.
- *
- * These possible actions result in the following modifications to
- * the basic algorithm:
- *
- * 1) In the event of an entry expunge, eviction or removal, we must
- * restart the scan as it is possible that the next entry in our
- * scan is no longer in the cache. Were we to examine this entry,
- * we would be accessing deallocated memory.
- *
- * 2) A resize, dirty, or insertion of an entry may result in
- * the increment of a flush dependency parent's dirty and/or
- * unserialized child count. In the context of serializing
- * the cache, this is a non-issue, as even if we have already
- * serialized the parent, it will be marked dirty and its image
- * marked out of date if appropriate when the child is serialized.
- *
- * However, this is a major issue for a flush, as were this to happen
- * in a flush, it would violate the invariant that the flush dependency
- * feature is intended to enforce. As the metadata cache has no
- * control over the behavior of cache clients, it has no way of
- * preventing this behavior. However, it should detect it if at all
- * possible.
- *
- * Do this by maintaining a count of the number of times each entry is
- * serialized during a cache serialization. If any flush dependency
- * parent is serialized more than once, throw an assertion failure.
- *
- * 3) An entry relocation will typically change the location of the
- * entry in the index list. This shouldn't cause problems as we
- * will scan the index list until we make a complete pass without
- * finding anything to serialize -- making relocations of either
- * the current or next entries irrelevant.
- *
- * Note that since a relocation may result in our skipping part of
- * the index list, we must always do at least one more pass through
- * the index list after an entry relocation.
- *
- * 4) Changes in the flush dependency structure are possible on
- * entry insertion, load, expunge, evict, or remove. Destruction
- * of a flush dependency has no effect, as it can only relax the
- * flush dependencies. Creation of a flush dependency can create
- * an unserialized child of a flush dependency parent where all
- * flush dependency children were previously serialized. Should
- * this child dirty the flush dependency parent when it is serialized,
- * the parent will be re-serialized.
- *
- * Per the discussion of 2) above, this is a non-issue for cache
- * serialization, and a major problem for cache flush. Using the
- * same detection mechanism, throw an assertion failure if this
- * condition appears.
- *
- * Observe that either eviction or removal of entries as a result of
- * a serialization is not a problem as long as the flush dependency
- * tree does not change beyond the removal of a leaf.
- */
- while (!done) {
- /* Reset the counters so that we can detect insertions, loads,
- * moves, and flush dependency height changes caused by the pre_serialize
- * and serialize callbacks.
- */
- cache_ptr->entries_loaded_counter = 0;
- cache_ptr->entries_inserted_counter = 0;
- cache_ptr->entries_relocated_counter = 0;
-
- done = TRUE; /* set to FALSE if any activity in inner loop */
- entry_ptr = cache_ptr->il_head;
- while (entry_ptr != NULL) {
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
-
- /* Verify that either the entry is already serialized, or
- * that it is assigned to either the target or an inner
- * ring.
- */
- HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));
-
- /* Skip flush me last entries or inner ring entries */
- if (!entry_ptr->flush_me_last && entry_ptr->ring == ring) {
-
- /* if we encounter an unserialized entry in the current
- * ring that is not marked flush me last, we are not done.
- */
- if (!entry_ptr->image_up_to_date)
- done = FALSE;
-
- /* Serialize the entry if its image is not up to date
- * and it has no unserialized flush dependency children.
- */
- if (!entry_ptr->image_up_to_date && entry_ptr->flush_dep_nunser_children == 0) {
- HDassert(entry_ptr->serialization_count == 0);
-
- /* Serialize the entry */
- if (H5C__serialize_single_entry(f, cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")
-
- HDassert(entry_ptr->flush_dep_nunser_children == 0);
- HDassert(entry_ptr->serialization_count == 0);
-
-#ifndef NDEBUG
- /* Increment serialization counter (to detect multiple serializations) */
- entry_ptr->serialization_count++;
-#endif
- } /* end if */
- } /* end if */
-
- /* Check for the cache being perturbed during the entry serialize */
- if ((cache_ptr->entries_loaded_counter > 0) || (cache_ptr->entries_inserted_counter > 0) ||
- (cache_ptr->entries_relocated_counter > 0)) {
-
-#if H5C_COLLECT_CACHE_STATS
- H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr);
-#endif /* H5C_COLLECT_CACHE_STATS */
-
- /* Reset the counters */
- cache_ptr->entries_loaded_counter = 0;
- cache_ptr->entries_inserted_counter = 0;
- cache_ptr->entries_relocated_counter = 0;
-
- /* Restart scan */
- entry_ptr = cache_ptr->il_head;
- } /* end if */
- else
- /* Advance to next entry */
- entry_ptr = entry_ptr->il_next;
- } /* while ( entry_ptr != NULL ) */
- } /* while ( ! done ) */
-
- /* Reset the counters so that we can detect insertions, loads,
- * moves, and flush dependency height changes caused by the pre_serialize
- * and serialize callbacks.
- */
- cache_ptr->entries_loaded_counter = 0;
- cache_ptr->entries_inserted_counter = 0;
- cache_ptr->entries_relocated_counter = 0;
-
- /* At this point, all entries not marked "flush me last" and in
- * the current ring or outside it should be serialized and have up
- * to date images. Scan the index list again to serialize the
- * "flush me last" entries (if they are in the current ring) and to
- * verify that all other entries have up to date images.
- */
- entry_ptr = cache_ptr->il_head;
- while (entry_ptr != NULL) {
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->ring > H5C_RING_UNDEFINED);
- HDassert(entry_ptr->ring < H5C_RING_NTYPES);
- HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));
-
- if (entry_ptr->ring == ring) {
- if (entry_ptr->flush_me_last) {
- if (!entry_ptr->image_up_to_date) {
- HDassert(entry_ptr->serialization_count == 0);
- HDassert(entry_ptr->flush_dep_nunser_children == 0);
-
- /* Serialize the entry */
- if (H5C__serialize_single_entry(f, cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")
-
- /* Check for the cache changing */
- if ((cache_ptr->entries_loaded_counter > 0) ||
- (cache_ptr->entries_inserted_counter > 0) ||
- (cache_ptr->entries_relocated_counter > 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
- "flush_me_last entry serialization triggered restart")
-
- HDassert(entry_ptr->flush_dep_nunser_children == 0);
- HDassert(entry_ptr->serialization_count == 0);
-#ifndef NDEBUG
- /* Increment serialization counter (to detect multiple serializations) */
- entry_ptr->serialization_count++;
-#endif
- } /* end if */
- } /* end if */
- else {
- HDassert(entry_ptr->image_up_to_date);
- HDassert(entry_ptr->serialization_count <= 1);
- HDassert(entry_ptr->flush_dep_nunser_children == 0);
- } /* end else */
- } /* if ( entry_ptr->ring == ring ) */
-
- entry_ptr = entry_ptr->il_next;
- } /* while ( entry_ptr != NULL ) */
-
-done:
- HDassert(cache_ptr->serialization_in_progress);
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__serialize_ring() */
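A minimal sketch of the scan-with-restart idiom used by H5C__serialize_ring() above, assuming a hypothetical singly-linked node list and a mutation counter that callbacks bump (the names here are illustrative, not the cache's real fields):

    struct node {
        struct node *next;
        int          work_done; /* analogous to image_up_to_date */
    };

    static void
    scan_with_restart(struct node *head, unsigned *mutation_counter, void (*visit)(struct node *))
    {
        int done = 0;

        while (!done) {
            struct node *cur = head;

            *mutation_counter = 0; /* reset before each full pass */
            done              = 1; /* cleared if any work remains */
            while (cur != NULL) {
                if (!cur->work_done) {
                    done = 0;
                    /* visit() is expected to make progress by eventually
                     * setting work_done; it may mutate the list via callbacks.
                     */
                    visit(cur);
                }
                if (*mutation_counter > 0) {
                    *mutation_counter = 0;
                    cur               = head; /* list changed under us -- restart scan */
                }
                else
                    cur = cur->next;
            }
        }
    }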
-
-/*-------------------------------------------------------------------------
- * Function: H5C__serialize_single_entry
- *
- * Purpose: Serialize the cache entry pointed to by the entry_ptr
- * parameter.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer, 7/24/15
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__serialize_single_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
-{
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- /* Sanity checks */
- HDassert(f);
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(entry_ptr);
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(!entry_ptr->prefetched);
- HDassert(!entry_ptr->image_up_to_date);
- HDassert(entry_ptr->is_dirty);
- HDassert(!entry_ptr->is_protected);
- HDassert(!entry_ptr->flush_in_progress);
- HDassert(entry_ptr->type);
-
- /* Set entry_ptr->flush_in_progress to TRUE so the target entry
- * will not be evicted out from under us. Must set it back to FALSE
- * when we are done.
- */
- entry_ptr->flush_in_progress = TRUE;
-
- /* Allocate buffer for the entry image if required. */
- if (NULL == entry_ptr->image_ptr) {
- HDassert(entry_ptr->size > 0);
- if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer")
-#if H5C_DO_MEMORY_SANITY_CHECKS
- H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE,
- H5C_IMAGE_EXTRA_SPACE);
-#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
- } /* end if */
-
- /* Generate image for entry */
- if (H5C__generate_image(f, cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "Can't generate image for cache entry")
-
- /* Reset the flush_in_progress flag */
- entry_ptr->flush_in_progress = FALSE;
-
-done:
- HDassert((ret_value != SUCCEED) || (!entry_ptr->flush_in_progress));
- HDassert((ret_value != SUCCEED) || (entry_ptr->image_up_to_date));
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__serialize_single_entry() */
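The H5C_IMAGE_EXTRA_SPACE / H5C_IMAGE_SANITY_VALUE logic above is a guard-byte check: the image buffer is over-allocated by a few bytes, a known sentinel is written just past the payload, and a later memcmp verifies the sentinel survived, catching buffer overruns by serialize callbacks. A stand-alone sketch of the same technique, with illustrative names rather than the cache's macros:

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    #define GUARD_SPACE 8
    #define GUARD_VALUE "DeadBeef" /* any 8-byte sentinel works */

    /* Allocate a payload buffer with trailing guard bytes */
    static void *
    guarded_alloc(size_t payload_size)
    {
        unsigned char *buf = malloc(payload_size + GUARD_SPACE);

        if (buf != NULL)
            memcpy(buf + payload_size, GUARD_VALUE, GUARD_SPACE);
        return buf;
    }

    /* Verify that nothing wrote past the payload */
    static void
    guarded_check(const void *buf, size_t payload_size)
    {
        assert(0 == memcmp((const unsigned char *)buf + payload_size, GUARD_VALUE, GUARD_SPACE));
    }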
-
-/*-------------------------------------------------------------------------
- * Function: H5C__generate_image
- *
- * Purpose: Serialize an entry and generate its image.
- *
- * Note: This may cause the entry to be re-sized and/or moved in
- * the cache.
- *
- * As we will not update the metadata cache's data structures
- * until we finish the write, we must touch up these
- * data structures for size and location changes even if we
- * are about to delete the entry from the cache (i.e. on a
- * flush destroy).
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Mohamad Chaarawi
- * 2/10/16
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
-{
- haddr_t new_addr = HADDR_UNDEF;
- haddr_t old_addr = HADDR_UNDEF;
- size_t new_len = 0;
- unsigned serialize_flags = H5C__SERIALIZE_NO_FLAGS_SET;
- herr_t ret_value = SUCCEED;
-
- FUNC_ENTER_PACKAGE
-
- /* Sanity check */
- HDassert(f);
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(entry_ptr);
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(!entry_ptr->image_up_to_date);
- HDassert(entry_ptr->is_dirty);
- HDassert(!entry_ptr->is_protected);
- HDassert(entry_ptr->type);
-
- /* make note of the entry's current address */
- old_addr = entry_ptr->addr;
-
- /* Call client's pre-serialize callback, if there's one */
- if ((entry_ptr->type->pre_serialize) &&
- ((entry_ptr->type->pre_serialize)(f, (void *)entry_ptr, entry_ptr->addr, entry_ptr->size, &new_addr,
- &new_len, &serialize_flags) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to pre-serialize entry")
-
- /* Check for any flags set in the pre-serialize callback */
- if (serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) {
- /* Check for unexpected flags from serialize callback */
- if (serialize_flags & ~(H5C__SERIALIZE_RESIZED_FLAG | H5C__SERIALIZE_MOVED_FLAG))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unknown serialize flag(s)")
-
-#ifdef H5_HAVE_PARALLEL
- /* In the parallel case, resizes and moves in
- * the serialize operation can cause problems.
- * If they occur, scream and die.
- *
- * At present, in the parallel case, the aux_ptr
- * will only be set if there is more than one
- * process. Thus we can use this to detect
- * the parallel case.
- *
- * This works for now, but if we start using the
- * aux_ptr for other purposes, we will have to
- * change this test accordingly.
- *
- * NB: While this test detects entries that attempt
- * to resize or move themselves during a flush
- * in the parallel case, it will not detect an
- * entry that dirties, resizes, and/or moves
- * other entries during its flush.
- */
- if (cache_ptr->aux_ptr != NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "resize/move in serialize occurred in parallel case")
-#endif
-
- /* If required, resize the buffer and update the entry and the cache
- * data structures
- */
- if (serialize_flags & H5C__SERIALIZE_RESIZED_FLAG) {
- /* Sanity check */
- HDassert(new_len > 0);
-
- /* Allocate a new image buffer */
- if (NULL ==
- (entry_ptr->image_ptr = H5MM_realloc(entry_ptr->image_ptr, new_len + H5C_IMAGE_EXTRA_SPACE)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL,
- "memory allocation failed for on disk image buffer")
-
-#if H5C_DO_MEMORY_SANITY_CHECKS
- H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + new_len, H5C_IMAGE_SANITY_VALUE,
- H5C_IMAGE_EXTRA_SPACE);
-#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
-
- /* Update statistics for resizing the entry */
- H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_len);
-
- /* Update the hash table for the size change */
- H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len, entry_ptr,
- !entry_ptr->is_dirty, FAIL);
-
- /* The entry can't be protected since we are in the process of
- * flushing it. Thus we must update the replacement policy data
- * structures for the size change. The macro deals with the pinned
- * case.
- */
- H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_len, FAIL);
-
- /* As we haven't updated the cache data structures
- * for the flush or flush destroy yet, the entry should
- * be in the slist if the slist is enabled. Since
- * H5C__UPDATE_SLIST_FOR_SIZE_CHANGE() is a no-op if the
- * slist is disabled, call it unconditionally.
- */
- HDassert(entry_ptr->is_dirty);
- HDassert((entry_ptr->in_slist) || (!cache_ptr->slist_enabled));
-
- H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len);
-
- /* Finally, update the entry for its new size */
- entry_ptr->size = new_len;
- } /* end if */
-
- /* If required, update the entry and the cache data structures
- * for a move
- */
- if (serialize_flags & H5C__SERIALIZE_MOVED_FLAG) {
- /* Update stats and entries relocated counter */
- H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr)
-
- /* We must update cache data structures for the change in address */
- if (entry_ptr->addr == old_addr) {
- /* Delete the entry from the hash table and the slist */
- H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL);
- H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE, FAIL)
-
- /* Update the entry for its new address */
- entry_ptr->addr = new_addr;
-
- /* And then reinsert in the index and slist */
- H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL);
- H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL);
- } /* end if */
- else /* move is already done for us -- just do sanity checks */
- HDassert(entry_ptr->addr == new_addr);
- } /* end if */
- } /* end if(serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) */
-
- /* Serialize object into buffer */
- if (entry_ptr->type->serialize(f, entry_ptr->image_ptr, entry_ptr->size, (void *)entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to serialize entry")
-
-#if H5C_DO_MEMORY_SANITY_CHECKS
- HDassert(0 == HDmemcmp(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE,
- H5C_IMAGE_EXTRA_SPACE));
-#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
-
- entry_ptr->image_up_to_date = TRUE;
-
- /* Propagate the fact that the entry is serialized up the
- * flush dependency chain if appropriate. Since the image must
- * have been out of date for this function to have been called
- * (see assertion on entry), no need to check that -- only check
- * for flush dependency parents.
- */
- HDassert(entry_ptr->flush_dep_nunser_children == 0);
-
- if (entry_ptr->flush_dep_nparents > 0)
- if (H5C__mark_flush_dep_serialized(entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "Can't propagate serialization status to fd parents")
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__generate_image */
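For reference, the pre-serialize callback invoked at the top of this function receives the entry's current address and size and may report a new length and/or address through its out parameters, together with the matching H5C__SERIALIZE_* flag. A hedged sketch of a client callback that grows its on-disk image ('my_entry_t' and 'my_compute_size' are hypothetical stand-ins, not HDF5 API):

    static herr_t
    my_pre_serialize(H5F_t *f, void *thing, haddr_t addr, size_t len, haddr_t *new_addr,
                     size_t *new_len, unsigned *flags)
    {
        my_entry_t *entry    = (my_entry_t *)thing;       /* hypothetical entry type */
        size_t      cur_size = my_compute_size(entry);    /* hypothetical size computation */

        (void)f;
        (void)addr;
        (void)new_addr;

        /* If the image size changed since the last serialization, ask the
         * cache to resize the image buffer before serialize is called.
         */
        if (cur_size != len) {
            *new_len = cur_size;
            *flags |= H5C__SERIALIZE_RESIZED_FLAG;
        }

        return SUCCEED;
    }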
-
-/*-------------------------------------------------------------------------
- *
- * Function: H5C_remove_entry
- *
- * Purpose: Remove an entry from the cache. The entry must not be
- * protected, pinned, dirty, or involved in flush dependencies.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * September 17, 2016
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5C_remove_entry(void *_entry)
-{
- H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_entry; /* Entry to remove */
- H5C_t *cache; /* Cache for file */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- /* Sanity checks */
- HDassert(entry);
- HDassert(entry->ring != H5C_RING_UNDEFINED);
- cache = entry->cache_ptr;
- HDassert(cache);
- HDassert(cache->magic == H5C__H5C_T_MAGIC);
-
- /* Check for error conditions */
- if (entry->is_dirty)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove dirty entry from cache")
- if (entry->is_protected)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove protected entry from cache")
- if (entry->is_pinned)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove pinned entry from cache")
- /* NOTE: If these two errors are getting tripped because the entry is
- * in a flush dependency with a freedspace entry, move the checks
- * after the "before evict" message is sent, and add the
- * "child being evicted" message to the "before evict" notify
- * section below. QAK - 2017/08/03
- */
- if (entry->flush_dep_nparents > 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL,
- "can't remove entry with flush dependency parents from cache")
- if (entry->flush_dep_nchildren > 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL,
- "can't remove entry with flush dependency children from cache")
-
- /* Additional internal cache consistency checks */
- HDassert(!entry->in_slist);
- HDassert(!entry->flush_marker);
- HDassert(!entry->flush_in_progress);
-
- /* Note that the algorithm below is (very) similar to the set of operations
- * in H5C__flush_single_entry() and should be kept in sync with changes
- * to that code. - QAK, 2016/11/30
- */
-
- /* Update stats, as if we are "destroying" and taking ownership of the entry */
- H5C__UPDATE_STATS_FOR_EVICTION(cache, entry, TRUE)
-
- /* If the entry's type has a 'notify' callback, send a 'before eviction'
- * notice while the entry is still fully integrated in the cache.
- */
- if (entry->type->notify && (entry->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT, entry) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry to evict")
-
- /* Update the cache internal data structures as appropriate for a destroy.
- * Specifically:
- * 1) Delete it from the index
- * 2) Delete it from the collective read access list
- * 3) Update the replacement policy for eviction
- * 4) Remove it from the tag list for this object
- */
-
- H5C__DELETE_FROM_INDEX(cache, entry, FAIL)
-
-#ifdef H5_HAVE_PARALLEL
- /* Check for collective read access flag */
- if (entry->coll_access) {
- entry->coll_access = FALSE;
- H5C__REMOVE_FROM_COLL_LIST(cache, entry, FAIL)
- } /* end if */
-#endif /* H5_HAVE_PARALLEL */
-
- H5C__UPDATE_RP_FOR_EVICTION(cache, entry, FAIL)
-
- /* Remove entry from tag list */
- if (H5C__untag_entry(cache, entry) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list")
-
- /* Increment entries_removed_counter and set last_entry_removed_ptr.
- * As we may be about to free the entry, recall that last_entry_removed_ptr
- * must NEVER be dereferenced.
- *
- * Recall that these fields are maintained to allow functions that perform
- * scans of lists of entries to detect the unexpected removal of entries
- * (via expunge, eviction, or take ownership at present), so that they can
- * re-start their scans if necessary.
- *
- * Also check if the entry we are watching for removal is being
- * removed (usually the 'next' entry for an iteration) and reset
- * it to indicate that it was removed.
- */
- cache->entries_removed_counter++;
- cache->last_entry_removed_ptr = entry;
- if (entry == cache->entry_watched_for_removal)
- cache->entry_watched_for_removal = NULL;
-
- /* Internal cache data structures should now be up to date, and
- * consistent with the status of the entry.
- *
- * Now clean up internal cache fields if appropriate.
- */
-
- /* Free the buffer for the on disk image */
- if (entry->image_ptr != NULL)
- entry->image_ptr = H5MM_xfree(entry->image_ptr);
-
- /* Reset the pointer to the cache the entry is within */
- entry->cache_ptr = NULL;
-
- /* Client is taking ownership of the entry. Set bad magic here so the
- * cache will choke unless the entry is re-inserted properly
- */
- entry->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_remove_entry() */
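The entry_watched_for_removal bookkeeping above exists for iterators: before calling into code that may evict entries, a scan records the next entry it intends to visit; if that entry is removed, this function nulls the field out, and the scan knows not to dereference the stale pointer. A minimal sketch of the consumer side, assuming a hypothetical do_something_that_may_evict() operation (entry_watched_for_removal and il_head/il_next are the real fields; the loop itself is illustrative):

    entry = cache->il_head;
    while (entry != NULL) {
        H5C_cache_entry_t *next = entry->il_next;

        /* Watch the next entry before doing anything that can evict it */
        cache->entry_watched_for_removal = next;
        do_something_that_may_evict(entry);

        if (next != NULL && cache->entry_watched_for_removal == NULL)
            entry = cache->il_head; /* the watched entry was removed -- restart */
        else
            entry = next;
        cache->entry_watched_for_removal = NULL;
    }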
diff --git a/src/H5Cdbg.c b/src/H5Cdbg.c
index ed95bcd..b7c05d0 100644
--- a/src/H5Cdbg.c
+++ b/src/H5Cdbg.c
@@ -247,7 +247,6 @@ H5C_dump_cache_LRU(H5C_t *cache_ptr, const char *cache_name)
#endif /* NDEBUG */
/*-------------------------------------------------------------------------
- *
* Function: H5C_dump_cache_skip_list
*
* Purpose: Debugging routine that prints a summary of the contents of
@@ -697,7 +696,6 @@ done:
} /* H5C_stats() */
/*-------------------------------------------------------------------------
- *
* Function: H5C_stats__reset
*
* Purpose: Reset the stats fields to their initial values.
@@ -713,7 +711,7 @@ void
H5C_stats__reset(H5C_t *cache_ptr)
#else /* NDEBUG */
#if H5C_COLLECT_CACHE_STATS
-H5C_stats__reset(H5C_t *cache_ptr)
+H5C_stats__reset(H5C_t *cache_ptr)
#else /* H5C_COLLECT_CACHE_STATS */
H5C_stats__reset(H5C_t H5_ATTR_UNUSED *cache_ptr)
#endif /* H5C_COLLECT_CACHE_STATS */
@@ -884,7 +882,6 @@ done:
#endif /* NDEBUG */
/*-------------------------------------------------------------------------
- *
* Function: H5C_validate_index_list
*
* Purpose: Debugging function that scans the index list for errors.
@@ -1000,7 +997,6 @@ done:
#endif /* NDEBUG */
/*-------------------------------------------------------------------------
- *
* Function: H5C_get_entry_ptr_from_addr()
*
* Purpose: Debugging function that attempts to look up an entry in the
@@ -1097,7 +1093,6 @@ H5C_get_serialization_in_progress(const H5C_t *cache_ptr)
#endif /* NDEBUG */
/*-------------------------------------------------------------------------
- *
* Function: H5C_cache_is_clean()
*
* Purpose: Debugging function that verifies that all rings in the
@@ -1141,7 +1136,6 @@ done:
#endif /* NDEBUG */
/*-------------------------------------------------------------------------
- *
* Function: H5C_verify_entry_type()
*
* Purpose: Debugging function that attempts to look up an entry in the
@@ -1203,3 +1197,478 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_verify_entry_type() */
#endif /* NDEBUG */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_def_auto_resize_rpt_fcn
+ *
+ * Purpose: Print results of an automatic cache resize.
+ *
+ * This function should only be used where HDprintf() behaves
+ * well -- i.e. not on Windows.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 10/27/04
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+H5C_def_auto_resize_rpt_fcn(H5C_t *cache_ptr,
+#ifndef NDEBUG
+ int32_t version,
+#else
+ int32_t H5_ATTR_UNUSED version,
+#endif
+ double hit_rate, enum H5C_resize_status status, size_t old_max_cache_size,
+ size_t new_max_cache_size, size_t old_min_clean_size, size_t new_min_clean_size)
+{
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(version == H5C__CURR_AUTO_RESIZE_RPT_FCN_VER);
+
+ switch (status) {
+ case in_spec:
+ HDfprintf(stdout, "%sAuto cache resize -- no change. (hit rate = %lf)\n", cache_ptr->prefix,
+ hit_rate);
+ break;
+
+ case increase:
+ HDassert(hit_rate < cache_ptr->resize_ctl.lower_hr_threshold);
+ HDassert(old_max_cache_size < new_max_cache_size);
+
+ HDfprintf(stdout, "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n",
+ cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.lower_hr_threshold);
+ HDfprintf(stdout, "%scache size increased from (%zu/%zu) to (%zu/%zu).\n", cache_ptr->prefix,
+ old_max_cache_size, old_min_clean_size, new_max_cache_size, new_min_clean_size);
+ break;
+
+ case flash_increase:
+ HDassert(old_max_cache_size < new_max_cache_size);
+
+ HDfprintf(stdout, "%sflash cache resize(%d) -- size threshold = %zu.\n", cache_ptr->prefix,
+ (int)(cache_ptr->resize_ctl.flash_incr_mode), cache_ptr->flash_size_increase_threshold);
+ HDfprintf(stdout, "%s cache size increased from (%zu/%zu) to (%zu/%zu).\n", cache_ptr->prefix,
+ old_max_cache_size, old_min_clean_size, new_max_cache_size, new_min_clean_size);
+ break;
+
+ case decrease:
+ HDassert(old_max_cache_size > new_max_cache_size);
+
+ switch (cache_ptr->resize_ctl.decr_mode) {
+ case H5C_decr__off:
+ HDfprintf(stdout, "%sAuto cache resize -- decrease off. HR = %lf\n", cache_ptr->prefix,
+ hit_rate);
+ break;
+
+ case H5C_decr__threshold:
+ HDassert(hit_rate > cache_ptr->resize_ctl.upper_hr_threshold);
+
+ HDfprintf(stdout, "%sAuto cache resize -- decrease by threshold. HR = %lf > %6.5lf\n",
+ cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.upper_hr_threshold);
+ HDfprintf(stdout, "%sout of bounds high (%6.5lf).\n", cache_ptr->prefix,
+ cache_ptr->resize_ctl.upper_hr_threshold);
+ break;
+
+ case H5C_decr__age_out:
+ HDfprintf(stdout, "%sAuto cache resize -- decrease by ageout. HR = %lf\n",
+ cache_ptr->prefix, hit_rate);
+ break;
+
+ case H5C_decr__age_out_with_threshold:
+ HDassert(hit_rate > cache_ptr->resize_ctl.upper_hr_threshold);
+
+ HDfprintf(stdout,
+ "%sAuto cache resize -- decrease by ageout with threshold. HR = %lf > %6.5lf\n",
+ cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.upper_hr_threshold);
+ break;
+
+ default:
+ HDfprintf(stdout, "%sAuto cache resize -- decrease by unknown mode. HR = %lf\n",
+ cache_ptr->prefix, hit_rate);
+ }
+
+ HDfprintf(stdout, "%s cache size decreased from (%zu/%zu) to (%zu/%zu).\n", cache_ptr->prefix,
+ old_max_cache_size, old_min_clean_size, new_max_cache_size, new_min_clean_size);
+ break;
+
+ case at_max_size:
+ HDfprintf(stdout, "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n",
+ cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.lower_hr_threshold);
+ HDfprintf(stdout, "%s cache already at maximum size so no change.\n", cache_ptr->prefix);
+ break;
+
+ case at_min_size:
+ HDfprintf(stdout, "%sAuto cache resize -- hit rate (%lf) -- can't decrease.\n", cache_ptr->prefix,
+ hit_rate);
+ HDfprintf(stdout, "%s cache already at minimum size.\n", cache_ptr->prefix);
+ break;
+
+ case increase_disabled:
+ HDfprintf(stdout, "%sAuto cache resize -- increase disabled -- HR = %lf.", cache_ptr->prefix,
+ hit_rate);
+ break;
+
+ case decrease_disabled:
+ HDfprintf(stdout, "%sAuto cache resize -- decrease disabled -- HR = %lf.\n", cache_ptr->prefix,
+ hit_rate);
+ break;
+
+ case not_full:
+ HDassert(hit_rate < cache_ptr->resize_ctl.lower_hr_threshold);
+
+ HDfprintf(stdout, "%sAuto cache resize -- hit rate (%lf) out of bounds low (%6.5lf).\n",
+ cache_ptr->prefix, hit_rate, cache_ptr->resize_ctl.lower_hr_threshold);
+ HDfprintf(stdout, "%s cache not full so no increase in size.\n", cache_ptr->prefix);
+ break;
+
+ default:
+ HDfprintf(stdout, "%sAuto cache resize -- unknown status code.\n", cache_ptr->prefix);
+ break;
+ }
+} /* H5C_def_auto_resize_rpt_fcn() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__validate_lru_list
+ *
+ * Purpose: Debugging function that scans the LRU list for errors.
+ *
+ * If an error is detected, the function generates a
+ * diagnostic and returns FAIL. If no error is detected,
+ * the function returns SUCCEED.
+ *
+ * Return: FAIL if error is detected, SUCCEED otherwise.
+ *
+ * Programmer: John Mainzer, 7/14/05
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifdef H5C_DO_EXTREME_SANITY_CHECKS
+herr_t
+H5C__validate_lru_list(H5C_t *cache_ptr)
+{
+ int32_t len = 0;
+ size_t size = 0;
+ H5C_cache_entry_t *entry_ptr = NULL;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ if (((cache_ptr->LRU_head_ptr == NULL) || (cache_ptr->LRU_tail_ptr == NULL)) &&
+ (cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU list head/tail check failed")
+
+ if ((cache_ptr->LRU_list_len == 1) &&
+ ((cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr) || (cache_ptr->LRU_head_ptr == NULL) ||
+ (cache_ptr->LRU_head_ptr->size != cache_ptr->LRU_list_size)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU list sanity check failed")
+
+ if ((cache_ptr->LRU_list_len >= 1) &&
+ ((cache_ptr->LRU_head_ptr == NULL) || (cache_ptr->LRU_head_ptr->prev != NULL) ||
+ (cache_ptr->LRU_tail_ptr == NULL) || (cache_ptr->LRU_tail_ptr->next != NULL)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU list sanity check failed")
+
+ entry_ptr = cache_ptr->LRU_head_ptr;
+ while (entry_ptr != NULL) {
+ if ((entry_ptr != cache_ptr->LRU_head_ptr) &&
+ ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers")
+
+ if ((entry_ptr != cache_ptr->LRU_tail_ptr) &&
+ ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers")
+
+ if (entry_ptr->is_pinned || entry_ptr->pinned_from_client || entry_ptr->pinned_from_cache)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "invalid entry 'pin origin' fields")
+
+ len++;
+ size += entry_ptr->size;
+ entry_ptr = entry_ptr->next;
+ }
+
+ if ((cache_ptr->LRU_list_len != (uint32_t)len) || (cache_ptr->LRU_list_size != size))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU list length/size check failed")
+
+done:
+ if (ret_value != SUCCEED)
+ HDassert(0);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__validate_lru_list() */
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__validate_pinned_entry_list
+ *
+ * Purpose: Debugging function that scans the pinned entry list for
+ * errors.
+ *
+ * If an error is detected, the function generates a
+ * diagnostic and returns FAIL. If no error is detected,
+ * the function returns SUCCEED.
+ *
+ * Return: FAIL if error is detected, SUCCEED otherwise.
+ *
+ * Programmer: John Mainzer, 4/25/14
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifdef H5C_DO_EXTREME_SANITY_CHECKS
+herr_t
+H5C__validate_pinned_entry_list(H5C_t *cache_ptr)
+{
+ int32_t len = 0;
+ size_t size = 0;
+ H5C_cache_entry_t *entry_ptr = NULL;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ if (((cache_ptr->pel_head_ptr == NULL) || (cache_ptr->pel_tail_ptr == NULL)) &&
+ (cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list head/tail check failed")
+
+ if ((cache_ptr->pel_len == 1) &&
+ ((cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr) || (cache_ptr->pel_head_ptr == NULL) ||
+ (cache_ptr->pel_head_ptr->size != cache_ptr->pel_size)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list sanity check failed")
+
+ if ((cache_ptr->pel_len >= 1) &&
+ ((cache_ptr->pel_head_ptr == NULL) || (cache_ptr->pel_head_ptr->prev != NULL) ||
+ (cache_ptr->pel_tail_ptr == NULL) || (cache_ptr->pel_tail_ptr->next != NULL)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list sanity check failed")
+
+ entry_ptr = cache_ptr->pel_head_ptr;
+ while (entry_ptr != NULL) {
+ if ((entry_ptr != cache_ptr->pel_head_ptr) &&
+ ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers")
+
+ if ((entry_ptr != cache_ptr->pel_tail_ptr) &&
+ ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers")
+
+ if (!entry_ptr->is_pinned)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list contains unpinned entry")
+
+ if (!(entry_ptr->pinned_from_client || entry_ptr->pinned_from_cache))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "invalid entry 'pin origin' fields")
+
+ len++;
+ size += entry_ptr->size;
+ entry_ptr = entry_ptr->next;
+ }
+
+ if ((cache_ptr->pel_len != (uint32_t)len) || (cache_ptr->pel_size != size))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pinned list length/size check failed")
+
+done:
+ if (ret_value != SUCCEED)
+ HDassert(0);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__validate_pinned_entry_list() */
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__validate_protected_entry_list
+ *
+ * Purpose: Debugging function that scans the protected entry list for
+ * errors.
+ *
+ * If an error is detected, the function generates a
+ * diagnostic and returns FAIL. If no error is detected,
+ * the function returns SUCCEED.
+ *
+ * Return: FAIL if error is detected, SUCCEED otherwise.
+ *
+ * Programmer: John Mainzer, 4/25/14
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifdef H5C_DO_EXTREME_SANITY_CHECKS
+herr_t
+H5C__validate_protected_entry_list(H5C_t *cache_ptr)
+{
+ int32_t len = 0;
+ size_t size = 0;
+ H5C_cache_entry_t *entry_ptr = NULL;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ if (((cache_ptr->pl_head_ptr == NULL) || (cache_ptr->pl_tail_ptr == NULL)) &&
+ (cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list head/tail check failed")
+
+ if ((cache_ptr->pl_len == 1) &&
+ ((cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr) || (cache_ptr->pl_head_ptr == NULL) ||
+ (cache_ptr->pl_head_ptr->size != cache_ptr->pl_size)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list sanity check failed")
+
+ if ((cache_ptr->pl_len >= 1) &&
+ ((cache_ptr->pl_head_ptr == NULL) || (cache_ptr->pl_head_ptr->prev != NULL) ||
+ (cache_ptr->pl_tail_ptr == NULL) || (cache_ptr->pl_tail_ptr->next != NULL)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list sanity check failed")
+
+ entry_ptr = cache_ptr->pl_head_ptr;
+ while (entry_ptr != NULL) {
+ if ((entry_ptr != cache_ptr->pl_head_ptr) &&
+ ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers")
+
+ if ((entry_ptr != cache_ptr->pl_tail_ptr) &&
+ ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry has bad prev/next pointers")
+
+ if (!entry_ptr->is_protected)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list contains unprotected entry")
+
+ if (entry_ptr->is_read_only && (entry_ptr->ro_ref_count <= 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "read-only entry has non-positive ref count")
+
+ len++;
+ size += entry_ptr->size;
+ entry_ptr = entry_ptr->next;
+ }
+
+ if ((cache_ptr->pl_len != (uint32_t)len) || (cache_ptr->pl_size != size))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "protected list length/size check failed")
+
+done:
+ if (ret_value != SUCCEED)
+ HDassert(0);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__validate_protected_entry_list() */
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__entry_in_skip_list
+ *
+ * Purpose: Debugging function that scans the skip list to see if the
+ * target entry is present. We need this, as it is possible for
+ * an entry to be in the skip list twice.
+ *
+ * Return: FALSE if the entry is not in the skip list, and TRUE
+ * if it is.
+ *
+ * Programmer: John Mainzer, 11/1/14
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifdef H5C_DO_SLIST_SANITY_CHECKS
+hbool_t
+H5C__entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr)
+{
+ H5SL_node_t *node_ptr;
+ hbool_t in_slist;
+ hbool_t ret_value;
+
+ FUNC_ENTER_PACKAGE
+
+ /* Assertions */
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_ptr);
+
+ node_ptr = H5SL_first(cache_ptr->slist_ptr);
+ in_slist = FALSE;
+ while ((node_ptr != NULL) && (!in_slist)) {
+ H5C_cache_entry_t *entry_ptr;
+
+ entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+
+ HDassert(entry_ptr);
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->is_dirty);
+ HDassert(entry_ptr->in_slist);
+
+ if (entry_ptr == target_ptr)
+ in_slist = TRUE;
+ else
+ node_ptr = H5SL_next(node_ptr);
+ }
+
+ /* Set return value */
+ ret_value = in_slist;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__entry_in_skip_list() */
+#endif /* H5C_DO_SLIST_SANITY_CHECKS */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__image_stats
+ *
+ * Purpose: Prints statistics specific to the cache image.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 10/26/15
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+#if H5C_COLLECT_CACHE_STATS
+H5C__image_stats(H5C_t *cache_ptr, hbool_t print_header)
+#else /* H5C_COLLECT_CACHE_STATS */
+H5C__image_stats(H5C_t *cache_ptr, hbool_t H5_ATTR_UNUSED print_header)
+#endif /* H5C_COLLECT_CACHE_STATS */
+{
+#if H5C_COLLECT_CACHE_STATS
+ int i;
+ int64_t total_hits = 0;
+ int64_t total_misses = 0;
+ double hit_rate;
+ double prefetch_use_rate;
+#endif /* H5C_COLLECT_CACHE_STATS */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ if (!cache_ptr || cache_ptr->magic != H5C__H5C_T_MAGIC)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr")
+
+#if H5C_COLLECT_CACHE_STATS
+ for (i = 0; i <= cache_ptr->max_type_id; i++) {
+ total_hits += cache_ptr->hits[i];
+ total_misses += cache_ptr->misses[i];
+ } /* end for */
+
+ if ((total_hits > 0) || (total_misses > 0))
+ hit_rate = 100.0 * ((double)(total_hits)) / ((double)(total_hits + total_misses));
+ else
+ hit_rate = 0.0;
+
+ if (cache_ptr->prefetches > 0)
+ prefetch_use_rate = 100.0 * ((double)(cache_ptr->prefetch_hits)) / ((double)(cache_ptr->prefetches));
+ else
+ prefetch_use_rate = 0.0;
+
+ if (print_header) {
+ HDfprintf(stdout, "\nhit prefetches prefetch image pf hit\n");
+ HDfprintf(stdout, "rate: total: dirty: hits: flshs: evct: size: rate:\n");
+ } /* end if */
+
+ HDfprintf(stdout, "%3.1lf %5lld %5lld %5lld %5lld %5lld %5lld %3.1lf\n", hit_rate,
+ (long long)(cache_ptr->prefetches), (long long)(cache_ptr->dirty_prefetches),
+ (long long)(cache_ptr->prefetch_hits),
+ (long long)(cache_ptr->flushes[H5AC_PREFETCHED_ENTRY_ID]),
+ (long long)(cache_ptr->evictions[H5AC_PREFETCHED_ENTRY_ID]),
+ (long long)(cache_ptr->last_image_size), prefetch_use_rate);
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__image_stats() */
diff --git a/src/H5Centry.c b/src/H5Centry.c
new file mode 100644
index 0000000..e573561
--- /dev/null
+++ b/src/H5Centry.c
@@ -0,0 +1,4363 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: H5Centry.c
+ * May 8 2023
+ * Quincey Koziol
+ *
+ * Purpose: Routines which operate on cache entries.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/****************/
+/* Module Setup */
+/****************/
+
+#include "H5Cmodule.h" /* This source code file is part of the H5C module */
+#define H5F_FRIEND /* suppress error about including H5Fpkg */
+
+/***********/
+/* Headers */
+/***********/
+#include "H5private.h" /* Generic Functions */
+#include "H5Cpkg.h" /* Cache */
+#include "H5CXprivate.h" /* API Contexts */
+#include "H5Eprivate.h" /* Error handling */
+#include "H5Fpkg.h" /* Files */
+#include "H5MFprivate.h" /* File memory management */
+#include "H5MMprivate.h" /* Memory management */
+
+/****************/
+/* Local Macros */
+/****************/
+#if H5C_DO_MEMORY_SANITY_CHECKS
+#define H5C_IMAGE_EXTRA_SPACE 8
+#define H5C_IMAGE_SANITY_VALUE "DeadBeef"
+#else /* H5C_DO_MEMORY_SANITY_CHECKS */
+#define H5C_IMAGE_EXTRA_SPACE 0
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+
+/******************/
+/* Local Typedefs */
+/******************/
+
+/* Alias for pointer to cache entry, for use when allocating sequences of them */
+typedef H5C_cache_entry_t *H5C_cache_entry_ptr_t;
+
+/********************/
+/* Local Prototypes */
+/********************/
+static herr_t H5C__pin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr);
+static herr_t H5C__unpin_entry_real(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp);
+static herr_t H5C__unpin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp);
+static herr_t H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr);
+static herr_t H5C__verify_len_eoa(H5F_t *f, const H5C_class_t *type, haddr_t addr, size_t *len,
+ hbool_t actual);
+static void *H5C__load_entry(H5F_t *f,
+#ifdef H5_HAVE_PARALLEL
+ hbool_t coll_access,
+#endif /* H5_HAVE_PARALLEL */
+ const H5C_class_t *type, haddr_t addr, void *udata);
+static herr_t H5C__mark_flush_dep_dirty(H5C_cache_entry_t *entry);
+static herr_t H5C__mark_flush_dep_clean(H5C_cache_entry_t *entry);
+static herr_t H5C__mark_flush_dep_serialized(H5C_cache_entry_t *entry);
+static herr_t H5C__mark_flush_dep_unserialized(H5C_cache_entry_t *entry);
+#ifndef NDEBUG
+static void H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t *entry,
+ const H5C_cache_entry_t *base_entry);
+#endif
+static herr_t H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr, H5C_cache_entry_t *pf_entry_ptr,
+ H5C_cache_entry_t **fd_children);
+static herr_t H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t **entry_ptr_ptr,
+ const H5C_class_t *type, haddr_t addr, void *udata);
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+/* Declare a free list to manage arrays of cache entries */
+H5FL_SEQ_DEFINE_STATIC(H5C_cache_entry_ptr_t);
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__pin_entry_from_client()
+ *
+ * Purpose: Internal routine to pin a cache entry from a client action.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * 3/26/09
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__pin_entry_from_client(H5C_t
+#if !H5C_COLLECT_CACHE_STATS
+ H5_ATTR_UNUSED
+#endif
+ *cache_ptr,
+ H5C_cache_entry_t *entry_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity checks */
+ HDassert(cache_ptr);
+ HDassert(entry_ptr);
+ HDassert(entry_ptr->is_protected);
+
+ /* Check if the entry is already pinned */
+ if (entry_ptr->is_pinned) {
+ /* Check if the entry was pinned through an explicit pin from a client */
+ if (entry_ptr->pinned_from_client)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "entry is already pinned")
+ } /* end if */
+ else {
+ entry_ptr->is_pinned = TRUE;
+
+ H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr)
+ } /* end else */
+
+ /* Mark that the entry was pinned through an explicit pin from a client */
+ entry_ptr->pinned_from_client = TRUE;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__pin_entry_from_client() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__unpin_entry_real()
+ *
+ * Purpose: Internal routine to unpin a cache entry.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * 1/6/18
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__unpin_entry_real(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+#ifdef H5C_DO_SANITY_CHECKS
+ FUNC_ENTER_PACKAGE
+#else
+ FUNC_ENTER_PACKAGE_NOERR
+#endif
+
+ /* Sanity checking */
+ HDassert(cache_ptr);
+ HDassert(entry_ptr);
+ HDassert(entry_ptr->is_pinned);
+
+ /* If requested, update the replacement policy if the entry is not protected */
+ if (update_rp && !entry_ptr->is_protected)
+ H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, FAIL)
+
+ /* Unpin the entry now */
+ entry_ptr->is_pinned = FALSE;
+
+ /* Update the stats for an unpin operation */
+ H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr)
+
+#ifdef H5C_DO_SANITY_CHECKS
+done:
+#endif
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__unpin_entry_real() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__unpin_entry_from_client()
+ *
+ * Purpose: Internal routine to unpin a cache entry from a client action.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * 3/24/09
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__unpin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity checking */
+ HDassert(cache_ptr);
+ HDassert(entry_ptr);
+
+ /* Error checking (should be sanity checks?) */
+ if (!entry_ptr->is_pinned)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "entry isn't pinned")
+ if (!entry_ptr->pinned_from_client)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "entry wasn't pinned by cache client")
+
+ /* Check if the entry is not pinned from a flush dependency */
+ if (!entry_ptr->pinned_from_cache)
+ if (H5C__unpin_entry_real(cache_ptr, entry_ptr, update_rp) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "can't unpin entry")
+
+ /* Mark the entry as explicitly unpinned by the client */
+ entry_ptr->pinned_from_client = FALSE;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__unpin_entry_from_client() */
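Taken together, the pin/unpin routines above implement a two-origin pin model: an entry may be pinned by the client (pinned_from_client) and/or by the cache itself for flush dependency purposes (pinned_from_cache), and is_pinned may only drop to FALSE once neither origin remains. The invariant, written out as an illustrative predicate (the function name is hypothetical, not part of the cache's API):

    static hbool_t
    entry_must_stay_pinned(const H5C_cache_entry_t *entry_ptr)
    {
        return entry_ptr->pinned_from_client || entry_ptr->pinned_from_cache;
    }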
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__generate_image
+ *
+ * Purpose: Serialize an entry and generate its image.
+ *
+ * Note: This may cause the entry to be re-sized and/or moved in
+ * the cache.
+ *
+ * As we will not update the metadata cache's data structures
+ * until we finish the write, we must touch up these
+ * data structures for size and location changes even if we
+ * are about to delete the entry from the cache (i.e. on a
+ * flush destroy).
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Mohamad Chaarawi
+ * 2/10/16
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
+{
+ haddr_t new_addr = HADDR_UNDEF;
+ haddr_t old_addr = HADDR_UNDEF;
+ size_t new_len = 0;
+ unsigned serialize_flags = H5C__SERIALIZE_NO_FLAGS_SET;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity check */
+ HDassert(f);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(entry_ptr);
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(!entry_ptr->image_up_to_date);
+ HDassert(entry_ptr->is_dirty);
+ HDassert(!entry_ptr->is_protected);
+ HDassert(entry_ptr->type);
+
+ /* make note of the entry's current address */
+ old_addr = entry_ptr->addr;
+
+ /* Call client's pre-serialize callback, if there's one */
+ if ((entry_ptr->type->pre_serialize) &&
+ ((entry_ptr->type->pre_serialize)(f, (void *)entry_ptr, entry_ptr->addr, entry_ptr->size, &new_addr,
+ &new_len, &serialize_flags) < 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to pre-serialize entry")
+
+ /* Check for any flags set in the pre-serialize callback */
+ if (serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) {
+ /* Check for unexpected flags from serialize callback */
+ if (serialize_flags & ~(H5C__SERIALIZE_RESIZED_FLAG | H5C__SERIALIZE_MOVED_FLAG))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unknown serialize flag(s)")
+
+#ifdef H5_HAVE_PARALLEL
+ /* In the parallel case, resizes and moves in
+ * the serialize operation can cause problems.
+ * If they occur, scream and die.
+ *
+ * At present, in the parallel case, the aux_ptr
+ * will only be set if there is more than one
+ * process. Thus we can use this to detect
+ * the parallel case.
+ *
+ * This works for now, but if we start using the
+ * aux_ptr for other purposes, we will have to
+ * change this test accordingly.
+ *
+ * NB: While this test detects entries that attempt
+ * to resize or move themselves during a flush
+ * in the parallel case, it will not detect an
+ * entry that dirties, resizes, and/or moves
+ * other entries during its flush.
+ */
+ if (cache_ptr->aux_ptr != NULL)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "resize/move in serialize occurred in parallel case")
+#endif
+
+ /* If required, resize the buffer and update the entry and the cache
+ * data structures
+ */
+ if (serialize_flags & H5C__SERIALIZE_RESIZED_FLAG) {
+ /* Sanity check */
+ HDassert(new_len > 0);
+
+ /* Allocate a new image buffer */
+ if (NULL ==
+ (entry_ptr->image_ptr = H5MM_realloc(entry_ptr->image_ptr, new_len + H5C_IMAGE_EXTRA_SPACE)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL,
+ "memory allocation failed for on disk image buffer")
+
+#if H5C_DO_MEMORY_SANITY_CHECKS
+ H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + new_len, H5C_IMAGE_SANITY_VALUE,
+ H5C_IMAGE_EXTRA_SPACE);
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+
+ /* Update statistics for resizing the entry */
+ H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_len);
+
+ /* Update the hash table for the size change */
+ H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len, entry_ptr,
+ !entry_ptr->is_dirty, FAIL);
+
+ /* The entry can't be protected since we are in the process of
+ * flushing it. Thus we must update the replacement policy data
+ * structures for the size change. The macro deals with the pinned
+ * case.
+ */
+ H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_len, FAIL);
+
+ /* As we haven't updated the cache data structures
+ * for the flush or flush destroy yet, the entry should
+ * be in the slist if the slist is enabled. Since
+ * H5C__UPDATE_SLIST_FOR_SIZE_CHANGE() is a no-op if the
+ * slist is disabled, call it unconditionally.
+ */
+ HDassert(entry_ptr->is_dirty);
+ HDassert((entry_ptr->in_slist) || (!cache_ptr->slist_enabled));
+
+ H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len);
+
+ /* Finally, update the entry for its new size */
+ entry_ptr->size = new_len;
+ } /* end if */
+
+ /* If required, update the entry and the cache data structures
+ * for a move
+ */
+ if (serialize_flags & H5C__SERIALIZE_MOVED_FLAG) {
+ /* Update stats and entries relocated counter */
+ H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr)
+
+ /* We must update cache data structures for the change in address */
+ if (entry_ptr->addr == old_addr) {
+ /* Delete the entry from the hash table and the slist */
+ H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL);
+ H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE, FAIL)
+
+ /* Update the entry for its new address */
+ entry_ptr->addr = new_addr;
+
+ /* And then reinsert in the index and slist */
+ H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL);
+ H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL);
+ } /* end if */
+ else /* move is already done for us -- just do sanity checks */
+ HDassert(entry_ptr->addr == new_addr);
+ } /* end if */
+ } /* end if(serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) */
+
+ /* Serialize object into buffer */
+ if (entry_ptr->type->serialize(f, entry_ptr->image_ptr, entry_ptr->size, (void *)entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to serialize entry")
+
+#if H5C_DO_MEMORY_SANITY_CHECKS
+ HDassert(0 == HDmemcmp(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE,
+ H5C_IMAGE_EXTRA_SPACE));
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+
+ entry_ptr->image_up_to_date = TRUE;
+
+ /* Propagate the fact that the entry is serialized up the
+ * flush dependency chain if appropriate. Since the image must
+ * have been out of date for this function to have been called
+ * (see assertion on entry), no need to check that -- only check
+ * for flush dependency parents.
+ */
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
+
+ if (entry_ptr->flush_dep_nparents > 0)
+ if (H5C__mark_flush_dep_serialized(entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "Can't propagate serialization status to fd parents")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__generate_image */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__flush_single_entry
+ *
+ * Purpose: Flush or clear (and evict if requested) the cache entry
+ * with the specified address and type. If the type is NULL,
+ * any unprotected entry at the specified address will be
+ * flushed (and possibly evicted).
+ *
+ * Attempts to flush a protected entry will result in an
+ * error.
+ *
+ * If the H5C__FLUSH_INVALIDATE_FLAG flag is set, the entry will
+ * be cleared and not flushed, and the call can't be part of a
+ * sequence of flushes.
+ *
+ * The function silently does nothing if there is no entry
+ * at the supplied address, or if the entry found has the
+ * wrong type.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * an attempt to flush a protected item.
+ *
+ * Programmer: John Mainzer, 5/5/04
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
+{
+ H5C_t *cache_ptr; /* Cache for file */
+ hbool_t destroy; /* external flag */
+ hbool_t clear_only; /* external flag */
+ hbool_t free_file_space; /* external flag */
+ hbool_t take_ownership; /* external flag */
+ hbool_t del_from_slist_on_destroy; /* external flag */
+ hbool_t during_flush; /* external flag */
+ hbool_t write_entry; /* internal flag */
+ hbool_t destroy_entry; /* internal flag */
+ hbool_t generate_image; /* internal flag */
+ hbool_t update_page_buffer; /* internal flag */
+ hbool_t was_dirty;
+ hbool_t suppress_image_entry_writes = FALSE;
+ hbool_t suppress_image_entry_frees = FALSE;
+ haddr_t entry_addr = HADDR_UNDEF;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ HDassert(f);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(entry_ptr);
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring != H5C_RING_UNDEFINED);
+ HDassert(entry_ptr->type);
+
+ /* setup external flags from the flags parameter */
+ destroy = ((flags & H5C__FLUSH_INVALIDATE_FLAG) != 0);
+ clear_only = ((flags & H5C__FLUSH_CLEAR_ONLY_FLAG) != 0);
+ free_file_space = ((flags & H5C__FREE_FILE_SPACE_FLAG) != 0);
+ take_ownership = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0);
+ del_from_slist_on_destroy = ((flags & H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) != 0);
+ during_flush = ((flags & H5C__DURING_FLUSH_FLAG) != 0);
+ generate_image = ((flags & H5C__GENERATE_IMAGE_FLAG) != 0);
+ update_page_buffer = ((flags & H5C__UPDATE_PAGE_BUFFER_FLAG) != 0);
+
+ /* Set the flag for destroying the entry, based on the 'take ownership'
+ * and 'destroy' flags
+ */
+ if (take_ownership)
+ destroy_entry = FALSE;
+ else
+ destroy_entry = destroy;
+
+ /* we will write the entry to disk if it exists, is dirty, and if the
+ * clear only flag is not set.
+ */
+ if (entry_ptr->is_dirty && !clear_only)
+ write_entry = TRUE;
+ else
+ write_entry = FALSE;
+
+ /* If we have received a close warning, and we have been instructed to
+ * generate a metadata cache image, and we have actually constructed
+ * the entry images, set suppress_image_entry_frees to TRUE.
+ *
+ * Set suppress_image_entry_writes to TRUE if indicated by the
+ * image_ctl flags.
+ */
+ if (cache_ptr->close_warning_received && cache_ptr->image_ctl.generate_image &&
+ cache_ptr->num_entries_in_image > 0 && cache_ptr->image_entries != NULL) {
+
+ /* Sanity checks */
+ HDassert(entry_ptr->image_up_to_date || !(entry_ptr->include_in_image));
+ HDassert(entry_ptr->image_ptr || !(entry_ptr->include_in_image));
+ HDassert((!clear_only) || !(entry_ptr->include_in_image));
+ HDassert((!take_ownership) || !(entry_ptr->include_in_image));
+ HDassert((!free_file_space) || !(entry_ptr->include_in_image));
+
+ suppress_image_entry_frees = TRUE;
+
+ if (cache_ptr->image_ctl.flags & H5C_CI__SUPRESS_ENTRY_WRITES)
+ suppress_image_entry_writes = TRUE;
+ } /* end if */
+
+ /* run initial sanity checks */
+#ifdef H5C_DO_SANITY_CHECKS
+ if (cache_ptr->slist_enabled) {
+ if (entry_ptr->in_slist) {
+ HDassert(entry_ptr->is_dirty);
+ if (entry_ptr->flush_marker && !entry_ptr->is_dirty)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry in slist failed sanity checks")
+ } /* end if */
+ else {
+ HDassert(!entry_ptr->is_dirty);
+ HDassert(!entry_ptr->flush_marker);
+ if (entry_ptr->is_dirty || entry_ptr->flush_marker)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry failed sanity checks")
+ } /* end else */
+ }
+ else { /* slist is disabled */
+ HDassert(!entry_ptr->in_slist);
+ if (!entry_ptr->is_dirty)
+ if (entry_ptr->flush_marker)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flush marked clean entry?")
+ }
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ if (entry_ptr->is_protected)
+ /* Attempt to flush a protected entry -- scream and die. */
+ HGOTO_ERROR(H5E_CACHE, H5E_PROTECT, FAIL, "Attempt to flush a protected entry")
+
+ /* Set entry_ptr->flush_in_progress = TRUE and set
+ * entry_ptr->flush_marker = FALSE
+ *
+ * We will set flush_in_progress back to FALSE at the end if the
+ * entry still exists at that point.
+ */
+ entry_ptr->flush_in_progress = TRUE;
+ entry_ptr->flush_marker = FALSE;
+
+ /* Preserve current dirty state for later */
+ was_dirty = entry_ptr->is_dirty;
+
+    /* If the entry is dirty and we are doing a flush, a flush destroy, or
+     * have been requested to generate an image, serialize the entry.
+     */
+ if (write_entry || generate_image) {
+ HDassert(entry_ptr->is_dirty);
+ if (NULL == entry_ptr->image_ptr) {
+ if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL,
+ "memory allocation failed for on disk image buffer")
+
+#if H5C_DO_MEMORY_SANITY_CHECKS
+ H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE,
+ H5C_IMAGE_EXTRA_SPACE);
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+
+ } /* end if */
+
+ if (!entry_ptr->image_up_to_date) {
+ /* Sanity check */
+ HDassert(!entry_ptr->prefetched);
+
+ /* Generate the entry's image */
+ if (H5C__generate_image(f, cache_ptr, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't generate entry's image")
+ } /* end if ( ! (entry_ptr->image_up_to_date) ) */
+ } /* end if */
+
+ /* Finally, write the image to disk.
+ *
+     * Note that if the H5AC__CLASS_SKIP_WRITES flag is set in the
+     * entry's type, we silently skip the write. This flag should
+     * only be used in test code.
+ */
+ if (write_entry) {
+ HDassert(entry_ptr->is_dirty);
+
+#ifdef H5C_DO_SANITY_CHECKS
+ if (cache_ptr->check_write_permitted && !cache_ptr->write_permitted)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Write when writes are always forbidden!?!?!")
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ /* Write the image to disk unless the write is suppressed.
+ *
+ * This happens if both suppress_image_entry_writes and
+ * entry_ptr->include_in_image are TRUE, or if the
+         * H5AC__CLASS_SKIP_WRITES flag is set in the entry's type. This
+         * flag should only be used in test code.
+ */
+ if ((!suppress_image_entry_writes || !entry_ptr->include_in_image) &&
+ ((entry_ptr->type->flags & H5C__CLASS_SKIP_WRITES) == 0)) {
+ H5FD_mem_t mem_type = H5FD_MEM_DEFAULT;
+
+#ifdef H5_HAVE_PARALLEL
+ if (cache_ptr->coll_write_list) {
+ if (H5SL_insert(cache_ptr->coll_write_list, entry_ptr, &entry_ptr->addr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "unable to insert skip list item")
+ } /* end if */
+ else {
+#endif /* H5_HAVE_PARALLEL */
+ if (entry_ptr->prefetched) {
+ HDassert(entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID);
+ mem_type = cache_ptr->class_table_ptr[entry_ptr->prefetch_type_id]->mem_type;
+ } /* end if */
+ else
+ mem_type = entry_ptr->type->mem_type;
+
+ if (H5F_block_write(f, mem_type, entry_ptr->addr, entry_ptr->size, entry_ptr->image_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't write image to file")
+#ifdef H5_HAVE_PARALLEL
+ }
+#endif /* H5_HAVE_PARALLEL */
+ } /* end if */
+
+ /* if the entry has a notify callback, notify it that we have
+ * just flushed the entry.
+ */
+ if (entry_ptr->type->notify &&
+ (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_FLUSH, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client of entry flush")
+ } /* if ( write_entry ) */
+
+ /* At this point, all pre-serialize and serialize calls have been
+ * made if it was appropriate to make them. Similarly, the entry
+ * has been written to disk if desired.
+ *
+ * Thus it is now safe to update the cache data structures for the
+ * flush.
+ */
+
+ /* start by updating the statistics */
+ if (clear_only) {
+ /* only log a clear if the entry was dirty */
+ if (was_dirty)
+ H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr)
+ }
+ else if (write_entry) {
+ HDassert(was_dirty);
+
+ /* only log a flush if we actually wrote to disk */
+ H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr)
+ } /* end else if */
+
+ /* Note that the algorithm below is (very) similar to the set of operations
+ * in H5C_remove_entry() and should be kept in sync with changes
+ * to that code. - QAK, 2016/11/30
+ */
+
+ /* Update the cache internal data structures. */
+ if (destroy) {
+ /* Sanity checks */
+ if (take_ownership)
+ HDassert(!destroy_entry);
+ else
+ HDassert(destroy_entry);
+
+ HDassert(!entry_ptr->is_pinned);
+
+ /* Update stats, while entry is still in the cache */
+ H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership)
+
+ /* If the entry's type has a 'notify' callback and the entry is about
+ * to be removed from the cache, send a 'before eviction' notice while
+ * the entry is still fully integrated in the cache.
+ */
+ if (entry_ptr->type->notify &&
+ (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry to evict")
+
+ /* Update the cache internal data structures as appropriate
+ * for a destroy. Specifically:
+ *
+ * 1) Delete it from the index
+ *
+ * 2) Delete it from the skip list if requested.
+ *
+ * 3) Delete it from the collective read access list.
+ *
+ * 4) Update the replacement policy for eviction
+ *
+ * 5) Remove it from the tag list for this object
+ *
+ * Finally, if the destroy_entry flag is set, discard the
+ * entry.
+ */
+ H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL)
+
+ if (entry_ptr->in_slist && del_from_slist_on_destroy)
+ H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush, FAIL)
+
+#ifdef H5_HAVE_PARALLEL
+ /* Check for collective read access flag */
+ if (entry_ptr->coll_access) {
+ entry_ptr->coll_access = FALSE;
+ H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
+ } /* end if */
+#endif /* H5_HAVE_PARALLEL */
+
+ H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, FAIL)
+
+ /* Remove entry from tag list */
+ if (H5C__untag_entry(cache_ptr, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list")
+
+ /* verify that the entry is no longer part of any flush dependencies */
+ HDassert(entry_ptr->flush_dep_nparents == 0);
+ HDassert(entry_ptr->flush_dep_nchildren == 0);
+ } /* end if */
+ else {
+ HDassert(clear_only || write_entry);
+ HDassert(entry_ptr->is_dirty);
+ HDassert((!cache_ptr->slist_enabled) || (entry_ptr->in_slist));
+
+ /* We are either doing a flush or a clear.
+ *
+ * A clear and a flush are the same from the point of
+ * view of the replacement policy and the slist.
+ * Hence no differentiation between them.
+ */
+ H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, FAIL)
+ H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush, FAIL)
+
+ /* mark the entry as clean and update the index for
+ * entry clean. Also, call the clear callback
+ * if defined.
+ */
+ entry_ptr->is_dirty = FALSE;
+
+ H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr, FAIL);
+
+ /* Check for entry changing status and do notifications, etc. */
+ if (was_dirty) {
+ /* If the entry's type has a 'notify' callback send a
+ * 'entry cleaned' notice now that the entry is fully
+ * integrated into the cache.
+ */
+ if (entry_ptr->type->notify &&
+ (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify client about entry dirty flag cleared")
+
+ /* Propagate the clean flag up the flush dependency chain
+ * if appropriate
+ */
+ if (entry_ptr->flush_dep_ndirty_children != 0)
+ HDassert(entry_ptr->flush_dep_ndirty_children == 0);
+ if (entry_ptr->flush_dep_nparents > 0)
+ if (H5C__mark_flush_dep_clean(entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Can't propagate flush dep clean flag")
+ } /* end if */
+ } /* end else */
+
+ /* reset the flush_in progress flag */
+ entry_ptr->flush_in_progress = FALSE;
+
+ /* capture the cache entry address for the log_flush call at the
+ * end before the entry_ptr gets freed
+ */
+ entry_addr = entry_ptr->addr;
+
+ /* Internal cache data structures should now be up to date, and
+ * consistent with the status of the entry.
+ *
+ * Now discard the entry if appropriate.
+ */
+ if (destroy) {
+ /* Sanity check */
+ HDassert(0 == entry_ptr->flush_dep_nparents);
+
+ /* if both suppress_image_entry_frees and entry_ptr->include_in_image
+ * are true, simply set entry_ptr->image_ptr to NULL, as we have
+ * another pointer to the buffer in an instance of H5C_image_entry_t
+ * in cache_ptr->image_entries.
+ *
+ * Otherwise, free the buffer if it exists.
+ */
+ if (suppress_image_entry_frees && entry_ptr->include_in_image)
+ entry_ptr->image_ptr = NULL;
+ else if (entry_ptr->image_ptr != NULL)
+ entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
+
+ /* If the entry is not a prefetched entry, verify that the flush
+ * dependency parents addresses array has been transferred.
+ *
+         * If the entry is prefetched, the free_icr routine will dispose of
+ * the flush dependency parents addresses array if necessary.
+ */
+ if (!entry_ptr->prefetched) {
+ HDassert(0 == entry_ptr->fd_parent_count);
+ HDassert(NULL == entry_ptr->fd_parent_addrs);
+ } /* end if */
+
+ /* Check whether we should free the space in the file that
+ * the entry occupies
+ */
+ if (free_file_space) {
+ hsize_t fsf_size;
+
+ /* Sanity checks */
+ HDassert(H5F_addr_defined(entry_ptr->addr));
+ HDassert(!H5F_IS_TMP_ADDR(f, entry_ptr->addr));
+#ifndef NDEBUG
+ {
+ size_t curr_len;
+
+ /* Get the actual image size for the thing again */
+ entry_ptr->type->image_len((void *)entry_ptr, &curr_len);
+ HDassert(curr_len == entry_ptr->size);
+ }
+#endif
+
+ /* If the file space free size callback is defined, use
+ * it to get the size of the block of file space to free.
+ * Otherwise use entry_ptr->size.
+ */
+ if (entry_ptr->type->fsf_size) {
+ if ((entry_ptr->type->fsf_size)((void *)entry_ptr, &fsf_size) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to get file space free size")
+ } /* end if */
+ else /* no file space free size callback -- use entry size */
+ fsf_size = entry_ptr->size;
+
+ /* Release the space on disk */
+ if (H5MF_xfree(f, entry_ptr->type->mem_type, entry_ptr->addr, fsf_size) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to free file space for cache entry")
+ } /* end if ( free_file_space ) */
+
+ /* Reset the pointer to the cache the entry is within. -QAK */
+ entry_ptr->cache_ptr = NULL;
+
+ /* increment entries_removed_counter and set
+         * last_entry_removed_ptr. As we are likely about to
+ * free the entry, recall that last_entry_removed_ptr
+ * must NEVER be dereferenced.
+ *
+ * Recall that these fields are maintained to allow functions
+ * that perform scans of lists of entries to detect the
+ * unexpected removal of entries (via expunge, eviction,
+ * or take ownership at present), so that they can re-start
+ * their scans if necessary.
+ *
+ * Also check if the entry we are watching for removal is being
+ * removed (usually the 'next' entry for an iteration) and reset
+ * it to indicate that it was removed.
+ */
+ cache_ptr->entries_removed_counter++;
+ cache_ptr->last_entry_removed_ptr = entry_ptr;
+
+ if (entry_ptr == cache_ptr->entry_watched_for_removal)
+ cache_ptr->entry_watched_for_removal = NULL;
+
+ /* Check for actually destroying the entry in memory */
+ /* (As opposed to taking ownership of it) */
+ if (destroy_entry) {
+ if (entry_ptr->is_dirty) {
+ /* Reset dirty flag */
+ entry_ptr->is_dirty = FALSE;
+
+ /* If the entry's type has a 'notify' callback send a
+ * 'entry cleaned' notice now that the entry is fully
+ * integrated into the cache.
+ */
+ if (entry_ptr->type->notify &&
+ (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify client about entry dirty flag cleared")
+ } /* end if */
+
+ /* we are about to discard the in core representation --
+ * set the magic field to bad magic so we can detect a
+ * freed entry if we see one.
+ */
+ entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
+
+ /* verify that the image has been freed */
+ HDassert(entry_ptr->image_ptr == NULL);
+
+ if (entry_ptr->type->free_icr((void *)entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "free_icr callback failed")
+ } /* end if */
+ else {
+ HDassert(take_ownership);
+
+ /* Client is taking ownership of the entry. Set bad magic here too
+ * so the cache will choke unless the entry is re-inserted properly
+ */
+ entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
+ } /* end else */
+ } /* if (destroy) */
+
+ /* Check if we have to update the page buffer with cleared entries
+ * so it doesn't go out of date
+ */
+ if (update_page_buffer) {
+ /* Sanity check */
+ HDassert(!destroy);
+ HDassert(entry_ptr->image_ptr);
+
+ if (f->shared->page_buf && (f->shared->page_buf->page_size >= entry_ptr->size))
+ if (H5PB_update_entry(f->shared->page_buf, entry_ptr->addr, entry_ptr->size,
+ entry_ptr->image_ptr) > 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Failed to update PB with metadata cache")
+ } /* end if */
+
+ if (cache_ptr->log_flush)
+ if ((cache_ptr->log_flush)(cache_ptr, entry_addr, was_dirty, flags) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "log_flush callback failed")
+
+done:
+ HDassert((ret_value != SUCCEED) || (destroy_entry) || (!entry_ptr->flush_in_progress));
+ HDassert((ret_value != SUCCEED) || (destroy_entry) || (take_ownership) || (!entry_ptr->is_dirty));
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__flush_single_entry() */
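+
+/* Usage sketch (illustrative only, not part of this change): a caller that
+ * wants to evict an entry and discard it combines the flags decoded above
+ * roughly as follows, assuming the usual FUNC_ENTER / done-block error
+ * handling used throughout this file:
+ *
+ *     unsigned flags = H5C__FLUSH_INVALIDATE_FLAG |
+ *                      H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG;
+ *
+ *     if (H5C__flush_single_entry(f, entry_ptr, flags) < 0)
+ *         HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't evict entry")
+ *
+ * Passing H5C__TAKE_OWNERSHIP_FLAG as well removes the entry from the
+ * cache's data structures but forces destroy_entry to FALSE, so the
+ * in-core representation survives for the caller to re-insert.
+ */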
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__verify_len_eoa
+ *
+ * Purpose:     Verify that 'len' does not exceed the EOA when 'actual'
+ *              is false, i.e. 'len' is the initial speculative length
+ *              from the get_load_size callback with a null image
+ *              pointer. If it does, trim 'len' accordingly.
+ *
+ *              Verify that 'len' does not exceed the EOA when 'actual'
+ *              is true, i.e. 'len' is the actual length from the
+ *              get_load_size callback with a non-null image pointer.
+ *              If it does, return an error.
+ *
+ * Return: FAIL if error is detected, SUCCEED otherwise.
+ *
+ * Programmer: Vailin Choi
+ * 9/6/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__verify_len_eoa(H5F_t *f, const H5C_class_t *type, haddr_t addr, size_t *len, hbool_t actual)
+{
+ H5FD_mem_t cooked_type; /* Modified type, accounting for switching global heaps */
+ haddr_t eoa; /* End-of-allocation in the file */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ /* if type == H5FD_MEM_GHEAP, H5F_block_read() forces
+ * type to H5FD_MEM_DRAW via its call to H5F__accum_read().
+ * Thus we do the same for purposes of computing the EOA
+ * for sanity checks.
+ */
+ cooked_type = (type->mem_type == H5FD_MEM_GHEAP) ? H5FD_MEM_DRAW : type->mem_type;
+
+ /* Get the file's end-of-allocation value */
+ eoa = H5F_get_eoa(f, cooked_type);
+ if (!H5F_addr_defined(eoa))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid EOA address for file")
+
+ /* Check for bad address in general */
+ if (H5F_addr_gt(addr, eoa))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "address of object past end of allocation")
+
+ /* Check if the amount of data to read will be past the EOA */
+ if (H5F_addr_gt((addr + *len), eoa)) {
+ if (actual)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "actual len exceeds EOA")
+ else
+ /* Trim down the length of the metadata */
+ *len = (size_t)(eoa - addr);
+ } /* end if */
+
+ if (*len <= 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "len not positive after adjustment for EOA")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__verify_len_eoa() */
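+
+/* Worked example of the clamp above (numbers assumed for illustration):
+ * with eoa == 1000 and addr == 900, a speculative read with *len == 256
+ * and actual == FALSE is trimmed to *len = (size_t)(eoa - addr) == 100,
+ * while the same call with actual == TRUE fails, since a length reported
+ * by get_final_load_size must never cross the end of allocation.
+ */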
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__load_entry
+ *
+ * Purpose: Attempt to load the entry at the specified disk address
+ *              and with the specified type into memory. If successful,
+ *              return the in-memory address of the entry. Return NULL
+ * on failure.
+ *
+ * Note that this function simply loads the entry into
+ * core. It does not insert it into the cache.
+ *
+ * Return: Non-NULL on success / NULL on failure.
+ *
+ * Programmer: John Mainzer, 5/18/04
+ *
+ *-------------------------------------------------------------------------
+ */
+void *
+H5C__load_entry(H5F_t *f,
+#ifdef H5_HAVE_PARALLEL
+ hbool_t coll_access,
+#endif /* H5_HAVE_PARALLEL */
+ const H5C_class_t *type, haddr_t addr, void *udata)
+{
+ hbool_t dirty = FALSE; /* Flag indicating whether thing was dirtied during deserialize */
+ uint8_t *image = NULL; /* Buffer for disk image */
+ void *thing = NULL; /* Pointer to thing loaded */
+ H5C_cache_entry_t *entry = NULL; /* Alias for thing loaded, as cache entry */
+ size_t len; /* Size of image in file */
+#ifdef H5_HAVE_PARALLEL
+ int mpi_rank = 0; /* MPI process rank */
+ MPI_Comm comm = MPI_COMM_NULL; /* File MPI Communicator */
+ int mpi_code; /* MPI error code */
+#endif /* H5_HAVE_PARALLEL */
+ void *ret_value = NULL; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->cache);
+ HDassert(type);
+ HDassert(H5F_addr_defined(addr));
+ HDassert(type->get_initial_load_size);
+ if (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)
+ HDassert(type->get_final_load_size);
+ else
+ HDassert(NULL == type->get_final_load_size);
+ HDassert(type->deserialize);
+
+ /* Can't see how skip reads could be usefully combined with
+ * the speculative read flag. Hence disallow.
+ */
+ HDassert(!((type->flags & H5C__CLASS_SKIP_READS) && (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)));
+
+ /* Call the get_initial_load_size callback, to retrieve the initial size of image */
+ if (type->get_initial_load_size(udata, &len) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "can't retrieve image size")
+ HDassert(len > 0);
+
+ /* Check for possible speculative read off the end of the file */
+ if (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)
+ if (H5C__verify_len_eoa(f, type, addr, &len, FALSE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid len with respect to EOA")
+
+ /* Allocate the buffer for reading the on-disk entry image */
+ if (NULL == (image = (uint8_t *)H5MM_malloc(len + H5C_IMAGE_EXTRA_SPACE)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for on disk image buffer")
+#if H5C_DO_MEMORY_SANITY_CHECKS
+ H5MM_memcpy(image + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+
+#ifdef H5_HAVE_PARALLEL
+ if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) {
+ if ((mpi_rank = H5F_mpi_get_rank(f)) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "Can't get MPI rank")
+ if ((comm = H5F_mpi_get_comm(f)) == MPI_COMM_NULL)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "get_comm request failed")
+ } /* end if */
+#endif /* H5_HAVE_PARALLEL */
+
+ /* Get the on-disk entry image */
+ if (0 == (type->flags & H5C__CLASS_SKIP_READS)) {
+ unsigned tries, max_tries; /* The # of read attempts */
+ unsigned retries; /* The # of retries */
+ htri_t chk_ret; /* return from verify_chksum callback */
+ size_t actual_len = len; /* The actual length, after speculative reads have been resolved */
+ uint64_t nanosec = 1; /* # of nanoseconds to sleep between retries */
+ void *new_image; /* Pointer to image */
+ hbool_t len_changed = TRUE; /* Whether to re-check speculative entries */
+
+ /* Get the # of read attempts */
+ max_tries = tries = H5F_GET_READ_ATTEMPTS(f);
+
+ /*
+         * This do/while loop performs the following until the metadata
+         * checksum is correct or the file's allowed number of read attempts
+         * is exhausted:
+ * --read the metadata
+ * --determine the actual size of the metadata
+ * --perform checksum verification
+ */
+ do {
+ if (actual_len != len) {
+ if (NULL == (new_image = H5MM_realloc(image, len + H5C_IMAGE_EXTRA_SPACE)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()")
+ image = (uint8_t *)new_image;
+#if H5C_DO_MEMORY_SANITY_CHECKS
+ H5MM_memcpy(image + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+ } /* end if */
+
+#ifdef H5_HAVE_PARALLEL
+ if (!coll_access || 0 == mpi_rank) {
+#endif /* H5_HAVE_PARALLEL */
+ if (H5F_block_read(f, type->mem_type, addr, len, image) < 0) {
+#ifdef H5_HAVE_PARALLEL
+ if (coll_access) {
+ /* Push an error, but still participate in following MPI_Bcast */
+ HDmemset(image, 0, len);
+ HDONE_ERROR(H5E_CACHE, H5E_READERROR, NULL, "Can't read image*")
+ }
+ else
+#endif
+ HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL, "Can't read image*")
+ }
+
+#ifdef H5_HAVE_PARALLEL
+ } /* end if */
+ /* if the collective metadata read optimization is turned on,
+ * bcast the metadata read from process 0 to all ranks in the file
+ * communicator
+ */
+ if (coll_access) {
+ int buf_size;
+
+ H5_CHECKED_ASSIGN(buf_size, int, len, size_t);
+ if (MPI_SUCCESS != (mpi_code = MPI_Bcast(image, buf_size, MPI_BYTE, 0, comm)))
+ HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code)
+ } /* end if */
+#endif /* H5_HAVE_PARALLEL */
+
+ /* If the entry could be read speculatively and the length is still
+ * changing, check for updating the actual size
+ */
+ if ((type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG) && len_changed) {
+ /* Retrieve the actual length */
+ actual_len = len;
+ if (type->get_final_load_size(image, len, udata, &actual_len) < 0)
+ continue; /* Transfer control to while() and count towards retries */
+
+ /* Check for the length changing */
+ if (actual_len != len) {
+ /* Verify that the length isn't past the EOA for the file */
+ if (H5C__verify_len_eoa(f, type, addr, &actual_len, TRUE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "actual_len exceeds EOA")
+
+ /* Expand buffer to new size */
+ if (NULL == (new_image = H5MM_realloc(image, actual_len + H5C_IMAGE_EXTRA_SPACE)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()")
+ image = (uint8_t *)new_image;
+#if H5C_DO_MEMORY_SANITY_CHECKS
+ H5MM_memcpy(image + actual_len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+
+ if (actual_len > len) {
+#ifdef H5_HAVE_PARALLEL
+ if (!coll_access || 0 == mpi_rank) {
+#endif /* H5_HAVE_PARALLEL */
+ /* If the thing's image needs to be bigger for a speculatively
+ * loaded thing, go get the on-disk image again (the extra portion).
+ */
+ if (H5F_block_read(f, type->mem_type, addr + len, actual_len - len, image + len) <
+ 0) {
+#ifdef H5_HAVE_PARALLEL
+ if (coll_access) {
+ /* Push an error, but still participate in following MPI_Bcast */
+ HDmemset(image + len, 0, actual_len - len);
+ HDONE_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't read image")
+ }
+ else
+#endif
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't read image")
+ }
+
+#ifdef H5_HAVE_PARALLEL
+ }
+ /* If the collective metadata read optimization is turned on,
+ * Bcast the metadata read from process 0 to all ranks in the file
+ * communicator */
+ if (coll_access) {
+ int buf_size;
+
+ H5_CHECKED_ASSIGN(buf_size, int, actual_len - len, size_t);
+ if (MPI_SUCCESS !=
+ (mpi_code = MPI_Bcast(image + len, buf_size, MPI_BYTE, 0, comm)))
+ HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code)
+ } /* end if */
+#endif /* H5_HAVE_PARALLEL */
+ } /* end if */
+ } /* end if (actual_len != len) */
+ else {
+ /* The length has stabilized */
+ len_changed = FALSE;
+
+ /* Set the final length */
+ len = actual_len;
+ } /* else */
+ } /* end if */
+
+ /* If there's no way to verify the checksum for a piece of metadata
+ * (usually because there's no checksum in the file), leave now
+ */
+ if (type->verify_chksum == NULL)
+ break;
+
+ /* Verify the checksum for the metadata image */
+ if ((chk_ret = type->verify_chksum(image, actual_len, udata)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "failure from verify_chksum callback")
+ if (chk_ret == TRUE)
+ break;
+
+ /* Sleep for some time */
+ H5_nanosleep(nanosec);
+ nanosec *= 2; /* Double the sleep time next time */
+ } while (--tries);
+
+ /* Check for too many tries */
+ if (tries == 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL, "incorrect metadata checksum after all read attempts")
+
+ /* Calculate and track the # of retries */
+ retries = max_tries - tries;
+ if (retries) /* Does not track 0 retry */
+ if (H5F_track_metadata_read_retries(f, (unsigned)type->mem_type, retries) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "cannot track read tries = %u ", retries)
+
+ /* Set the final length (in case it wasn't set earlier) */
+ len = actual_len;
+ } /* end if !H5C__CLASS_SKIP_READS */
+
+ /* Deserialize the on-disk image into the native memory form */
+ if (NULL == (thing = type->deserialize(image, len, udata, &dirty)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "Can't deserialize image")
+
+ entry = (H5C_cache_entry_t *)thing;
+
+ /* In general, an entry should be clean just after it is loaded.
+ *
+ * However, when this code is used in the metadata cache, it is
+ * possible that object headers will be dirty at this point, as
+ * the deserialize function will alter object headers if necessary to
+ * fix an old bug.
+ *
+ * In the following assert:
+ *
+ * HDassert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6 ) );
+ *
+ * note that type ids 5 & 6 are associated with object headers in the
+ * metadata cache.
+ *
+ * When we get to using H5C for other purposes, we may wish to
+ * tighten up the assert so that the loophole only applies to the
+ * metadata cache.
+ */
+
+ HDassert((dirty == FALSE) || (type->id == 5 || type->id == 6));
+
+ entry->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
+ entry->cache_ptr = f->shared->cache;
+ entry->addr = addr;
+ entry->size = len;
+ HDassert(entry->size < H5C_MAX_ENTRY_SIZE);
+ entry->image_ptr = image;
+ entry->image_up_to_date = !dirty;
+ entry->type = type;
+ entry->is_dirty = dirty;
+ entry->dirtied = FALSE;
+ entry->is_protected = FALSE;
+ entry->is_read_only = FALSE;
+ entry->ro_ref_count = 0;
+ entry->is_pinned = FALSE;
+ entry->in_slist = FALSE;
+ entry->flush_marker = FALSE;
+#ifdef H5_HAVE_PARALLEL
+ entry->clear_on_unprotect = FALSE;
+ entry->flush_immediately = FALSE;
+ entry->coll_access = coll_access;
+#endif /* H5_HAVE_PARALLEL */
+ entry->flush_in_progress = FALSE;
+ entry->destroy_in_progress = FALSE;
+
+ entry->ring = H5C_RING_UNDEFINED;
+
+ /* Initialize flush dependency fields */
+ entry->flush_dep_parent = NULL;
+ entry->flush_dep_nparents = 0;
+ entry->flush_dep_parent_nalloc = 0;
+ entry->flush_dep_nchildren = 0;
+ entry->flush_dep_ndirty_children = 0;
+ entry->flush_dep_nunser_children = 0;
+ entry->ht_next = NULL;
+ entry->ht_prev = NULL;
+ entry->il_next = NULL;
+ entry->il_prev = NULL;
+
+ entry->next = NULL;
+ entry->prev = NULL;
+
+#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
+ entry->aux_next = NULL;
+ entry->aux_prev = NULL;
+#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+
+#ifdef H5_HAVE_PARALLEL
+ entry->coll_next = NULL;
+ entry->coll_prev = NULL;
+#endif /* H5_HAVE_PARALLEL */
+
+ /* initialize cache image related fields */
+ entry->include_in_image = FALSE;
+ entry->lru_rank = 0;
+ entry->image_dirty = FALSE;
+ entry->fd_parent_count = 0;
+ entry->fd_parent_addrs = NULL;
+ entry->fd_child_count = 0;
+ entry->fd_dirty_child_count = 0;
+ entry->image_fd_height = 0;
+ entry->prefetched = FALSE;
+ entry->prefetch_type_id = 0;
+ entry->age = 0;
+ entry->prefetched_dirty = FALSE;
+#ifndef NDEBUG /* debugging field */
+ entry->serialization_count = 0;
+#endif
+
+ /* initialize tag list fields */
+ entry->tl_next = NULL;
+ entry->tl_prev = NULL;
+ entry->tag_info = NULL;
+
+ H5C__RESET_CACHE_ENTRY_STATS(entry);
+
+ ret_value = thing;
+
+done:
+ /* Cleanup on error */
+ if (NULL == ret_value) {
+ /* Release resources */
+ if (thing && type->free_icr(thing) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "free_icr callback failed")
+ if (image)
+ image = (uint8_t *)H5MM_xfree(image);
+ } /* end if */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__load_entry() */
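+
+/* The retry logic above follows a standard checksum-retry loop with
+ * exponential backoff. A stripped-down sketch of just that control flow
+ * (HDF5 specifics elided; read_block() and checksum_ok() are placeholder
+ * names, not real routines):
+ *
+ *     unsigned tries = max_tries;
+ *     uint64_t nanosec = 1;
+ *     do {
+ *         read_block();
+ *         if (checksum_ok())
+ *             break;
+ *         H5_nanosleep(nanosec);
+ *         nanosec *= 2;
+ *     } while (--tries);
+ *     if (tries == 0)
+ *         fail();
+ *
+ * Doubling nanosec gives each successive retry a longer wait; a zero
+ * tries count at loop exit means every allowed attempt failed.
+ */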
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__mark_flush_dep_dirty()
+ *
+ * Purpose: Recursively propagate the flush_dep_ndirty_children flag
+ * up the dependency chain in response to entry either
+ * becoming dirty or having its flush_dep_ndirty_children
+ * increased from 0.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * 11/13/12
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__mark_flush_dep_dirty(H5C_cache_entry_t *entry)
+{
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity checks */
+ HDassert(entry);
+
+ /* Iterate over the parent entries, if any */
+ for (u = 0; u < entry->flush_dep_nparents; u++) {
+ /* Sanity check */
+ HDassert(entry->flush_dep_parent[u]->flush_dep_ndirty_children <
+ entry->flush_dep_parent[u]->flush_dep_nchildren);
+
+ /* Adjust the parent's number of dirty children */
+ entry->flush_dep_parent[u]->flush_dep_ndirty_children++;
+
+ /* If the parent has a 'notify' callback, send a 'child entry dirtied' notice */
+ if (entry->flush_dep_parent[u]->type->notify &&
+ (entry->flush_dep_parent[u]->type->notify)(H5C_NOTIFY_ACTION_CHILD_DIRTIED,
+ entry->flush_dep_parent[u]) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify parent about child entry dirty flag set")
+ } /* end for */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__mark_flush_dep_dirty() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__mark_flush_dep_clean()
+ *
+ * Purpose: Recursively propagate the flush_dep_ndirty_children flag
+ * up the dependency chain in response to entry either
+ * becoming clean or having its flush_dep_ndirty_children
+ * reduced to 0.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * 11/13/12
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__mark_flush_dep_clean(H5C_cache_entry_t *entry)
+{
+ int i; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity checks */
+ HDassert(entry);
+
+ /* Iterate over the parent entries, if any */
+ /* Note reverse iteration order, in case the callback removes the flush
+ * dependency - QAK, 2017/08/12
+ */
+ for (i = ((int)entry->flush_dep_nparents) - 1; i >= 0; i--) {
+ /* Sanity check */
+ HDassert(entry->flush_dep_parent[i]->flush_dep_ndirty_children > 0);
+
+ /* Adjust the parent's number of dirty children */
+ entry->flush_dep_parent[i]->flush_dep_ndirty_children--;
+
+ /* If the parent has a 'notify' callback, send a 'child entry cleaned' notice */
+ if (entry->flush_dep_parent[i]->type->notify &&
+ (entry->flush_dep_parent[i]->type->notify)(H5C_NOTIFY_ACTION_CHILD_CLEANED,
+ entry->flush_dep_parent[i]) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify parent about child entry dirty flag reset")
+ } /* end for */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__mark_flush_dep_clean() */
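+
+/* A note on the reverse iteration above: a 'notify' callback may destroy
+ * the very flush dependency it is told about, which shrinks
+ * flush_dep_nparents and shifts the tail of flush_dep_parent[] down by
+ * one. Walking the array from the last index toward 0 means such a
+ * removal only disturbs slots that have already been visited, so no
+ * parent is skipped. The forward loop in H5C__mark_flush_dep_dirty()
+ * does not need this precaution, as its callbacks are not expected to
+ * remove dependencies.
+ */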
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__mark_flush_dep_serialized()
+ *
+ * Purpose: Decrement the flush_dep_nunser_children fields of all the
+ * target entry's flush dependency parents in response to
+ * the target entry becoming serialized.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 8/30/16
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__mark_flush_dep_serialized(H5C_cache_entry_t *entry_ptr)
+{
+ int i; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity checks */
+ HDassert(entry_ptr);
+
+ /* Iterate over the parent entries, if any */
+ /* Note reverse iteration order, in case the callback removes the flush
+ * dependency - QAK, 2017/08/12
+ */
+ for (i = ((int)entry_ptr->flush_dep_nparents) - 1; i >= 0; i--) {
+ /* Sanity checks */
+ HDassert(entry_ptr->flush_dep_parent);
+ HDassert(entry_ptr->flush_dep_parent[i]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->flush_dep_parent[i]->flush_dep_nunser_children > 0);
+
+        /* decrement the parent's number of unserialized children */
+ entry_ptr->flush_dep_parent[i]->flush_dep_nunser_children--;
+
+ /* If the parent has a 'notify' callback, send a 'child entry serialized' notice */
+ if (entry_ptr->flush_dep_parent[i]->type->notify &&
+ (entry_ptr->flush_dep_parent[i]->type->notify)(H5C_NOTIFY_ACTION_CHILD_SERIALIZED,
+ entry_ptr->flush_dep_parent[i]) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify parent about child entry serialized flag set")
+ } /* end for */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__mark_flush_dep_serialized() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__mark_flush_dep_unserialized()
+ *
+ * Purpose: Increment the flush_dep_nunser_children fields of all the
+ * target entry's flush dependency parents in response to
+ * the target entry becoming unserialized.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 8/30/16
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__mark_flush_dep_unserialized(H5C_cache_entry_t *entry_ptr)
+{
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity checks */
+ HDassert(entry_ptr);
+
+ /* Iterate over the parent entries, if any */
+ for (u = 0; u < entry_ptr->flush_dep_nparents; u++) {
+ /* Sanity check */
+ HDassert(entry_ptr->flush_dep_parent);
+ HDassert(entry_ptr->flush_dep_parent[u]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children <
+ entry_ptr->flush_dep_parent[u]->flush_dep_nchildren);
+
+        /* increment the parent's number of unserialized children */
+ entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children++;
+
+ /* If the parent has a 'notify' callback, send a 'child entry unserialized' notice */
+ if (entry_ptr->flush_dep_parent[u]->type->notify &&
+ (entry_ptr->flush_dep_parent[u]->type->notify)(H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED,
+ entry_ptr->flush_dep_parent[u]) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify parent about child entry serialized flag reset")
+ } /* end for */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__mark_flush_dep_unserialized() */
+
+#ifndef NDEBUG
+/*-------------------------------------------------------------------------
+ * Function: H5C__assert_flush_dep_nocycle()
+ *
+ * Purpose: Assert recursively that base_entry is not the same as
+ * entry, and perform the same assertion on all of entry's
+ * flush dependency parents. This is used to detect cycles
+ * created by flush dependencies.
+ *
+ * Return: void
+ *
+ * Programmer: Neil Fortner
+ * 12/10/12
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t *entry, const H5C_cache_entry_t *base_entry)
+{
+ unsigned u; /* Local index variable */
+
+ FUNC_ENTER_PACKAGE_NOERR
+
+ /* Sanity checks */
+ HDassert(entry);
+ HDassert(base_entry);
+
+ /* Make sure the entries are not the same */
+ HDassert(base_entry != entry);
+
+ /* Iterate over entry's parents (if any) */
+ for (u = 0; u < entry->flush_dep_nparents; u++)
+ H5C__assert_flush_dep_nocycle(entry->flush_dep_parent[u], base_entry);
+
+ FUNC_LEAVE_NOAPI_VOID
+} /* H5C__assert_flush_dep_nocycle() */
+#endif
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__serialize_single_entry
+ *
+ * Purpose: Serialize the cache entry pointed to by the entry_ptr
+ * parameter.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer, 7/24/15
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__serialize_single_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(entry_ptr);
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(!entry_ptr->prefetched);
+ HDassert(!entry_ptr->image_up_to_date);
+ HDassert(entry_ptr->is_dirty);
+ HDassert(!entry_ptr->is_protected);
+ HDassert(!entry_ptr->flush_in_progress);
+ HDassert(entry_ptr->type);
+
+ /* Set entry_ptr->flush_in_progress to TRUE so the target entry
+ * will not be evicted out from under us. Must set it back to FALSE
+ * when we are done.
+ */
+ entry_ptr->flush_in_progress = TRUE;
+
+ /* Allocate buffer for the entry image if required. */
+ if (NULL == entry_ptr->image_ptr) {
+ HDassert(entry_ptr->size > 0);
+ if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer")
+#if H5C_DO_MEMORY_SANITY_CHECKS
+        H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE,
+ H5C_IMAGE_EXTRA_SPACE);
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+ } /* end if */
+
+ /* Generate image for entry */
+ if (H5C__generate_image(f, cache_ptr, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "Can't generate image for cache entry")
+
+ /* Reset the flush_in progress flag */
+ entry_ptr->flush_in_progress = FALSE;
+
+done:
+ HDassert((ret_value != SUCCEED) || (!entry_ptr->flush_in_progress));
+ HDassert((ret_value != SUCCEED) || (entry_ptr->image_up_to_date));
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__serialize_single_entry() */
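+
+/* The flush_in_progress bracket above is the cache's standard guard
+ * against an entry being evicted out from under a routine that holds a
+ * raw pointer to it; a minimal sketch of the pattern (error handling
+ * omitted):
+ *
+ *     entry_ptr->flush_in_progress = TRUE;
+ *     ... work that must not evict entry_ptr ...
+ *     entry_ptr->flush_in_progress = FALSE;
+ *
+ * The done-block assertions above verify that the flag is cleared again
+ * on the success path.
+ */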
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__destroy_pf_entry_child_flush_deps()
+ *
+ * Purpose:     Destroy all flush dependencies in which the supplied
+ * prefetched entry is the parent. Note that the children
+ * in these flush dependencies must be prefetched entries as
+ * well.
+ *
+ * As this action is part of the process of transferring all
+ * such flush dependencies to the deserialized version of the
+ * prefetched entry, ensure that the data necessary to complete
+ * the transfer is retained.
+ *
+ * Note: The current implementation of this function is
+ * quite inefficient -- mostly due to the current
+ * implementation of flush dependencies. This should
+ * be fixed at some point.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 8/11/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr, H5C_cache_entry_t *pf_entry_ptr,
+ H5C_cache_entry_t **fd_children)
+{
+ H5C_cache_entry_t *entry_ptr;
+#ifndef NDEBUG
+ unsigned entries_visited = 0;
+#endif
+ int fd_children_found = 0;
+ hbool_t found;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity checks */
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(pf_entry_ptr);
+ HDassert(pf_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(pf_entry_ptr->type);
+ HDassert(pf_entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID);
+ HDassert(pf_entry_ptr->prefetched);
+ HDassert(pf_entry_ptr->fd_child_count > 0);
+ HDassert(fd_children);
+
+ /* Scan each entry on the index list */
+ entry_ptr = cache_ptr->il_head;
+ while (entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+
+ /* Here we look at entry_ptr->flush_dep_nparents and not
+ * entry_ptr->fd_parent_count as it is possible that some
+ * or all of the prefetched flush dependency child relationships
+ * have already been destroyed.
+ */
+ if (entry_ptr->prefetched && (entry_ptr->flush_dep_nparents > 0)) {
+ unsigned u; /* Local index variable */
+
+ /* Re-init */
+ u = 0;
+ found = FALSE;
+
+ /* Sanity checks */
+ HDassert(entry_ptr->type);
+ HDassert(entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID);
+ HDassert(entry_ptr->fd_parent_count >= entry_ptr->flush_dep_nparents);
+ HDassert(entry_ptr->fd_parent_addrs);
+ HDassert(entry_ptr->flush_dep_parent);
+
+ /* Look for correct entry */
+ while (!found && (u < entry_ptr->fd_parent_count)) {
+ /* Sanity check entry */
+ HDassert(entry_ptr->flush_dep_parent[u]);
+ HDassert(entry_ptr->flush_dep_parent[u]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+
+ /* Correct entry? */
+ if (pf_entry_ptr == entry_ptr->flush_dep_parent[u])
+ found = TRUE;
+
+ u++;
+ } /* end while */
+
+ if (found) {
+ HDassert(NULL == fd_children[fd_children_found]);
+
+ /* Remove flush dependency */
+ fd_children[fd_children_found] = entry_ptr;
+ fd_children_found++;
+ if (H5C_destroy_flush_dependency(pf_entry_ptr, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL,
+ "can't destroy pf entry child flush dependency")
+
+#ifndef NDEBUG
+ /* Sanity check -- verify that the address of the parent
+ * appears in entry_ptr->fd_parent_addrs. Must do a search,
+ * as with flush dependency creates and destroys,
+ * entry_ptr->fd_parent_addrs and entry_ptr->flush_dep_parent
+ * can list parents in different order.
+ */
+ found = FALSE;
+ u = 0;
+ while (!found && u < entry_ptr->fd_parent_count) {
+ if (pf_entry_ptr->addr == entry_ptr->fd_parent_addrs[u])
+ found = TRUE;
+ u++;
+ } /* end while */
+ HDassert(found);
+#endif
+ } /* end if */
+ } /* end if */
+
+#ifndef NDEBUG
+ entries_visited++;
+#endif
+ entry_ptr = entry_ptr->il_next;
+ } /* end while */
+
+ /* Post-op sanity checks */
+ HDassert(NULL == fd_children[fd_children_found]);
+ HDassert((unsigned)fd_children_found == pf_entry_ptr->fd_child_count);
+ HDassert(entries_visited == cache_ptr->index_len);
+ HDassert(!pf_entry_ptr->is_pinned);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__destroy_pf_entry_child_flush_deps() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__deserialize_prefetched_entry()
+ *
+ * Purpose:     Deserialize the supplied prefetched entry, and return
+ * a pointer to the deserialized entry in *entry_ptr_ptr.
+ * If successful, remove the prefetched entry from the cache,
+ * and free it. Insert the deserialized entry into the cache.
+ *
+ * Note that the on disk image of the entry is not freed --
+ *              a pointer to it is stored in the deserialized entry's
+ * image_ptr field, and its image_up_to_date field is set to
+ * TRUE unless the entry is dirtied by the deserialize call.
+ *
+ * If the prefetched entry is a flush dependency child,
+ * destroy that flush dependency prior to calling the
+ * deserialize callback. If appropriate, the flush dependency
+ * relationship will be recreated by the cache client.
+ *
+ * If the prefetched entry is a flush dependency parent,
+ * destroy the flush dependency relationship with all its
+ * children. As all these children must be prefetched entries,
+ *              recreate these flush dependency relationships with the
+ *              deserialized entry after it is inserted in the cache.
+ *
+ * Since deserializing a prefetched entry is semantically
+ *              equivalent to a load, issue an entry loaded notification
+ * if the notify callback is defined.
+ *
+ * Return: SUCCEED on success, and FAIL on failure.
+ *
+ * Note that *entry_ptr_ptr is undefined on failure.
+ *
+ * Programmer: John Mainzer, 8/10/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t **entry_ptr_ptr,
+ const H5C_class_t *type, haddr_t addr, void *udata)
+{
+ hbool_t dirty = FALSE; /* Flag indicating whether thing was
+ * dirtied during deserialize
+ */
+ size_t len; /* Size of image in file */
+ void *thing = NULL; /* Pointer to thing loaded */
+ H5C_cache_entry_t *pf_entry_ptr; /* pointer to the prefetched entry */
+ /* supplied in *entry_ptr_ptr. */
+ H5C_cache_entry_t *ds_entry_ptr; /* Alias for thing loaded, as cache
+ * entry
+ */
+ H5C_cache_entry_t **fd_children = NULL; /* Pointer to a dynamically */
+ /* allocated array of pointers to */
+ /* the flush dependency children of */
+ /* the prefetched entry, or NULL if */
+ /* that array does not exist. */
+ unsigned flush_flags = (H5C__FLUSH_INVALIDATE_FLAG | H5C__FLUSH_CLEAR_ONLY_FLAG);
+ int i;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ /* sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->cache);
+ HDassert(f->shared->cache == cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(entry_ptr_ptr);
+ HDassert(*entry_ptr_ptr);
+ pf_entry_ptr = *entry_ptr_ptr;
+ HDassert(pf_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(pf_entry_ptr->type);
+ HDassert(pf_entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID);
+ HDassert(pf_entry_ptr->prefetched);
+ HDassert(pf_entry_ptr->image_up_to_date);
+ HDassert(pf_entry_ptr->image_ptr);
+ HDassert(pf_entry_ptr->size > 0);
+ HDassert(pf_entry_ptr->addr == addr);
+ HDassert(type);
+ HDassert(type->id == pf_entry_ptr->prefetch_type_id);
+ HDassert(type->mem_type == cache_ptr->class_table_ptr[type->id]->mem_type);
+
+ /* verify absence of prohibited or unsupported type flag combinations */
+ HDassert(!(type->flags & H5C__CLASS_SKIP_READS));
+
+ /* Can't see how skip reads could be usefully combined with
+     * the speculative read flag. Hence disallow.
+ */
+ HDassert(!((type->flags & H5C__CLASS_SKIP_READS) && (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)));
+ HDassert(H5F_addr_defined(addr));
+ HDassert(type->get_initial_load_size);
+ HDassert(type->deserialize);
+
+ /* if *pf_entry_ptr is a flush dependency child, destroy all such
+ * relationships now. The client will restore the relationship(s) with
+ * the deserialized entry if appropriate.
+ */
+ HDassert(pf_entry_ptr->fd_parent_count == pf_entry_ptr->flush_dep_nparents);
+ for (i = (int)(pf_entry_ptr->fd_parent_count) - 1; i >= 0; i--) {
+ HDassert(pf_entry_ptr->flush_dep_parent);
+ HDassert(pf_entry_ptr->flush_dep_parent[i]);
+ HDassert(pf_entry_ptr->flush_dep_parent[i]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(pf_entry_ptr->flush_dep_parent[i]->flush_dep_nchildren > 0);
+ HDassert(pf_entry_ptr->fd_parent_addrs);
+ HDassert(pf_entry_ptr->flush_dep_parent[i]->addr == pf_entry_ptr->fd_parent_addrs[i]);
+
+ if (H5C_destroy_flush_dependency(pf_entry_ptr->flush_dep_parent[i], pf_entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "can't destroy pf entry parent flush dependency")
+
+ pf_entry_ptr->fd_parent_addrs[i] = HADDR_UNDEF;
+ } /* end for */
+ HDassert(pf_entry_ptr->flush_dep_nparents == 0);
+
+ /* If *pf_entry_ptr is a flush dependency parent, destroy its flush
+ * dependency relationships with all its children (which must be
+ * prefetched entries as well).
+ *
+ * These flush dependency relationships will have to be restored
+ * after the deserialized entry is inserted into the cache in order
+ * to transfer these relationships to the new entry. Hence save the
+     * pointers to the flush dependency children of *pf_entry_ptr for later
+ * use.
+ */
+ if (pf_entry_ptr->fd_child_count > 0) {
+ if (NULL == (fd_children = (H5C_cache_entry_t **)H5MM_calloc(
+ sizeof(H5C_cache_entry_t **) * (size_t)(pf_entry_ptr->fd_child_count + 1))))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for fd child ptr array")
+
+ if (H5C__destroy_pf_entry_child_flush_deps(cache_ptr, pf_entry_ptr, fd_children) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL,
+ "can't destroy pf entry child flush dependency(s).")
+ } /* end if */
+
+ /* Since the size of the on disk image is known exactly, there is
+ * no need for either a call to the get_initial_load_size() callback,
+ * or retries if the H5C__CLASS_SPECULATIVE_LOAD_FLAG flag is set.
+ * Similarly, there is no need to clamp possible reads beyond
+ * EOF.
+ */
+ len = pf_entry_ptr->size;
+
+ /* Deserialize the prefetched on-disk image of the entry into the
+ * native memory form
+ */
+ if (NULL == (thing = type->deserialize(pf_entry_ptr->image_ptr, len, udata, &dirty)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, FAIL, "Can't deserialize image")
+ ds_entry_ptr = (H5C_cache_entry_t *)thing;
+
+ /* In general, an entry should be clean just after it is loaded.
+ *
+ * However, when this code is used in the metadata cache, it is
+ * possible that object headers will be dirty at this point, as
+ * the deserialize function will alter object headers if necessary to
+ * fix an old bug.
+ *
+ * In the following assert:
+ *
+ * HDassert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6 ) );
+ *
+ * note that type ids 5 & 6 are associated with object headers in the
+ * metadata cache.
+ *
+ * When we get to using H5C for other purposes, we may wish to
+ * tighten up the assert so that the loophole only applies to the
+ * metadata cache.
+ *
+ * Note that at present, dirty can't be set to true with prefetched
+     * entries. However, this may change, so the functionality is
+     * retained against that possibility.
+ *
+ * Also, note that it is possible for a prefetched entry to be dirty --
+ * hence the value assigned to ds_entry_ptr->is_dirty below.
+ */
+
+ HDassert((dirty == FALSE) || (type->id == 5 || type->id == 6));
+
+ ds_entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
+ ds_entry_ptr->cache_ptr = f->shared->cache;
+ ds_entry_ptr->addr = addr;
+ ds_entry_ptr->size = len;
+ HDassert(ds_entry_ptr->size < H5C_MAX_ENTRY_SIZE);
+ ds_entry_ptr->image_ptr = pf_entry_ptr->image_ptr;
+ ds_entry_ptr->image_up_to_date = !dirty;
+ ds_entry_ptr->type = type;
+ ds_entry_ptr->is_dirty = dirty | pf_entry_ptr->is_dirty;
+ ds_entry_ptr->dirtied = FALSE;
+ ds_entry_ptr->is_protected = FALSE;
+ ds_entry_ptr->is_read_only = FALSE;
+ ds_entry_ptr->ro_ref_count = 0;
+ ds_entry_ptr->is_pinned = FALSE;
+ ds_entry_ptr->in_slist = FALSE;
+ ds_entry_ptr->flush_marker = FALSE;
+#ifdef H5_HAVE_PARALLEL
+ ds_entry_ptr->clear_on_unprotect = FALSE;
+ ds_entry_ptr->flush_immediately = FALSE;
+ ds_entry_ptr->coll_access = FALSE;
+#endif /* H5_HAVE_PARALLEL */
+ ds_entry_ptr->flush_in_progress = FALSE;
+ ds_entry_ptr->destroy_in_progress = FALSE;
+
+ ds_entry_ptr->ring = pf_entry_ptr->ring;
+
+ /* Initialize flush dependency height fields */
+ ds_entry_ptr->flush_dep_parent = NULL;
+ ds_entry_ptr->flush_dep_nparents = 0;
+ ds_entry_ptr->flush_dep_parent_nalloc = 0;
+ ds_entry_ptr->flush_dep_nchildren = 0;
+ ds_entry_ptr->flush_dep_ndirty_children = 0;
+ ds_entry_ptr->flush_dep_nunser_children = 0;
+
+ /* Initialize fields supporting the hash table: */
+ ds_entry_ptr->ht_next = NULL;
+ ds_entry_ptr->ht_prev = NULL;
+ ds_entry_ptr->il_next = NULL;
+ ds_entry_ptr->il_prev = NULL;
+
+ /* Initialize fields supporting replacement policies: */
+ ds_entry_ptr->next = NULL;
+ ds_entry_ptr->prev = NULL;
+#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
+ ds_entry_ptr->aux_next = NULL;
+ ds_entry_ptr->aux_prev = NULL;
+#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+#ifdef H5_HAVE_PARALLEL
+    ds_entry_ptr->coll_next = NULL;
+    ds_entry_ptr->coll_prev = NULL;
+#endif /* H5_HAVE_PARALLEL */
+
+ /* Initialize cache image related fields */
+ ds_entry_ptr->include_in_image = FALSE;
+ ds_entry_ptr->lru_rank = 0;
+ ds_entry_ptr->image_dirty = FALSE;
+ ds_entry_ptr->fd_parent_count = 0;
+ ds_entry_ptr->fd_parent_addrs = NULL;
+ ds_entry_ptr->fd_child_count = pf_entry_ptr->fd_child_count;
+ ds_entry_ptr->fd_dirty_child_count = 0;
+ ds_entry_ptr->image_fd_height = 0;
+ ds_entry_ptr->prefetched = FALSE;
+ ds_entry_ptr->prefetch_type_id = 0;
+ ds_entry_ptr->age = 0;
+ ds_entry_ptr->prefetched_dirty = pf_entry_ptr->prefetched_dirty;
+#ifndef NDEBUG /* debugging field */
+ ds_entry_ptr->serialization_count = 0;
+#endif
+
+ H5C__RESET_CACHE_ENTRY_STATS(ds_entry_ptr);
+
+    /* Apply the tag to the newly deserialized entry */
+ if (H5C__tag_entry(cache_ptr, ds_entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "Cannot tag metadata entry")
+
+ /* We have successfully deserialized the prefetched entry.
+ *
+ * Before we return a pointer to the deserialized entry, we must remove
+ * the prefetched entry from the cache, discard it, and replace it with
+ * the deserialized entry. Note that we do not free the prefetched
+     * entry's image, as that has been transferred to the deserialized
+ * entry.
+ *
+ * Also note that we have not yet restored any flush dependencies. This
+ * must wait until the deserialized entry is inserted in the cache.
+ *
+ * To delete the prefetched entry from the cache:
+ *
+ * 1) Set pf_entry_ptr->image_ptr to NULL. Since we have already
+ * transferred the buffer containing the image to *ds_entry_ptr,
+ * this is not a memory leak.
+ *
+ * 2) Call H5C__flush_single_entry() with the H5C__FLUSH_INVALIDATE_FLAG
+ * and H5C__FLUSH_CLEAR_ONLY_FLAG flags set.
+ */
+ pf_entry_ptr->image_ptr = NULL;
+
+ if (pf_entry_ptr->is_dirty) {
+ HDassert(((cache_ptr->slist_enabled) && (pf_entry_ptr->in_slist)) ||
+ ((!cache_ptr->slist_enabled) && (!pf_entry_ptr->in_slist)));
+
+ flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG;
+ } /* end if */
+
+ if (H5C__flush_single_entry(f, pf_entry_ptr, flush_flags) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "can't expunge prefetched entry")
+
+#ifndef NDEBUG /* verify deletion */
+ H5C__SEARCH_INDEX(cache_ptr, addr, pf_entry_ptr, FAIL);
+
+ HDassert(NULL == pf_entry_ptr);
+#endif
+
+ /* Insert the deserialized entry into the cache. */
+ H5C__INSERT_IN_INDEX(cache_ptr, ds_entry_ptr, FAIL)
+
+ HDassert(!ds_entry_ptr->in_slist);
+ if (ds_entry_ptr->is_dirty)
+ H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, ds_entry_ptr, FAIL)
+
+ H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, ds_entry_ptr, FAIL)
+
+ /* Deserializing a prefetched entry is the conceptual equivalent of
+ * loading it from file. If the deserialized entry has a notify callback,
+ * send an "after load" notice now that the deserialized entry is fully
+ * integrated into the cache.
+ */
+ if (ds_entry_ptr->type->notify &&
+ (ds_entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_LOAD, ds_entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry loaded into cache")
+
+    /* Restore flush dependencies with the flush dependency children
+ * of the prefetched entry. Note that we must protect *ds_entry_ptr
+ * before the call to avoid triggering sanity check failures, and
+ * then unprotect it afterwards.
+ */
+ i = 0;
+ if (fd_children != NULL) {
+ H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, ds_entry_ptr, FAIL)
+ ds_entry_ptr->is_protected = TRUE;
+ while (fd_children[i] != NULL) {
+ /* Sanity checks */
+ HDassert((fd_children[i])->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert((fd_children[i])->prefetched);
+ HDassert((fd_children[i])->fd_parent_count > 0);
+ HDassert((fd_children[i])->fd_parent_addrs);
+
+#ifndef NDEBUG
+ {
+ int j;
+ hbool_t found;
+
+ j = 0;
+ found = FALSE;
+ while ((j < (int)((fd_children[i])->fd_parent_count)) && (!found)) {
+ if ((fd_children[i])->fd_parent_addrs[j] == ds_entry_ptr->addr)
+ found = TRUE;
+
+ j++;
+ } /* end while */
+ HDassert(found);
+ }
+#endif
+
+ if (H5C_create_flush_dependency(ds_entry_ptr, fd_children[i]) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Can't restore child flush dependency")
+
+ i++;
+ } /* end while */
+
+ H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, ds_entry_ptr, FAIL);
+ ds_entry_ptr->is_protected = FALSE;
+ } /* end if ( fd_children != NULL ) */
+ HDassert((unsigned)i == ds_entry_ptr->fd_child_count);
+
+ ds_entry_ptr->fd_child_count = 0;
+ H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr)
+
+ /* finally, pass ds_entry_ptr back to the caller */
+ *entry_ptr_ptr = ds_entry_ptr;
+
+done:
+ if (fd_children)
+ fd_children = (H5C_cache_entry_t **)H5MM_xfree((void *)fd_children);
+
+ /* Release resources on error */
+ if (FAIL == ret_value)
+ if (thing && type->free_icr(thing) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "free_icr callback failed")
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__deserialize_prefetched_entry() */
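+
+/* Design note on the routine above: deserializing a prefetched entry is
+ * implemented as a swap -- build the deserialized entry, expunge the
+ * placeholder via H5C__flush_single_entry() with
+ * (H5C__FLUSH_INVALIDATE_FLAG | H5C__FLUSH_CLEAR_ONLY_FLAG), then insert
+ * the replacement and rebuild its flush dependencies. Setting
+ * pf_entry_ptr->image_ptr to NULL before the expunge transfers the image
+ * buffer to the new entry, so the on-disk image is never reread or
+ * copied.
+ */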
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_insert_entry
+ *
+ * Purpose: Adds the specified thing to the cache. The thing need not
+ * exist on disk yet, but it must have an address and disk
+ * space reserved.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 6/2/04
+ *
+ *-------------------------------------------------------------------------
+ */
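+/* Usage sketch (illustrative only -- `my_class`, `node`, and `node_addr`
+ * are hypothetical): a client that has just allocated file space for a
+ * new entry would typically insert it roughly as follows:
+ *
+ *     if (H5C_insert_entry(f, my_class, node_addr, node, H5C__NO_FLAGS_SET) < 0)
+ *         HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "can't insert entry")
+ *
+ * Passing H5C__PIN_ENTRY_FLAG instead of H5C__NO_FLAGS_SET would insert
+ * the entry pinned.
+ */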
+herr_t
+H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, unsigned int flags)
+{
+ H5C_t *cache_ptr;
+ H5AC_ring_t ring = H5C_RING_UNDEFINED;
+ hbool_t insert_pinned;
+ hbool_t flush_last;
+#ifdef H5_HAVE_PARALLEL
+ hbool_t coll_access = FALSE; /* whether access to the cache entry is done collectively */
+#endif /* H5_HAVE_PARALLEL */
+ hbool_t set_flush_marker;
+ hbool_t write_permitted = TRUE;
+ size_t empty_space;
+ H5C_cache_entry_t *entry_ptr = NULL;
+ H5C_cache_entry_t *test_entry_ptr;
+ hbool_t entry_tagged = FALSE;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ HDassert(f);
+ HDassert(f->shared);
+
+ cache_ptr = f->shared->cache;
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(type);
+ HDassert(type->mem_type == cache_ptr->class_table_ptr[type->id]->mem_type);
+ HDassert(type->image_len);
+ HDassert(H5F_addr_defined(addr));
+ HDassert(thing);
+
+#ifdef H5C_DO_EXTREME_SANITY_CHECKS
+    /* no need to verify that entry is not already in the index as
+     * we already make that check below.
+     */
+ if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
+ H5C__validate_lru_list(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+ set_flush_marker = ((flags & H5C__SET_FLUSH_MARKER_FLAG) != 0);
+ insert_pinned = ((flags & H5C__PIN_ENTRY_FLAG) != 0);
+ flush_last = ((flags & H5C__FLUSH_LAST_FLAG) != 0);
+
+ /* Get the ring type from the API context */
+ ring = H5CX_get_ring();
+
+ entry_ptr = (H5C_cache_entry_t *)thing;
+
+ /* verify that the new entry isn't already in the hash table -- scream
+ * and die if it is.
+ */
+
+ H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL)
+
+ if (test_entry_ptr != NULL) {
+ if (test_entry_ptr == entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "entry already in cache")
+ else
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "duplicate entry in cache")
+ } /* end if */
+
+ entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
+ entry_ptr->cache_ptr = cache_ptr;
+ entry_ptr->addr = addr;
+ entry_ptr->type = type;
+
+ entry_ptr->image_ptr = NULL;
+ entry_ptr->image_up_to_date = FALSE;
+
+ entry_ptr->is_protected = FALSE;
+ entry_ptr->is_read_only = FALSE;
+ entry_ptr->ro_ref_count = 0;
+
+ entry_ptr->is_pinned = insert_pinned;
+ entry_ptr->pinned_from_client = insert_pinned;
+ entry_ptr->pinned_from_cache = FALSE;
+ entry_ptr->flush_me_last = flush_last;
+
+ /* newly inserted entries are assumed to be dirty */
+ entry_ptr->is_dirty = TRUE;
+
+ /* not protected, so can't be dirtied */
+ entry_ptr->dirtied = FALSE;
+
+ /* Retrieve the size of the thing */
+ if ((type->image_len)(thing, &(entry_ptr->size)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGETSIZE, FAIL, "can't get size of thing")
+ HDassert(entry_ptr->size > 0 && entry_ptr->size < H5C_MAX_ENTRY_SIZE);
+
+ entry_ptr->in_slist = FALSE;
+
+#ifdef H5_HAVE_PARALLEL
+ entry_ptr->clear_on_unprotect = FALSE;
+ entry_ptr->flush_immediately = FALSE;
+#endif /* H5_HAVE_PARALLEL */
+
+ entry_ptr->flush_in_progress = FALSE;
+ entry_ptr->destroy_in_progress = FALSE;
+
+ entry_ptr->ring = ring;
+
+ /* Initialize flush dependency fields */
+ entry_ptr->flush_dep_parent = NULL;
+ entry_ptr->flush_dep_nparents = 0;
+ entry_ptr->flush_dep_parent_nalloc = 0;
+ entry_ptr->flush_dep_nchildren = 0;
+ entry_ptr->flush_dep_ndirty_children = 0;
+ entry_ptr->flush_dep_nunser_children = 0;
+
+ entry_ptr->ht_next = NULL;
+ entry_ptr->ht_prev = NULL;
+ entry_ptr->il_next = NULL;
+ entry_ptr->il_prev = NULL;
+
+ entry_ptr->next = NULL;
+ entry_ptr->prev = NULL;
+
+#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
+ entry_ptr->aux_next = NULL;
+ entry_ptr->aux_prev = NULL;
+#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+
+#ifdef H5_HAVE_PARALLEL
+ entry_ptr->coll_next = NULL;
+ entry_ptr->coll_prev = NULL;
+#endif /* H5_HAVE_PARALLEL */
+
+ /* initialize cache image related fields */
+ entry_ptr->include_in_image = FALSE;
+ entry_ptr->lru_rank = 0;
+ entry_ptr->image_dirty = FALSE;
+ entry_ptr->fd_parent_count = 0;
+ entry_ptr->fd_parent_addrs = NULL;
+ entry_ptr->fd_child_count = 0;
+ entry_ptr->fd_dirty_child_count = 0;
+ entry_ptr->image_fd_height = 0;
+ entry_ptr->prefetched = FALSE;
+ entry_ptr->prefetch_type_id = 0;
+ entry_ptr->age = 0;
+ entry_ptr->prefetched_dirty = FALSE;
+#ifndef NDEBUG /* debugging field */
+ entry_ptr->serialization_count = 0;
+#endif
+
+ /* initialize tag list fields */
+ entry_ptr->tl_next = NULL;
+ entry_ptr->tl_prev = NULL;
+ entry_ptr->tag_info = NULL;
+
+ /* Apply tag to newly inserted entry */
+ if (H5C__tag_entry(cache_ptr, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "Cannot tag metadata entry")
+ entry_tagged = TRUE;
+
+ H5C__RESET_CACHE_ENTRY_STATS(entry_ptr)
+
+ if (cache_ptr->flash_size_increase_possible &&
+ (entry_ptr->size > cache_ptr->flash_size_increase_threshold))
+ if (H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C__flash_increase_cache_size failed")
+
+ if (cache_ptr->index_size >= cache_ptr->max_cache_size)
+ empty_space = 0;
+ else
+ empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
+
+ if (cache_ptr->evictions_enabled &&
+ (((cache_ptr->index_size + entry_ptr->size) > cache_ptr->max_cache_size) ||
+ (((empty_space + cache_ptr->clean_index_size) < cache_ptr->min_clean_size)))) {
+ size_t space_needed;
+
+ if (empty_space <= entry_ptr->size)
+ cache_ptr->cache_full = TRUE;
+
+ if (cache_ptr->check_write_permitted != NULL) {
+ if ((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "Can't get write_permitted")
+ } /* end if */
+ else
+ write_permitted = cache_ptr->write_permitted;
+
+ HDassert(entry_ptr->size <= H5C_MAX_ENTRY_SIZE);
+ space_needed = entry_ptr->size;
+ if (space_needed > cache_ptr->max_cache_size)
+ space_needed = cache_ptr->max_cache_size;
+
+        /* Note that space_needed is just the amount of space needed
+         * to insert the new entry without exceeding the cache
+ * size limit. The subsequent call to H5C__make_space_in_cache()
+ * may evict the entries required to free more or less space
+ * depending on conditions. It MAY be less if the cache is
+ * currently undersized, or more if the cache is oversized.
+ *
+ * The cache can exceed its maximum size limit via the following
+ * mechanisms:
+ *
+ * First, it is possible for the cache to grow without
+ * bound as long as entries are protected and not unprotected.
+ *
+ * Second, when writes are not permitted it is also possible
+ * for the cache to grow without bound.
+ *
+ * Finally, we usually don't check to see if the cache is
+ * oversized at the end of an unprotect. As a result, it is
+ * possible to have a vastly oversized cache with no protected
+ * entries as long as all the protects precede the unprotects.
+ */
+
+ if (H5C__make_space_in_cache(f, space_needed, write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C__make_space_in_cache failed")
+ } /* end if */
+
+ H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL)
+
+ /* New entries are presumed to be dirty */
+ HDassert(entry_ptr->is_dirty);
+ entry_ptr->flush_marker = set_flush_marker;
+ H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
+ H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, FAIL)
+
+#ifdef H5C_DO_EXTREME_SANITY_CHECKS
+ if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
+ H5C__validate_lru_list(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed just before done")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+    /* If the entry's type has a 'notify' callback send an 'after insertion'
+ * notice now that the entry is fully integrated into the cache.
+ */
+ if (entry_ptr->type->notify && (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_INSERT, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry inserted into cache")
+
+ H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr)
+
+#ifdef H5_HAVE_PARALLEL
+ if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI))
+ coll_access = H5F_get_coll_metadata_reads(f);
+
+ entry_ptr->coll_access = coll_access;
+ if (coll_access) {
+ H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, FAIL)
+
+ /* Make sure the size of the collective entries in the cache remain in check */
+ if (H5P_USER_TRUE == H5F_COLL_MD_READ(f)) {
+ if (cache_ptr->max_cache_size * 80 < cache_ptr->coll_list_size * 100) {
+ if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear collective metadata entries")
+ } /* end if */
+ } /* end if */
+ else {
+ if (cache_ptr->max_cache_size * 40 < cache_ptr->coll_list_size * 100) {
+ if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear collective metadata entries")
+ } /* end if */
+ } /* end else */
+ } /* end if */
+#endif
+
+done:
+#ifdef H5C_DO_EXTREME_SANITY_CHECKS
+ if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
+ H5C__validate_lru_list(cache_ptr) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+ if (ret_value < 0 && entry_tagged)
+ if (H5C__untag_entry(cache_ptr, entry_ptr) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list")
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_insert_entry() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_mark_entry_dirty
+ *
+ * Purpose: Mark a pinned or protected entry as dirty. The target entry
+ * MUST be either pinned or protected, and MAY be both.
+ *
+ * In the protected case, this call is the functional
+ * equivalent of setting the H5C__DIRTIED_FLAG on an unprotect
+ * call.
+ *
+ * In the pinned but not protected case, if the entry is not
+ *              already dirty, the function marks the entry dirty and
+ *              places it on the skip list.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 5/15/06
+ *
+ *-------------------------------------------------------------------------
+ */
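+/* Usage sketch (illustrative; `node` stands for a hypothetical pinned or
+ * protected client entry): after modifying the in-core image, a client
+ * would mark the entry dirty so the change is eventually written back:
+ *
+ *     if (H5C_mark_entry_dirty(node) < 0)
+ *         HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "can't mark entry dirty")
+ */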
+herr_t
+H5C_mark_entry_dirty(void *thing)
+{
+ H5C_t *cache_ptr;
+ H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)thing;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(entry_ptr);
+ HDassert(H5F_addr_defined(entry_ptr->addr));
+ cache_ptr = entry_ptr->cache_ptr;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ if (entry_ptr->is_protected) {
+ HDassert(!((entry_ptr)->is_read_only));
+
+ /* set the dirtied flag */
+ entry_ptr->dirtied = TRUE;
+
+ /* reset image_up_to_date */
+ if (entry_ptr->image_up_to_date) {
+ entry_ptr->image_up_to_date = FALSE;
+
+ if (entry_ptr->flush_dep_nparents > 0)
+ if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "Can't propagate serialization status to fd parents")
+ } /* end if */
+ } /* end if */
+ else if (entry_ptr->is_pinned) {
+ hbool_t was_clean; /* Whether the entry was previously clean */
+ hbool_t image_was_up_to_date;
+
+ /* Remember previous dirty status */
+ was_clean = !entry_ptr->is_dirty;
+
+ /* Check if image is up to date */
+ image_was_up_to_date = entry_ptr->image_up_to_date;
+
+ /* Mark the entry as dirty if it isn't already */
+ entry_ptr->is_dirty = TRUE;
+ entry_ptr->image_up_to_date = FALSE;
+
+ /* Modify cache data structures */
+ if (was_clean)
+ H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr, FAIL)
+ if (!entry_ptr->in_slist)
+ H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
+
+ /* Update stats for entry being marked dirty */
+ H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr)
+
+ /* Check for entry changing status and do notifications, etc. */
+ if (was_clean) {
+            /* If the entry's type has a 'notify' callback send an 'entry dirtied'
+ * notice now that the entry is fully integrated into the cache.
+ */
+ if (entry_ptr->type->notify &&
+ (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag set")
+
+ /* Propagate the dirty flag up the flush dependency chain if appropriate */
+ if (entry_ptr->flush_dep_nparents > 0)
+ if (H5C__mark_flush_dep_dirty(entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
+ } /* end if */
+ if (image_was_up_to_date)
+ if (entry_ptr->flush_dep_nparents > 0)
+ if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "Can't propagate serialization status to fd parents")
+ } /* end if */
+ else
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Entry is neither pinned nor protected??")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_mark_entry_dirty() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_mark_entry_clean
+ *
+ * Purpose: Mark a pinned entry as clean. The target entry MUST be pinned.
+ *
+ *              If the entry is not already clean, the function marks the
+ *              entry clean and removes it from the skip list.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * 7/23/16
+ *
+ *-------------------------------------------------------------------------
+ */
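+/* Usage sketch (illustrative; `node` is a hypothetical pinned entry whose
+ * on-disk image is known to be current -- e.g. in the parallel case, when
+ * this rank learns the entry was written by process 0):
+ *
+ *     if (H5C_mark_entry_clean(node) < 0)
+ *         HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "can't mark entry clean")
+ */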
+herr_t
+H5C_mark_entry_clean(void *_thing)
+{
+ H5C_t *cache_ptr;
+ H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)_thing;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(entry_ptr);
+ HDassert(H5F_addr_defined(entry_ptr->addr));
+ cache_ptr = entry_ptr->cache_ptr;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ /* Operate on pinned entry */
+ if (entry_ptr->is_protected)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "entry is protected")
+ else if (entry_ptr->is_pinned) {
+ hbool_t was_dirty; /* Whether the entry was previously dirty */
+
+ /* Remember previous dirty status */
+ was_dirty = entry_ptr->is_dirty;
+
+ /* Mark the entry as clean if it isn't already */
+ entry_ptr->is_dirty = FALSE;
+
+ /* Also reset the 'flush_marker' flag, since the entry shouldn't be flushed now */
+ entry_ptr->flush_marker = FALSE;
+
+ /* Modify cache data structures */
+ if (was_dirty)
+ H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr, FAIL)
+ if (entry_ptr->in_slist)
+ H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE, FAIL)
+
+ /* Update stats for entry being marked clean */
+ H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr)
+
+ /* Check for entry changing status and do notifications, etc. */
+ if (was_dirty) {
+            /* If the entry's type has a 'notify' callback send an 'entry cleaned'
+ * notice now that the entry is fully integrated into the cache.
+ */
+ if (entry_ptr->type->notify &&
+ (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify client about entry dirty flag cleared")
+
+            /* Propagate the clean flag up the flush dependency chain, if appropriate */
+ if (entry_ptr->flush_dep_nparents > 0)
+ if (H5C__mark_flush_dep_clean(entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Can't propagate flush dep clean")
+ } /* end if */
+ } /* end if */
+ else
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Entry is not pinned??")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_mark_entry_clean() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_mark_entry_unserialized
+ *
+ * Purpose: Mark a pinned or protected entry as unserialized. The target
+ * entry MUST be either pinned or protected, and MAY be both.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * 12/23/16
+ *
+ *-------------------------------------------------------------------------
+ */
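+/* Usage sketch (illustrative; `node` is hypothetical): a client that has
+ * invalidated an entry's cached on-disk image without going through
+ * H5C_mark_entry_dirty() would call:
+ *
+ *     if (H5C_mark_entry_unserialized(node) < 0)
+ *         HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKUNSERIALIZED, FAIL, "can't mark entry unserialized")
+ */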
+herr_t
+H5C_mark_entry_unserialized(void *thing)
+{
+ H5C_cache_entry_t *entry = (H5C_cache_entry_t *)thing;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(entry);
+ HDassert(H5F_addr_defined(entry->addr));
+
+ if (entry->is_protected || entry->is_pinned) {
+ HDassert(!entry->is_read_only);
+
+ /* Reset image_up_to_date */
+ if (entry->image_up_to_date) {
+ entry->image_up_to_date = FALSE;
+
+ if (entry->flush_dep_nparents > 0)
+ if (H5C__mark_flush_dep_unserialized(entry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL,
+ "Can't propagate serialization status to fd parents")
+ } /* end if */
+ } /* end if */
+ else
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKUNSERIALIZED, FAIL,
+ "Entry to unserialize is neither pinned nor protected??")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_mark_entry_unserialized() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_mark_entry_serialized
+ *
+ * Purpose: Mark a pinned entry as serialized. The target entry MUST be
+ * pinned.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * 12/23/16
+ *
+ *-------------------------------------------------------------------------
+ */
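+/* Usage sketch (illustrative; `node` is hypothetical): after regenerating
+ * the on-disk image of a pinned entry itself, a client would call:
+ *
+ *     if (H5C_mark_entry_serialized(node) < 0)
+ *         HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL, "can't mark entry serialized")
+ */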
+herr_t
+H5C_mark_entry_serialized(void *_thing)
+{
+ H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_thing;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(entry);
+ HDassert(H5F_addr_defined(entry->addr));
+
+ /* Operate on pinned entry */
+ if (entry->is_protected)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL, "entry is protected")
+ else if (entry->is_pinned) {
+ /* Check for entry changing status and do notifications, etc. */
+ if (!entry->image_up_to_date) {
+ /* Set the image_up_to_date flag */
+ entry->image_up_to_date = TRUE;
+
+                /* Propagate the serialized flag up the flush dependency chain, if appropriate */
+ if (entry->flush_dep_nparents > 0)
+ if (H5C__mark_flush_dep_serialized(entry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL,
+ "Can't propagate flush dep serialize")
+ } /* end if */
+ } /* end if */
+ else
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL, "Entry is not pinned??")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_mark_entry_serialized() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_move_entry
+ *
+ * Purpose: Use this function to notify the cache that an entry's
+ * file address changed.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 6/2/04
+ *
+ *-------------------------------------------------------------------------
+ */
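+/* Usage sketch (illustrative; `my_class`, `old_addr`, and `new_addr` are
+ * hypothetical): a client that relocates an entry's image on disk
+ * notifies the cache so the index stays consistent:
+ *
+ *     if (H5C_move_entry(f->shared->cache, my_class, old_addr, new_addr) < 0)
+ *         HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "can't move entry")
+ *
+ * The move dirties the entry, since its image must be rewritten at the
+ * new address.
+ */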
+herr_t
+H5C_move_entry(H5C_t *cache_ptr, const H5C_class_t *type, haddr_t old_addr, haddr_t new_addr)
+{
+ H5C_cache_entry_t *entry_ptr = NULL;
+ H5C_cache_entry_t *test_entry_ptr = NULL;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(type);
+ HDassert(H5F_addr_defined(old_addr));
+ HDassert(H5F_addr_defined(new_addr));
+ HDassert(H5F_addr_ne(old_addr, new_addr));
+
+#ifdef H5C_DO_EXTREME_SANITY_CHECKS
+ if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
+ H5C__validate_lru_list(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+ H5C__SEARCH_INDEX(cache_ptr, old_addr, entry_ptr, FAIL)
+
+ if (entry_ptr == NULL || entry_ptr->type != type)
+ /* the old item doesn't exist in the cache, so we are done. */
+ HGOTO_DONE(SUCCEED)
+
+ HDassert(entry_ptr->addr == old_addr);
+ HDassert(entry_ptr->type == type);
+
+ /* Check for R/W status, otherwise error */
+    /* (Moving an R/O entry would mark it dirty, which shouldn't
+ * happen. QAK - 2016/12/02)
+ */
+ if (entry_ptr->is_read_only)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "can't move R/O entry")
+
+ H5C__SEARCH_INDEX(cache_ptr, new_addr, test_entry_ptr, FAIL)
+
+ if (test_entry_ptr != NULL) { /* we are hosed */
+ if (test_entry_ptr->type == type)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "target already moved & reinserted???")
+ else
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "new address already in use?")
+ } /* end if */
+
+ /* If we get this far we have work to do. Remove *entry_ptr from
+ * the hash table (and skip list if necessary), change its address to the
+ * new address, mark it as dirty (if it isn't already) and then re-insert.
+ *
+ * Update the replacement policy for a hit to avoid an eviction before
+ * the moved entry is touched. Update stats for a move.
+ *
+ * Note that we do not check the size of the cache, or evict anything.
+ * Since this is a simple re-name, cache size should be unaffected.
+ *
+ * Check to see if the target entry is in the process of being destroyed
+ * before we delete from the index, etc. If it is, all we do is
+ * change the addr. If the entry is only in the process of being flushed,
+ * don't mark it as dirty either, lest we confuse the flush call back.
+ */
+ if (!entry_ptr->destroy_in_progress) {
+ H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL)
+
+ if (entry_ptr->in_slist) {
+ HDassert(cache_ptr->slist_ptr);
+ H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE, FAIL)
+ } /* end if */
+ } /* end if */
+
+ entry_ptr->addr = new_addr;
+
+ if (!entry_ptr->destroy_in_progress) {
+ hbool_t was_dirty; /* Whether the entry was previously dirty */
+
+ /* Remember previous dirty status */
+ was_dirty = entry_ptr->is_dirty;
+
+ /* Mark the entry as dirty if it isn't already */
+ entry_ptr->is_dirty = TRUE;
+
+ /* This shouldn't be needed, but it keeps the test code happy */
+ if (entry_ptr->image_up_to_date) {
+ entry_ptr->image_up_to_date = FALSE;
+ if (entry_ptr->flush_dep_nparents > 0)
+ if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "Can't propagate serialization status to fd parents")
+ } /* end if */
+
+ /* Modify cache data structures */
+ H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL)
+ H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
+
+ /* Skip some actions if we're in the middle of flushing the entry */
+ if (!entry_ptr->flush_in_progress) {
+ /* Update the replacement policy for the entry */
+ H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, FAIL)
+
+ /* Check for entry changing status and do notifications, etc. */
+ if (!was_dirty) {
+                /* If the entry's type has a 'notify' callback send an 'entry dirtied'
+ * notice now that the entry is fully integrated into the cache.
+ */
+ if (entry_ptr->type->notify &&
+ (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify client about entry dirty flag set")
+
+ /* Propagate the dirty flag up the flush dependency chain if appropriate */
+ if (entry_ptr->flush_dep_nparents > 0)
+ if (H5C__mark_flush_dep_dirty(entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL,
+ "Can't propagate flush dep dirty flag")
+ } /* end if */
+ } /* end if */
+ } /* end if */
+
+ H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr)
+
+done:
+#ifdef H5C_DO_EXTREME_SANITY_CHECKS
+ if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
+ H5C__validate_lru_list(cache_ptr) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_move_entry() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_resize_entry
+ *
+ * Purpose: Resize a pinned or protected entry.
+ *
+ * Resizing an entry dirties it, so if the entry is not
+ * already dirty, the function places the entry on the
+ * skip list.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 7/5/06
+ *
+ *-------------------------------------------------------------------------
+ */
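+/* Usage sketch (illustrative; `node` and `new_len` are hypothetical): a
+ * client growing or shrinking the on-disk footprint of a pinned or
+ * protected entry would call:
+ *
+ *     if (H5C_resize_entry(node, new_len) < 0)
+ *         HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, "can't resize entry")
+ */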
+herr_t
+H5C_resize_entry(void *thing, size_t new_size)
+{
+ H5C_t *cache_ptr;
+ H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)thing;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(entry_ptr);
+ HDassert(H5F_addr_defined(entry_ptr->addr));
+ cache_ptr = entry_ptr->cache_ptr;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ /* Check for usage errors */
+ if (new_size <= 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "New size is non-positive")
+ if (!(entry_ptr->is_pinned || entry_ptr->is_protected))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, FAIL, "Entry isn't pinned or protected??")
+
+#ifdef H5C_DO_EXTREME_SANITY_CHECKS
+ if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+ /* update for change in entry size if necessary */
+ if (entry_ptr->size != new_size) {
+ hbool_t was_clean;
+
+ /* make note of whether the entry was clean to begin with */
+ was_clean = !entry_ptr->is_dirty;
+
+ /* mark the entry as dirty if it isn't already */
+ entry_ptr->is_dirty = TRUE;
+
+ /* Reset the image up-to-date status */
+ if (entry_ptr->image_up_to_date) {
+ entry_ptr->image_up_to_date = FALSE;
+ if (entry_ptr->flush_dep_nparents > 0)
+ if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "Can't propagate serialization status to fd parents")
+ } /* end if */
+
+ /* Release the current image */
+ if (entry_ptr->image_ptr)
+ entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
+
+ /* do a flash cache size increase if appropriate */
+ if (cache_ptr->flash_size_increase_possible) {
+ if (new_size > entry_ptr->size) {
+ size_t size_increase;
+
+ size_increase = new_size - entry_ptr->size;
+ if (size_increase >= cache_ptr->flash_size_increase_threshold)
+ if (H5C__flash_increase_cache_size(cache_ptr, entry_ptr->size, new_size) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, "flash cache increase failed")
+ }
+ }
+
+ /* update the pinned and/or protected entry list */
+ if (entry_ptr->is_pinned)
+ H5C__DLL_UPDATE_FOR_SIZE_CHANGE(cache_ptr->pel_len, cache_ptr->pel_size, entry_ptr->size,
+ new_size, FAIL)
+ if (entry_ptr->is_protected)
+ H5C__DLL_UPDATE_FOR_SIZE_CHANGE(cache_ptr->pl_len, cache_ptr->pl_size, entry_ptr->size, new_size,
+ FAIL)
+
+#ifdef H5_HAVE_PARALLEL
+ if (entry_ptr->coll_access)
+ H5C__DLL_UPDATE_FOR_SIZE_CHANGE(cache_ptr->coll_list_len, cache_ptr->coll_list_size,
+ entry_ptr->size, new_size, FAIL)
+#endif /* H5_HAVE_PARALLEL */
+
+ /* update statistics just before changing the entry size */
+ H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size);
+
+ /* update the hash table */
+ H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_size, entry_ptr, was_clean, FAIL);
+
+ /* if the entry is in the skip list, update that too */
+ if (entry_ptr->in_slist)
+ H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_size);
+
+ /* finally, update the entry size proper */
+ entry_ptr->size = new_size;
+
+ if (!entry_ptr->in_slist)
+ H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
+
+ if (entry_ptr->is_pinned)
+ H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr)
+
+ /* Check for entry changing status and do notifications, etc. */
+ if (was_clean) {
+            /* If the entry's type has a 'notify' callback send an 'entry dirtied'
+ * notice now that the entry is fully integrated into the cache.
+ */
+ if (entry_ptr->type->notify &&
+ (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag set")
+
+ /* Propagate the dirty flag up the flush dependency chain if appropriate */
+ if (entry_ptr->flush_dep_nparents > 0)
+ if (H5C__mark_flush_dep_dirty(entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
+ } /* end if */
+ } /* end if */
+
+done:
+#ifdef H5C_DO_EXTREME_SANITY_CHECKS
+ if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_resize_entry() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_pin_protected_entry()
+ *
+ * Purpose: Pin a protected cache entry. The entry must be protected
+ * at the time of call, and must be unpinned.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 4/26/06
+ *
+ *-------------------------------------------------------------------------
+ */
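+/* Usage sketch (illustrative; `node` is hypothetical): pinning is done
+ * while the entry is protected, so it can still be reached safely after
+ * the unprotect without risk of eviction:
+ *
+ *     if (H5C_pin_protected_entry(node) < 0)
+ *         HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "can't pin entry")
+ *
+ * Passing H5C__PIN_ENTRY_FLAG to H5C_unprotect() has the same effect.
+ */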
+herr_t
+H5C_pin_protected_entry(void *thing)
+{
+ H5C_t *cache_ptr;
+ H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)thing; /* Pointer to entry to pin */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(entry_ptr);
+ HDassert(H5F_addr_defined(entry_ptr->addr));
+ cache_ptr = entry_ptr->cache_ptr;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+#ifdef H5C_DO_EXTREME_SANITY_CHECKS
+ if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
+ H5C__validate_lru_list(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+ /* Only protected entries can be pinned */
+ if (!entry_ptr->is_protected)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Entry isn't protected")
+
+ /* Pin the entry from a client */
+ if (H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client")
+
+done:
+#ifdef H5C_DO_EXTREME_SANITY_CHECKS
+ if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
+ H5C__validate_lru_list(cache_ptr) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_pin_protected_entry() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_protect
+ *
+ * Purpose: If the target entry is not in the cache, load it. If
+ * necessary, attempt to evict one or more entries to keep
+ * the cache within its maximum size.
+ *
+ * Mark the target entry as protected, and return its address
+ * to the caller. The caller must call H5C_unprotect() when
+ * finished with the entry.
+ *
+ * While it is protected, the entry may not be either evicted
+ * or flushed -- nor may it be accessed by another call to
+ * H5C_protect. Any attempt to do so will result in a failure.
+ *
+ * Return: Success: Ptr to the desired entry
+ * Failure: NULL
+ *
+ * Programmer: John Mainzer - 6/2/04
+ *
+ *-------------------------------------------------------------------------
+ */
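+/* Usage sketch (illustrative; `my_class` and `udata` are hypothetical):
+ * the usual access pattern brackets each use of an entry with a
+ * protect/unprotect pair:
+ *
+ *     if (NULL == (node = H5C_protect(f, my_class, addr, udata, H5C__READ_ONLY_FLAG)))
+ *         HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "can't protect entry")
+ *
+ *     ... read from node ...
+ *
+ *     if (H5C_unprotect(f, addr, node, H5C__NO_FLAGS_SET) < 0)
+ *         HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, NULL, "can't unprotect entry")
+ */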
+void *
+H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsigned flags)
+{
+ H5C_t *cache_ptr;
+ H5AC_ring_t ring = H5C_RING_UNDEFINED;
+ hbool_t hit;
+ hbool_t have_write_permitted = FALSE;
+ hbool_t read_only = FALSE;
+ hbool_t flush_last;
+#ifdef H5_HAVE_PARALLEL
+ hbool_t coll_access = FALSE; /* whether access to the cache entry is done collectively */
+#endif /* H5_HAVE_PARALLEL */
+ hbool_t write_permitted = FALSE;
+ hbool_t was_loaded = FALSE; /* Whether the entry was loaded as a result of the protect */
+ size_t empty_space;
+ void *thing;
+ H5C_cache_entry_t *entry_ptr;
+ void *ret_value = NULL; /* Return value */
+
+ FUNC_ENTER_NOAPI(NULL)
+
+ /* check args */
+ HDassert(f);
+ HDassert(f->shared);
+
+ cache_ptr = f->shared->cache;
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(type);
+ HDassert(type->mem_type == cache_ptr->class_table_ptr[type->id]->mem_type);
+ HDassert(H5F_addr_defined(addr));
+
+#ifdef H5C_DO_EXTREME_SANITY_CHECKS
+ if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
+ H5C__validate_lru_list(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "an extreme sanity check failed on entry")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+ /* Load the cache image, if requested */
+ if (cache_ptr->load_image) {
+ cache_ptr->load_image = FALSE;
+ if (H5C__load_cache_image(f) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "Can't load cache image")
+ } /* end if */
+
+ read_only = ((flags & H5C__READ_ONLY_FLAG) != 0);
+ flush_last = ((flags & H5C__FLUSH_LAST_FLAG) != 0);
+
+ /* Get the ring type from the API context */
+ ring = H5CX_get_ring();
+
+#ifdef H5_HAVE_PARALLEL
+ if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI))
+ coll_access = H5F_get_coll_metadata_reads(f);
+#endif /* H5_HAVE_PARALLEL */
+
+ /* first check to see if the target is in cache */
+ H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, NULL)
+
+ if (entry_ptr != NULL) {
+ if (entry_ptr->ring != ring)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "ring type mismatch occurred for cache entry")
+
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+
+ if (entry_ptr->prefetched) {
+ /* This call removes the prefetched entry from the cache,
+ * and replaces it with an entry deserialized from the
+ * image of the prefetched entry.
+ */
+ if (H5C__deserialize_prefetched_entry(f, cache_ptr, &entry_ptr, type, addr, udata) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't deserialize prefetched entry")
+
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(!entry_ptr->prefetched);
+ HDassert(entry_ptr->addr == addr);
+ } /* end if */
+
+ /* Check for trying to load the wrong type of entry from an address */
+ if (entry_ptr->type != type)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, NULL, "incorrect cache entry type")
+
+#ifdef H5_HAVE_PARALLEL
+ /* If this is a collective metadata read, the entry is not marked as
+ * collective, and is clean, it is possible that other processes will
+         * not have it in their caches and will expect a bcast of the entry from
+ * process 0. So process 0 will bcast the entry to all other ranks.
+ * Ranks that _do_ have the entry in their cache still have to
+ * participate in the bcast.
+ */
+ if (coll_access) {
+ if (!entry_ptr->is_dirty && !entry_ptr->coll_access) {
+ MPI_Comm comm; /* File MPI Communicator */
+ int mpi_code; /* MPI error code */
+ int buf_size;
+
+ if (MPI_COMM_NULL == (comm = H5F_mpi_get_comm(f)))
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "get_comm request failed")
+
+ if (entry_ptr->image_ptr == NULL) {
+ int mpi_rank;
+
+ if ((mpi_rank = H5F_mpi_get_rank(f)) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "Can't get MPI rank")
+
+ if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL,
+ "memory allocation failed for on disk image buffer")
+#if H5C_DO_MEMORY_SANITY_CHECKS
+ H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE,
+ H5C_IMAGE_EXTRA_SPACE);
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+ if (0 == mpi_rank && H5C__generate_image(f, cache_ptr, entry_ptr) < 0)
+ /* If image generation fails, push an error but
+ * still participate in the following MPI_Bcast
+ */
+ HDONE_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "can't generate entry's image")
+ } /* end if */
+ HDassert(entry_ptr->image_ptr);
+
+ H5_CHECKED_ASSIGN(buf_size, int, entry_ptr->size, size_t);
+ if (MPI_SUCCESS != (mpi_code = MPI_Bcast(entry_ptr->image_ptr, buf_size, MPI_BYTE, 0, comm)))
+ HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code)
+
+ /* Mark the entry as collective and insert into the collective list */
+ entry_ptr->coll_access = TRUE;
+ H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, NULL)
+ } /* end if */
+ else if (entry_ptr->coll_access)
+ H5C__MOVE_TO_TOP_IN_COLL_LIST(cache_ptr, entry_ptr, NULL)
+ } /* end if */
+#endif /* H5_HAVE_PARALLEL */
+
+#ifdef H5C_DO_TAGGING_SANITY_CHECKS
+ {
+ /* Verify tag value */
+ if (cache_ptr->ignore_tags != TRUE) {
+ haddr_t tag; /* Tag value */
+
+ /* The entry is already in the cache, but make sure that the tag value
+ * is still legal. This will ensure that had the entry NOT been in the
+ * cache, tagging was still set up correctly and it would have received
+ * a legal tag value after getting loaded from disk.
+ */
+
+ /* Get the tag */
+ tag = H5CX_get_tag();
+
+ if (H5C_verify_tag(entry_ptr->type->id, tag) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "tag verification failed")
+ } /* end if */
+ }
+#endif
+
+ hit = TRUE;
+ thing = (void *)entry_ptr;
+ }
+ else {
+ /* must try to load the entry from disk. */
+ hit = FALSE;
+ if (NULL == (thing = H5C__load_entry(f,
+#ifdef H5_HAVE_PARALLEL
+ coll_access,
+#endif /* H5_HAVE_PARALLEL */
+ type, addr, udata)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't load entry")
+
+ entry_ptr = (H5C_cache_entry_t *)thing;
+ cache_ptr->entries_loaded_counter++;
+
+ entry_ptr->ring = ring;
+#ifdef H5_HAVE_PARALLEL
+ if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI) && entry_ptr->coll_access)
+ H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, NULL)
+#endif /* H5_HAVE_PARALLEL */
+
+ /* Apply tag to newly protected entry */
+ if (H5C__tag_entry(cache_ptr, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, NULL, "Cannot tag metadata entry")
+
+ /* If the entry is very large, and we are configured to allow it,
+ * we may wish to perform a flash cache size increase.
+ */
+ if (cache_ptr->flash_size_increase_possible &&
+ (entry_ptr->size > cache_ptr->flash_size_increase_threshold))
+ if (H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__flash_increase_cache_size failed")
+
+ if (cache_ptr->index_size >= cache_ptr->max_cache_size)
+ empty_space = 0;
+ else
+ empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
+
+        /* Try to free up space if necessary and if evictions are permitted.
+         * Note that if evictions are enabled, we will call
+         * H5C__make_space_in_cache() regardless of whether the
+         * min_free_space requirement is met.
+         */
+ if (cache_ptr->evictions_enabled &&
+ (((cache_ptr->index_size + entry_ptr->size) > cache_ptr->max_cache_size) ||
+ ((empty_space + cache_ptr->clean_index_size) < cache_ptr->min_clean_size))) {
+
+ size_t space_needed;
+
+ if (empty_space <= entry_ptr->size)
+ cache_ptr->cache_full = TRUE;
+
+ if (cache_ptr->check_write_permitted != NULL) {
+ if ((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Can't get write_permitted 1")
+ else
+ have_write_permitted = TRUE;
+ } /* end if */
+ else {
+ write_permitted = cache_ptr->write_permitted;
+ have_write_permitted = TRUE;
+ } /* end else */
+
+ HDassert(entry_ptr->size <= H5C_MAX_ENTRY_SIZE);
+ space_needed = entry_ptr->size;
+ if (space_needed > cache_ptr->max_cache_size)
+ space_needed = cache_ptr->max_cache_size;
+
+            /* Note that space_needed is just the amount of space needed
+             * to insert the new entry without exceeding the cache
+ * size limit. The subsequent call to H5C__make_space_in_cache()
+ * may evict the entries required to free more or less space
+ * depending on conditions. It MAY be less if the cache is
+ * currently undersized, or more if the cache is oversized.
+ *
+ * The cache can exceed its maximum size limit via the following
+ * mechanisms:
+ *
+ * First, it is possible for the cache to grow without
+ * bound as long as entries are protected and not unprotected.
+ *
+ * Second, when writes are not permitted it is also possible
+ * for the cache to grow without bound.
+ *
+ * Third, the user may choose to disable evictions -- causing
+ * the cache to grow without bound until evictions are
+ * re-enabled.
+ *
+ * Finally, we usually don't check to see if the cache is
+ * oversized at the end of an unprotect. As a result, it is
+ * possible to have a vastly oversized cache with no protected
+ * entries as long as all the protects precede the unprotects.
+ */
+ if (H5C__make_space_in_cache(f, space_needed, write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__make_space_in_cache failed")
+ } /* end if */
+
+        /* Insert the entry in the hash table.
+         *
+         * Set the flush_me_last field of the newly loaded entry before
+         * inserting it into the index. We must do this, as the index
+         * tracks the number of entries with the flush_last field set,
+         * but assumes that the field will not change after insertion
+         * into the index.
+         *
+         * Note that this means that the H5C__FLUSH_LAST_FLAG flag
+         * is ignored if the entry is already in cache.
+         */
+ entry_ptr->flush_me_last = flush_last;
+
+ H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, NULL)
+ if (entry_ptr->is_dirty && !entry_ptr->in_slist)
+ H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, NULL)
+
+ /* insert the entry in the data structures used by the replacement
+ * policy. We are just going to take it out again when we update
+ * the replacement policy for a protect, but this simplifies the
+ * code. If we do this often enough, we may want to optimize this.
+ */
+ H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, NULL)
+
+ /* Record that the entry was loaded, to trigger a notify callback later */
+ /* (After the entry is fully added to the cache) */
+ was_loaded = TRUE;
+ } /* end else */
+
+ HDassert(entry_ptr->addr == addr);
+ HDassert(entry_ptr->type == type);
+
+ if (entry_ptr->is_protected) {
+ if (read_only && entry_ptr->is_read_only) {
+ HDassert(entry_ptr->ro_ref_count > 0);
+ (entry_ptr->ro_ref_count)++;
+ } /* end if */
+ else
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Target already protected & not read only?!?")
+ } /* end if */
+ else {
+ H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, NULL)
+
+ entry_ptr->is_protected = TRUE;
+ if (read_only) {
+ entry_ptr->is_read_only = TRUE;
+ entry_ptr->ro_ref_count = 1;
+ } /* end if */
+ entry_ptr->dirtied = FALSE;
+ } /* end else */
+
+ H5C__UPDATE_CACHE_HIT_RATE_STATS(cache_ptr, hit)
+ H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit)
+
+ ret_value = thing;
+
+ if (cache_ptr->evictions_enabled &&
+ (cache_ptr->size_decreased ||
+ (cache_ptr->resize_enabled && (cache_ptr->cache_accesses >= cache_ptr->resize_ctl.epoch_length)))) {
+
+ if (!have_write_permitted) {
+ if (cache_ptr->check_write_permitted != NULL) {
+ if ((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Can't get write_permitted")
+ else
+ have_write_permitted = TRUE;
+ }
+ else {
+ write_permitted = cache_ptr->write_permitted;
+ have_write_permitted = TRUE;
+ }
+ }
+
+ if (cache_ptr->resize_enabled && (cache_ptr->cache_accesses >= cache_ptr->resize_ctl.epoch_length))
+ if (H5C__auto_adjust_cache_size(f, write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Cache auto-resize failed")
+
+ if (cache_ptr->size_decreased) {
+ cache_ptr->size_decreased = FALSE;
+
+ /* check to see if the cache is now oversized due to the cache
+ * size reduction. If it is, try to evict enough entries to
+ * bring the cache size down to the current maximum cache size.
+ *
+ * Also, if the min_clean_size requirement is not met, we
+ * should also call H5C__make_space_in_cache() to bring us
+ * into compliance.
+ */
+ if (cache_ptr->index_size >= cache_ptr->max_cache_size)
+ empty_space = 0;
+ else
+ empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
+
+ if ((cache_ptr->index_size > cache_ptr->max_cache_size) ||
+ ((empty_space + cache_ptr->clean_index_size) < cache_ptr->min_clean_size)) {
+
+ if (cache_ptr->index_size > cache_ptr->max_cache_size)
+ cache_ptr->cache_full = TRUE;
+
+ if (H5C__make_space_in_cache(f, (size_t)0, write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__make_space_in_cache failed")
+ }
+ } /* end if */
+ }
+
+ /* If we loaded the entry and the entry's type has a 'notify' callback, send
+ * an 'after load' notice now that the entry is fully integrated into
+ * the cache and protected. We must wait until it is protected so it is not
+ * evicted during the notify callback.
+ */
+ if (was_loaded)
+        /* If the entry's type has a 'notify' callback send an 'after load'
+ * notice now that the entry is fully integrated into the cache.
+ */
+ if (entry_ptr->type->notify && (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_LOAD, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, NULL,
+ "can't notify client about entry inserted into cache")
+
+#ifdef H5_HAVE_PARALLEL
+ /* Make sure the size of the collective entries in the cache remain in check */
+ if (coll_access) {
+ if (H5P_USER_TRUE == H5F_COLL_MD_READ(f)) {
+ if (cache_ptr->max_cache_size * 80 < cache_ptr->coll_list_size * 100)
+ if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "can't clear collective metadata entries")
+ } /* end if */
+ else {
+ if (cache_ptr->max_cache_size * 40 < cache_ptr->coll_list_size * 100)
+ if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "can't clear collective metadata entries")
+ } /* end else */
+ } /* end if */
+#endif /* H5_HAVE_PARALLEL */
+
+done:
+#ifdef H5C_DO_EXTREME_SANITY_CHECKS
+ if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
+ H5C__validate_lru_list(cache_ptr) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "an extreme sanity check failed on exit")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_protect() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_unpin_entry()
+ *
+ * Purpose: Unpin a cache entry. The entry can be either protected or
+ * unprotected at the time of call, but must be pinned.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 3/22/06
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_unpin_entry(void *_entry_ptr)
+{
+ H5C_t *cache_ptr;
+ H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)_entry_ptr; /* Pointer to entry to unpin */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity check */
+ HDassert(entry_ptr);
+ cache_ptr = entry_ptr->cache_ptr;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+#ifdef H5C_DO_EXTREME_SANITY_CHECKS
+ if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
+ H5C__validate_lru_list(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+ /* Unpin the entry */
+ if (H5C__unpin_entry_from_client(cache_ptr, entry_ptr, TRUE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry from client")
+
+done:
+#ifdef H5C_DO_EXTREME_SANITY_CHECKS
+ if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
+ H5C__validate_lru_list(cache_ptr) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_unpin_entry() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_unprotect
+ *
+ * Purpose: Undo an H5C_protect() call -- specifically, mark the
+ * entry as unprotected, remove it from the protected list,
+ * and give it back to the replacement policy.
+ *
+ * The TYPE and ADDR arguments must be the same as those in
+ * the corresponding call to H5C_protect() and the THING
+ * argument must be the value returned by that call to
+ * H5C_protect().
+ *
+ *              If the deleted flag is TRUE, simply remove the target entry
+ *              from the cache, clear it, and free it without writing it to
+ *              disk.
+ *
+ * Return:      Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 6/2/04
+ *
+ *-------------------------------------------------------------------------
+ */
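+/* Usage sketch (illustrative; `node` is hypothetical): a client that has
+ * modified a protected entry releases it with the dirtied flag set, while
+ * one discarding the entry combines the deleted and free-file-space flags:
+ *
+ *     if (H5C_unprotect(f, addr, node, H5C__DIRTIED_FLAG) < 0)
+ *         HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "can't unprotect entry")
+ *
+ *     if (H5C_unprotect(f, addr, node, H5C__DELETED_FLAG | H5C__FREE_FILE_SPACE_FLAG) < 0)
+ *         HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "can't expunge entry")
+ */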
+herr_t
+H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags)
+{
+ H5C_t *cache_ptr;
+ hbool_t deleted;
+ hbool_t dirtied;
+ hbool_t set_flush_marker;
+ hbool_t pin_entry;
+ hbool_t unpin_entry;
+ hbool_t free_file_space;
+ hbool_t take_ownership;
+ hbool_t was_clean;
+#ifdef H5_HAVE_PARALLEL
+ hbool_t clear_entry = FALSE;
+#endif /* H5_HAVE_PARALLEL */
+ H5C_cache_entry_t *entry_ptr;
+ H5C_cache_entry_t *test_entry_ptr;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ deleted = ((flags & H5C__DELETED_FLAG) != 0);
+ dirtied = ((flags & H5C__DIRTIED_FLAG) != 0);
+ set_flush_marker = ((flags & H5C__SET_FLUSH_MARKER_FLAG) != 0);
+ pin_entry = ((flags & H5C__PIN_ENTRY_FLAG) != 0);
+ unpin_entry = ((flags & H5C__UNPIN_ENTRY_FLAG) != 0);
+ free_file_space = ((flags & H5C__FREE_FILE_SPACE_FLAG) != 0);
+ take_ownership = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0);
+
+ HDassert(f);
+ HDassert(f->shared);
+
+ cache_ptr = f->shared->cache;
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(H5F_addr_defined(addr));
+ HDassert(thing);
+ HDassert(!(pin_entry && unpin_entry));
+
+ /* deleted flag must accompany free_file_space */
+ HDassert((!free_file_space) || (deleted));
+
+ /* deleted flag must accompany take_ownership */
+ HDassert((!take_ownership) || (deleted));
+
+ /* can't have both free_file_space & take_ownership */
+ HDassert(!(free_file_space && take_ownership));
+
+ entry_ptr = (H5C_cache_entry_t *)thing;
+ HDassert(entry_ptr->addr == addr);
+
+ /* also set the dirtied variable if the dirtied field is set in
+ * the entry.
+ */
+ dirtied |= entry_ptr->dirtied;
+ was_clean = !(entry_ptr->is_dirty);
+
+#ifdef H5C_DO_EXTREME_SANITY_CHECKS
+ if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
+ H5C__validate_lru_list(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+ /* if the entry has multiple read only protects, just decrement
+     * the ro_ref_count. Don't actually unprotect until the ref count
+ * drops to zero.
+ */
+ if (entry_ptr->ro_ref_count > 1) {
+ /* Sanity check */
+ HDassert(entry_ptr->is_protected);
+ HDassert(entry_ptr->is_read_only);
+
+ if (dirtied)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Read only entry modified??")
+
+ /* Reduce the RO ref count */
+ (entry_ptr->ro_ref_count)--;
+
+ /* Pin or unpin the entry as requested. */
+ if (pin_entry) {
+ /* Pin the entry from a client */
+ if (H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client")
+ }
+ else if (unpin_entry) {
+ /* Unpin the entry from a client */
+ if (H5C__unpin_entry_from_client(cache_ptr, entry_ptr, FALSE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry by client")
+ } /* end if */
+ }
+ else {
+ if (entry_ptr->is_read_only) {
+ /* Sanity check */
+ HDassert(entry_ptr->ro_ref_count == 1);
+
+ if (dirtied)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Read only entry modified??")
+
+ entry_ptr->is_read_only = FALSE;
+ entry_ptr->ro_ref_count = 0;
+ } /* end if */
+
+#ifdef H5_HAVE_PARALLEL
+ /* When the H5C code is used to implement the metadata cache in the
+ * PHDF5 case, only the cache on process 0 is allowed to write to file.
+ * All the other metadata caches must hold dirty entries until they
+ * are told that the entries are clean.
+ *
+ * The clear_on_unprotect flag in the H5C_cache_entry_t structure
+ * exists to deal with the case in which an entry is protected when
+ * its cache receives word that the entry is now clean. In this case,
+ * the clear_on_unprotect flag is set, and the entry is flushed with
+ * the H5C__FLUSH_CLEAR_ONLY_FLAG.
+ *
+ * All this is a bit awkward, but until the metadata cache entries
+ * are contiguous, with only one dirty flag, we have to let the supplied
+         * functions deal with resetting the is_dirty flag.
+ */
+ if (entry_ptr->clear_on_unprotect) {
+ /* Sanity check */
+ HDassert(entry_ptr->is_dirty);
+
+ entry_ptr->clear_on_unprotect = FALSE;
+ if (!dirtied)
+ clear_entry = TRUE;
+ } /* end if */
+#endif /* H5_HAVE_PARALLEL */
+
+ if (!entry_ptr->is_protected)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Entry already unprotected??")
+
+ /* Mark the entry as dirty if appropriate */
+ entry_ptr->is_dirty = (entry_ptr->is_dirty || dirtied);
+ if (dirtied && entry_ptr->image_up_to_date) {
+ entry_ptr->image_up_to_date = FALSE;
+ if (entry_ptr->flush_dep_nparents > 0)
+ if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "Can't propagate serialization status to fd parents")
+ } /* end if */
+
+ /* Check for newly dirtied entry */
+ if (was_clean && entry_ptr->is_dirty) {
+ /* Update index for newly dirtied entry */
+ H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr, FAIL)
+
+            /* If the entry's type has a 'notify' callback send an
+ * 'entry dirtied' notice now that the entry is fully
+ * integrated into the cache.
+ */
+ if (entry_ptr->type->notify &&
+ (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag set")
+
+ /* Propagate the flush dep dirty flag up the flush dependency chain
+ * if appropriate
+ */
+ if (entry_ptr->flush_dep_nparents > 0)
+ if (H5C__mark_flush_dep_dirty(entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
+ } /* end if */
+ /* Check for newly clean entry */
+ else if (!was_clean && !entry_ptr->is_dirty) {
+
+            /* If the entry's type has a 'notify' callback send an
+ * 'entry cleaned' notice now that the entry is fully
+ * integrated into the cache.
+ */
+ if (entry_ptr->type->notify &&
+ (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify client about entry dirty flag cleared")
+
+ /* Propagate the flush dep clean flag up the flush dependency chain
+ * if appropriate
+ */
+ if (entry_ptr->flush_dep_nparents > 0)
+ if (H5C__mark_flush_dep_clean(entry_ptr) < 0)
+                    HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Can't propagate flush dep clean flag")
+ } /* end else-if */
+
+ /* Pin or unpin the entry as requested. */
+ if (pin_entry) {
+ /* Pin the entry from a client */
+ if (H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client")
+ }
+ else if (unpin_entry) {
+ /* Unpin the entry from a client */
+ if (H5C__unpin_entry_from_client(cache_ptr, entry_ptr, FALSE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry by client")
+ } /* end if */
+
+ /* H5C__UPDATE_RP_FOR_UNPROTECT will place the unprotected entry on
+ * the pinned entry list if entry_ptr->is_pinned is TRUE.
+ */
+ H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, FAIL)
+
+ entry_ptr->is_protected = FALSE;
+
+ /* if the entry is dirty, 'or' its flush_marker with the set flush flag,
+ * and then add it to the skip list if it isn't there already.
+ */
+ if (entry_ptr->is_dirty) {
+ entry_ptr->flush_marker |= set_flush_marker;
+ if (!entry_ptr->in_slist)
+ /* this is a no-op if cache_ptr->slist_enabled is FALSE */
+ H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
+ } /* end if */
+
+ /* This implementation of the "deleted" option is a bit inefficient, as
+ * we re-insert the entry to be deleted into the replacement policy
+         * data structures, only to remove it again. Depending on how often
+ * we do this, we may want to optimize a bit.
+ */
+ if (deleted) {
+ unsigned flush_flags = (H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__FLUSH_INVALIDATE_FLAG);
+
+ /* verify that the target entry is in the cache. */
+ H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL)
+
+ if (test_entry_ptr == NULL)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?")
+ else if (test_entry_ptr != entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL,
+ "hash table contains multiple entries for addr?!?")
+
+ /* Set the 'free file space' flag for the flush, if needed */
+ if (free_file_space)
+ flush_flags |= H5C__FREE_FILE_SPACE_FLAG;
+
+ /* Set the "take ownership" flag for the flush, if needed */
+ if (take_ownership)
+ flush_flags |= H5C__TAKE_OWNERSHIP_FLAG;
+
+ /* Delete the entry from the skip list on destroy */
+ flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG;
+
+ HDassert((!cache_ptr->slist_enabled) || (((!was_clean) || dirtied) == (entry_ptr->in_slist)));
+
+ if (H5C__flush_single_entry(f, entry_ptr, flush_flags) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't flush entry")
+ } /* end if */
+#ifdef H5_HAVE_PARALLEL
+ else if (clear_entry) {
+ /* Verify that the target entry is in the cache. */
+ H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL)
+
+ if (test_entry_ptr == NULL)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?")
+ else if (test_entry_ptr != entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL,
+ "hash table contains multiple entries for addr?!?")
+
+ if (H5C__flush_single_entry(f, entry_ptr,
+ H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't clear entry")
+ } /* end else if */
+#endif /* H5_HAVE_PARALLEL */
+ }
+
+ H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr)
+
+done:
+#ifdef H5C_DO_EXTREME_SANITY_CHECKS
+ if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
+ H5C__validate_lru_list(cache_ptr) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_unprotect() */
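
For reference, a minimal sketch of the protect/unprotect cycle that exercises the flag handling above -- assuming the usual H5C-style error scaffolding and caller-supplied f, type, addr, and udata; an illustration, not library code:

    void *thing;

    /* Acquire exclusive access to the entry */
    if (NULL == (thing = H5C_protect(f, type, addr, udata, H5C__NO_FLAGS_SET)))
        HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, FAIL, "can't protect entry")

    /* ... modify the entry's native representation in place ... */

    /* Release the entry; H5C__DIRTIED_FLAG routes it through the
     * newly-dirtied logic above and onto the skip list.
     */
    if (H5C_unprotect(f, addr, thing, H5C__DIRTIED_FLAG) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "can't unprotect entry")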
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_unsettle_entry_ring
+ *
+ * Purpose: Advise the metadata cache that the specified entry's free space
+ * manager ring is no longer settled (if it was on entry).
+ *
+ * If the target free space manager ring is already
+ * unsettled, do nothing, and return SUCCEED.
+ *
+ * If the target free space manager ring is settled, and
+ * we are not in the process of a file shutdown, mark
+ * the ring as unsettled, and return SUCCEED.
+ *
+ * If the target free space manager ring is settled, and we
+ * are in the process of a file shutdown, post an error
+ * message, and return FAIL.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * January 3, 2017
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_unsettle_entry_ring(void *_entry)
+{
+ H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_entry; /* Entry whose ring to unsettle */
+ H5C_t *cache; /* Cache for file */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(entry);
+ HDassert(entry->ring != H5C_RING_UNDEFINED);
+ HDassert((H5C_RING_USER == entry->ring) || (H5C_RING_RDFSM == entry->ring) ||
+ (H5C_RING_MDFSM == entry->ring));
+ cache = entry->cache_ptr;
+ HDassert(cache);
+ HDassert(cache->magic == H5C__H5C_T_MAGIC);
+
+ switch (entry->ring) {
+ case H5C_RING_USER:
+ /* Do nothing */
+ break;
+
+ case H5C_RING_RDFSM:
+ if (cache->rdfsm_settled) {
+ if (cache->flush_in_progress || cache->close_warning_received)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected rdfsm ring unsettle")
+ cache->rdfsm_settled = FALSE;
+ } /* end if */
+ break;
+
+ case H5C_RING_MDFSM:
+ if (cache->mdfsm_settled) {
+ if (cache->flush_in_progress || cache->close_warning_received)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected mdfsm ring unsettle")
+ cache->mdfsm_settled = FALSE;
+ } /* end if */
+ break;
+
+ default:
+ HDassert(FALSE); /* this should be un-reachable */
+ break;
+ } /* end switch */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_unsettle_entry_ring() */
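
A minimal usage sketch: a client that has just dirtied a free space manager entry outside of a flush reports that the entry's ring is no longer settled (fsm_entry is a hypothetical pointer to that entry):

    if (H5C_unsettle_entry_ring(fsm_entry) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't unsettle entry ring")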
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_create_flush_dependency()
+ *
+ * Purpose: Initiates a parent<->child entry flush dependency. The parent
+ * entry must be pinned or protected at the time of call, and must
+ * have all dependencies removed before the cache can shut down.
+ *
+ * Note: Flush dependencies in the cache indicate that a child entry
+ * must be flushed to the file before its parent. (This is
+ * currently used to implement Single-Writer/Multiple-Reader (SWMR)
+ * I/O access for data structures in the file).
+ *
+ * Creating a flush dependency between two entries will also pin
+ * the parent entry.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * 3/05/09
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_create_flush_dependency(void *parent_thing, void *child_thing)
+{
+ H5C_t *cache_ptr;
+ H5C_cache_entry_t *parent_entry = (H5C_cache_entry_t *)parent_thing; /* Ptr to parent thing's entry */
+ H5C_cache_entry_t *child_entry = (H5C_cache_entry_t *)child_thing; /* Ptr to child thing's entry */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(parent_entry);
+ HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(H5F_addr_defined(parent_entry->addr));
+ HDassert(child_entry);
+ HDassert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(H5F_addr_defined(child_entry->addr));
+ cache_ptr = parent_entry->cache_ptr;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr == child_entry->cache_ptr);
+#ifndef NDEBUG
+ /* Make sure the parent is not already a parent */
+ {
+ unsigned u;
+
+ for (u = 0; u < child_entry->flush_dep_nparents; u++)
+ HDassert(child_entry->flush_dep_parent[u] != parent_entry);
+ } /* end block */
+#endif
+
+ /* More sanity checks */
+ if (child_entry == parent_entry)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Child entry flush dependency parent can't be itself")
+ if (!(parent_entry->is_protected || parent_entry->is_pinned))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Parent entry isn't pinned or protected")
+
+ /* Check for parent not pinned */
+ if (!parent_entry->is_pinned) {
+ /* Sanity check */
+ HDassert(parent_entry->flush_dep_nchildren == 0);
+ HDassert(!parent_entry->pinned_from_client);
+ HDassert(!parent_entry->pinned_from_cache);
+
+ /* Pin the parent entry */
+ parent_entry->is_pinned = TRUE;
+ H5C__UPDATE_STATS_FOR_PIN(cache_ptr, parent_entry)
+ } /* end if */
+
+ /* Mark the entry as pinned from the cache's action (possibly redundantly) */
+ parent_entry->pinned_from_cache = TRUE;
+
+ /* Check if we need to resize the child's parent array */
+ if (child_entry->flush_dep_nparents >= child_entry->flush_dep_parent_nalloc) {
+ if (child_entry->flush_dep_parent_nalloc == 0) {
+ /* Array does not exist yet, allocate it */
+ HDassert(!child_entry->flush_dep_parent);
+
+ if (NULL == (child_entry->flush_dep_parent =
+ H5FL_SEQ_MALLOC(H5C_cache_entry_ptr_t, H5C_FLUSH_DEP_PARENT_INIT)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
+ "memory allocation failed for flush dependency parent list")
+ child_entry->flush_dep_parent_nalloc = H5C_FLUSH_DEP_PARENT_INIT;
+ } /* end if */
+ else {
+ /* Resize existing array */
+ HDassert(child_entry->flush_dep_parent);
+
+ if (NULL == (child_entry->flush_dep_parent =
+ H5FL_SEQ_REALLOC(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent,
+ 2 * child_entry->flush_dep_parent_nalloc)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
+ "memory allocation failed for flush dependency parent list")
+ child_entry->flush_dep_parent_nalloc *= 2;
+ } /* end else */
+ cache_ptr->entry_fd_height_change_counter++;
+ } /* end if */
+
+ /* Add the dependency to the child's parent array */
+ child_entry->flush_dep_parent[child_entry->flush_dep_nparents] = parent_entry;
+ child_entry->flush_dep_nparents++;
+
+ /* Increment parent's number of children */
+ parent_entry->flush_dep_nchildren++;
+
+ /* Adjust the number of dirty children */
+ if (child_entry->is_dirty) {
+ /* Sanity check */
+ HDassert(parent_entry->flush_dep_ndirty_children < parent_entry->flush_dep_nchildren);
+
+ parent_entry->flush_dep_ndirty_children++;
+
+ /* If the parent has a 'notify' callback, send a 'child entry dirtied' notice */
+ if (parent_entry->type->notify &&
+ (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_DIRTIED, parent_entry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify parent about child entry dirty flag set")
+ } /* end if */
+
+ /* Adjust the parent's number of unserialized children. Note
+ * that it is possible for an entry to be clean and unserialized.
+ */
+ if (!child_entry->image_up_to_date) {
+ HDassert(parent_entry->flush_dep_nunser_children < parent_entry->flush_dep_nchildren);
+
+ parent_entry->flush_dep_nunser_children++;
+
+ /* If the parent has a 'notify' callback, send a 'child entry unserialized' notice */
+ if (parent_entry->type->notify &&
+ (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED, parent_entry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify parent about child entry serialized flag reset")
+ } /* end if */
+
+ /* Post-conditions, for successful operation */
+ HDassert(parent_entry->is_pinned);
+ HDassert(parent_entry->flush_dep_nchildren > 0);
+ HDassert(child_entry->flush_dep_parent);
+ HDassert(child_entry->flush_dep_nparents > 0);
+ HDassert(child_entry->flush_dep_parent_nalloc > 0);
+#ifndef NDEBUG
+ H5C__assert_flush_dep_nocycle(parent_entry, child_entry);
+#endif
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_create_flush_dependency() */
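
A minimal sketch of the intended create/destroy pairing, assuming parent and child are client "things" backed by cache entries and that parent is already pinned or protected:

    /* Require that child reach the file before parent (e.g. for SWMR) */
    if (H5C_create_flush_dependency(parent, child) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "can't create flush dependency")

    /* ... perform the ordered updates ... */

    /* Tear the dependency down; the parent is unpinned automatically
     * once its last child dependency is removed (see below)
     */
    if (H5C_destroy_flush_dependency(parent, child) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "can't destroy flush dependency")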
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_destroy_flush_dependency()
+ *
+ * Purpose: Terminates a parent<->child entry flush dependency. The
+ * parent entry must be pinned.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * 3/05/09
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_destroy_flush_dependency(void *parent_thing, void *child_thing)
+{
+ H5C_t *cache_ptr;
+ H5C_cache_entry_t *parent_entry = (H5C_cache_entry_t *)parent_thing; /* Ptr to parent entry */
+ H5C_cache_entry_t *child_entry = (H5C_cache_entry_t *)child_thing; /* Ptr to child entry */
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(parent_entry);
+ HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(H5F_addr_defined(parent_entry->addr));
+ HDassert(child_entry);
+ HDassert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(H5F_addr_defined(child_entry->addr));
+ cache_ptr = parent_entry->cache_ptr;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr == child_entry->cache_ptr);
+
+ /* Usage checks */
+ if (!parent_entry->is_pinned)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry isn't pinned")
+ if (NULL == child_entry->flush_dep_parent)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL,
+ "Child entry doesn't have a flush dependency parent array")
+ if (0 == parent_entry->flush_dep_nchildren)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL,
+ "Parent entry flush dependency ref. count has no child dependencies")
+
+ /* Search for parent in child's parent array. This is a linear search
+ * because we do not expect large numbers of parents. If this changes, we
+ * may wish to change the parent array to a skip list */
+ for (u = 0; u < child_entry->flush_dep_nparents; u++)
+ if (child_entry->flush_dep_parent[u] == parent_entry)
+ break;
+ if (u == child_entry->flush_dep_nparents)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL,
+ "Parent entry isn't a flush dependency parent for child entry")
+
+ /* Remove parent entry from child's parent array */
+ if (u < (child_entry->flush_dep_nparents - 1))
+ HDmemmove(&child_entry->flush_dep_parent[u], &child_entry->flush_dep_parent[u + 1],
+ (child_entry->flush_dep_nparents - u - 1) * sizeof(child_entry->flush_dep_parent[0]));
+ child_entry->flush_dep_nparents--;
+
+ /* Adjust parent entry's nchildren and unpin parent if it goes to zero */
+ parent_entry->flush_dep_nchildren--;
+ if (0 == parent_entry->flush_dep_nchildren) {
+ /* Sanity check */
+ HDassert(parent_entry->pinned_from_cache);
+
+ /* Check if we should unpin parent entry now */
+ if (!parent_entry->pinned_from_client)
+ if (H5C__unpin_entry_real(cache_ptr, parent_entry, TRUE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry")
+
+ /* Mark the entry as unpinned from the cache's action */
+ parent_entry->pinned_from_cache = FALSE;
+ } /* end if */
+
+ /* Adjust parent entry's ndirty_children */
+ if (child_entry->is_dirty) {
+ /* Sanity check */
+ HDassert(parent_entry->flush_dep_ndirty_children > 0);
+
+ parent_entry->flush_dep_ndirty_children--;
+
+ /* If the parent has a 'notify' callback, send a 'child entry cleaned' notice */
+ if (parent_entry->type->notify &&
+ (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_CLEANED, parent_entry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify parent about child entry dirty flag reset")
+ } /* end if */
+
+ /* adjust parent entry's number of unserialized children */
+ if (!child_entry->image_up_to_date) {
+ HDassert(parent_entry->flush_dep_nunser_children > 0);
+
+ parent_entry->flush_dep_nunser_children--;
+
+ /* If the parent has a 'notify' callback, send a 'child entry serialized' notice */
+ if (parent_entry->type->notify &&
+ (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_SERIALIZED, parent_entry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify parent about child entry serialized flag set")
+ } /* end if */
+
+ /* Shrink or free the parent array if appropriate */
+ if (child_entry->flush_dep_nparents == 0) {
+ child_entry->flush_dep_parent = H5FL_SEQ_FREE(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent);
+ child_entry->flush_dep_parent_nalloc = 0;
+ } /* end if */
+ else if (child_entry->flush_dep_parent_nalloc > H5C_FLUSH_DEP_PARENT_INIT &&
+ child_entry->flush_dep_nparents <= (child_entry->flush_dep_parent_nalloc / 4)) {
+ if (NULL == (child_entry->flush_dep_parent =
+ H5FL_SEQ_REALLOC(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent,
+ child_entry->flush_dep_parent_nalloc / 4)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
+ "memory allocation failed for flush dependency parent list")
+ child_entry->flush_dep_parent_nalloc /= 4;
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_destroy_flush_dependency() */
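
The flush_dep_parent array sizing policy used by the create/destroy pair above, restated in isolation as a sketch (hypothetical helper; H5C_FLUSH_DEP_PARENT_INIT is the initial allocation used above):

    static size_t
    next_parent_nalloc(size_t nalloc, size_t nparents, hbool_t growing)
    {
        /* Grow by doubling on demand, starting from the initial size */
        if (growing)
            return (0 == nalloc) ? H5C_FLUSH_DEP_PARENT_INIT : 2 * nalloc;

        /* The array is freed outright once the last parent is removed */
        if (0 == nparents)
            return 0;

        /* Shrink to a quarter only when occupancy falls to <= nalloc / 4
         * and the array is above its initial size -- the hysteresis
         * avoids realloc thrash when one dependency is repeatedly
         * added and removed.
         */
        if (nalloc > H5C_FLUSH_DEP_PARENT_INIT && nparents <= nalloc / 4)
            return nalloc / 4;

        return nalloc;
    }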
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_expunge_entry
+ *
+ * Purpose: Expunge an entry from the cache without writing it to disk
+ * even if it is dirty. The entry may not be either pinned or
+ * protected.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 6/29/06
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_expunge_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, unsigned flags)
+{
+ H5C_t *cache_ptr;
+ H5C_cache_entry_t *entry_ptr = NULL;
+ unsigned flush_flags = (H5C__FLUSH_INVALIDATE_FLAG | H5C__FLUSH_CLEAR_ONLY_FLAG);
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(type);
+ HDassert(H5F_addr_defined(addr));
+
+#ifdef H5C_DO_EXTREME_SANITY_CHECKS
+ if (H5C__validate_lru_list(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on entry")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+ /* Look for entry in cache */
+ H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
+ if ((entry_ptr == NULL) || (entry_ptr->type != type))
+ /* the target doesn't exist in the cache, so we are done. */
+ HGOTO_DONE(SUCCEED)
+
+ HDassert(entry_ptr->addr == addr);
+ HDassert(entry_ptr->type == type);
+
+ /* Check for entry being pinned or protected */
+ if (entry_ptr->is_protected)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is protected")
+ if (entry_ptr->is_pinned)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is pinned")
+
+ /* If we get this far, call H5C__flush_single_entry() with the
+ * H5C__FLUSH_INVALIDATE_FLAG and the H5C__FLUSH_CLEAR_ONLY_FLAG.
+ * This will clear the entry, and then delete it from the cache.
+ */
+
+ /* Pass along 'free file space' flag */
+ flush_flags |= (flags & H5C__FREE_FILE_SPACE_FLAG);
+
+ /* Delete the entry from the skip list on destroy */
+ flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG;
+
+ if (H5C__flush_single_entry(f, entry_ptr, flush_flags) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "can't flush entry")
+
+done:
+#ifdef H5C_DO_EXTREME_SANITY_CHECKS
+ if (H5C__validate_lru_list(cache_ptr) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on exit")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_expunge_entry() */
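
A minimal call sketch, assuming f, type, and addr identify an unpinned, unprotected entry whose backing file space should be released along with the entry:

    if (H5C_expunge_entry(f, type, addr, H5C__FREE_FILE_SPACE_FLAG) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "can't expunge entry")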
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_remove_entry
+ *
+ * Purpose: Remove an entry from the cache. The entry must not be
+ * protected, pinned, dirty, or involved in flush dependencies.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * September 17, 2016
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_remove_entry(void *_entry)
+{
+ H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_entry; /* Entry to remove */
+ H5C_t *cache; /* Cache for file */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(entry);
+ HDassert(entry->ring != H5C_RING_UNDEFINED);
+ cache = entry->cache_ptr;
+ HDassert(cache);
+ HDassert(cache->magic == H5C__H5C_T_MAGIC);
+
+ /* Check for error conditions */
+ if (entry->is_dirty)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove dirty entry from cache")
+ if (entry->is_protected)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove protected entry from cache")
+ if (entry->is_pinned)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove pinned entry from cache")
+ /* NOTE: If these two errors are getting tripped because the entry is
+ * in a flush dependency with a freedspace entry, move the checks
+ * after the "before evict" message is sent, and add the
+ * "child being evicted" message to the "before evict" notify
+ * section below. QAK - 2017/08/03
+ */
+ if (entry->flush_dep_nparents > 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL,
+ "can't remove entry with flush dependency parents from cache")
+ if (entry->flush_dep_nchildren > 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL,
+ "can't remove entry with flush dependency children from cache")
+
+ /* Additional internal cache consistency checks */
+ HDassert(!entry->in_slist);
+ HDassert(!entry->flush_marker);
+ HDassert(!entry->flush_in_progress);
+
+ /* Note that the algorithm below is (very) similar to the set of operations
+ * in H5C__flush_single_entry() and should be kept in sync with changes
+ * to that code. - QAK, 2016/11/30
+ */
+
+ /* Update stats, as if we are "destroying" and taking ownership of the entry */
+ H5C__UPDATE_STATS_FOR_EVICTION(cache, entry, TRUE)
+
+ /* If the entry's type has a 'notify' callback, send a 'before eviction'
+ * notice while the entry is still fully integrated in the cache.
+ */
+ if (entry->type->notify && (entry->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT, entry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry to evict")
+
+ /* Update the cache internal data structures as appropriate for a destroy.
+ * Specifically:
+ * 1) Delete it from the index
+ * 2) Delete it from the collective read access list
+ * 3) Update the replacement policy for eviction
+ * 4) Remove it from the tag list for this object
+ */
+
+ H5C__DELETE_FROM_INDEX(cache, entry, FAIL)
+
+#ifdef H5_HAVE_PARALLEL
+ /* Check for collective read access flag */
+ if (entry->coll_access) {
+ entry->coll_access = FALSE;
+ H5C__REMOVE_FROM_COLL_LIST(cache, entry, FAIL)
+ } /* end if */
+#endif /* H5_HAVE_PARALLEL */
+
+ H5C__UPDATE_RP_FOR_EVICTION(cache, entry, FAIL)
+
+ /* Remove entry from tag list */
+ if (H5C__untag_entry(cache, entry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list")
+
+ /* Increment entries_removed_counter and set last_entry_removed_ptr.
+ * As we may be about to free the entry, recall that last_entry_removed_ptr
+ * must NEVER be dereferenced.
+ *
+ * Recall that these fields are maintained to allow functions that perform
+ * scans of lists of entries to detect the unexpected removal of entries
+ * (via expunge, eviction, or take ownership at present), so that they can
+ * re-start their scans if necessary.
+ *
+ * Also check if the entry we are watching for removal is being
+ * removed (usually the 'next' entry for an iteration) and reset
+ * it to indicate that it was removed.
+ */
+ cache->entries_removed_counter++;
+ cache->last_entry_removed_ptr = entry;
+ if (entry == cache->entry_watched_for_removal)
+ cache->entry_watched_for_removal = NULL;
+
+ /* Internal cache data structures should now be up to date, and
+ * consistent with the status of the entry.
+ *
+ * Now clean up internal cache fields if appropriate.
+ */
+
+ /* Free the buffer for the on disk image */
+ if (entry->image_ptr != NULL)
+ entry->image_ptr = H5MM_xfree(entry->image_ptr);
+
+ /* Reset the pointer to the cache the entry is within */
+ entry->cache_ptr = NULL;
+
+ /* Client is taking ownership of the entry. Set bad magic here so the
+ * cache will choke unless the entry is re-inserted properly
+ */
+ entry->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_remove_entry() */
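
A minimal call sketch, assuming thing points to a clean, unpinned, unprotected entry with no flush dependencies that the client now takes ownership of:

    if (H5C_remove_entry(thing) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't take ownership of entry")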
diff --git a/src/H5Cimage.c b/src/H5Cimage.c
index 6d211a4..df60d00 100644
--- a/src/H5Cimage.c
+++ b/src/H5Cimage.c
@@ -105,21 +105,11 @@
(cache_ptr)->images_loaded++; \
(cache_ptr)->last_image_size = (cache_ptr)->image_len; \
}
-#define H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, dirty) \
-{ \
- (cache_ptr)->prefetches++; \
- if (dirty) \
- (cache_ptr)->dirty_prefetches++; \
-}
-#define H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr) \
- (cache_ptr)->prefetch_hits++;
/* clang-format on */
#else /* H5C_COLLECT_CACHE_STATS */
#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_CREATE(cache_ptr)
#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_READ(cache_ptr)
#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_LOAD(cache_ptr)
-#define H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, dirty)
-#define H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr)
#endif /* H5C_COLLECT_CACHE_STATS */
/******************/
@@ -138,8 +128,6 @@ static herr_t H5C__decode_cache_image_header(const H5F_t *f, H5C_t *cache_ptr, c
static herr_t H5C__decode_cache_image_entry(const H5F_t *f, const H5C_t *cache_ptr, const uint8_t **buf,
unsigned entry_num);
#endif
-static herr_t H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr, H5C_cache_entry_t *pf_entry_ptr,
- H5C_cache_entry_t **fd_children);
static herr_t H5C__encode_cache_image_header(const H5F_t *f, const H5C_t *cache_ptr, uint8_t **buf);
static herr_t H5C__encode_cache_image_entry(H5F_t *f, H5C_t *cache_ptr, uint8_t **buf, unsigned entry_num);
static herr_t H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr);
@@ -171,7 +159,6 @@ H5FL_DEFINE(H5C_cache_entry_t);
/*******************/
/*-------------------------------------------------------------------------
- *
* Function: H5C_cache_image_pending()
*
* Purpose: Tests to see if the load of a metadata cache image
@@ -460,375 +447,6 @@ done:
} /* H5C__generate_cache_image() */
/*-------------------------------------------------------------------------
- * Function: H5C__deserialize_prefetched_entry()
- *
- * Purpose: Deserialize the supplied prefetched entry, and return
- * a pointer to the deserialized entry in *entry_ptr_ptr.
- * If successful, remove the prefetched entry from the cache,
- * and free it. Insert the deserialized entry into the cache.
- *
- * Note that the on disk image of the entry is not freed --
- * a pointer to it is stored in the deserialized entry's
- * image_ptr field, and its image_up_to_date field is set to
- * TRUE unless the entry is dirtied by the deserialize call.
- *
- * If the prefetched entry is a flush dependency child,
- * destroy that flush dependency prior to calling the
- * deserialize callback. If appropriate, the flush dependency
- * relationship will be recreated by the cache client.
- *
- * If the prefetched entry is a flush dependency parent,
- * destroy the flush dependency relationship with all its
- * children. As all these children must be prefetched entries,
- * recreate these flush dependency relationships with
- * deserialized entry after it is inserted in the cache.
- *
- * Since deserializing a prefetched entry is semantically
- * equivalent to a load, issue an entry loaded notification
- * if the notify callback is defined.
- *
- * Return: SUCCEED on success, and FAIL on failure.
- *
- * Note that *entry_ptr_ptr is undefined on failure.
- *
- * Programmer: John Mainzer, 8/10/15
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t **entry_ptr_ptr,
- const H5C_class_t *type, haddr_t addr, void *udata)
-{
- hbool_t dirty = FALSE; /* Flag indicating whether thing was
- * dirtied during deserialize
- */
- size_t len; /* Size of image in file */
- void *thing = NULL; /* Pointer to thing loaded */
- H5C_cache_entry_t *pf_entry_ptr; /* pointer to the prefetched entry */
- /* supplied in *entry_ptr_ptr. */
- H5C_cache_entry_t *ds_entry_ptr; /* Alias for thing loaded, as cache
- * entry
- */
- H5C_cache_entry_t **fd_children = NULL; /* Pointer to a dynamically */
- /* allocated array of pointers to */
- /* the flush dependency children of */
- /* the prefetched entry, or NULL if */
- /* that array does not exist. */
- unsigned flush_flags = (H5C__FLUSH_INVALIDATE_FLAG | H5C__FLUSH_CLEAR_ONLY_FLAG);
- int i;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- /* sanity checks */
- HDassert(f);
- HDassert(f->shared);
- HDassert(f->shared->cache);
- HDassert(f->shared->cache == cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(entry_ptr_ptr);
- HDassert(*entry_ptr_ptr);
- pf_entry_ptr = *entry_ptr_ptr;
- HDassert(pf_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(pf_entry_ptr->type);
- HDassert(pf_entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID);
- HDassert(pf_entry_ptr->prefetched);
- HDassert(pf_entry_ptr->image_up_to_date);
- HDassert(pf_entry_ptr->image_ptr);
- HDassert(pf_entry_ptr->size > 0);
- HDassert(pf_entry_ptr->addr == addr);
- HDassert(type);
- HDassert(type->id == pf_entry_ptr->prefetch_type_id);
- HDassert(type->mem_type == cache_ptr->class_table_ptr[type->id]->mem_type);
-
- /* verify absence of prohibited or unsupported type flag combinations */
- HDassert(!(type->flags & H5C__CLASS_SKIP_READS));
-
- /* Can't see how skip reads could be usefully combined with
- * the speculative read flag. Hence disallow.
- */
- HDassert(!((type->flags & H5C__CLASS_SKIP_READS) && (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)));
- HDassert(H5F_addr_defined(addr));
- HDassert(type->get_initial_load_size);
- HDassert(type->deserialize);
-
- /* if *pf_entry_ptr is a flush dependency child, destroy all such
- * relationships now. The client will restore the relationship(s) with
- * the deserialized entry if appropriate.
- */
- HDassert(pf_entry_ptr->fd_parent_count == pf_entry_ptr->flush_dep_nparents);
- for (i = (int)(pf_entry_ptr->fd_parent_count) - 1; i >= 0; i--) {
- HDassert(pf_entry_ptr->flush_dep_parent);
- HDassert(pf_entry_ptr->flush_dep_parent[i]);
- HDassert(pf_entry_ptr->flush_dep_parent[i]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(pf_entry_ptr->flush_dep_parent[i]->flush_dep_nchildren > 0);
- HDassert(pf_entry_ptr->fd_parent_addrs);
- HDassert(pf_entry_ptr->flush_dep_parent[i]->addr == pf_entry_ptr->fd_parent_addrs[i]);
-
- if (H5C_destroy_flush_dependency(pf_entry_ptr->flush_dep_parent[i], pf_entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "can't destroy pf entry parent flush dependency")
-
- pf_entry_ptr->fd_parent_addrs[i] = HADDR_UNDEF;
- } /* end for */
- HDassert(pf_entry_ptr->flush_dep_nparents == 0);
-
- /* If *pf_entry_ptr is a flush dependency parent, destroy its flush
- * dependency relationships with all its children (which must be
- * prefetched entries as well).
- *
- * These flush dependency relationships will have to be restored
- * after the deserialized entry is inserted into the cache in order
- * to transfer these relationships to the new entry. Hence save the
- * pointers to the flush dependency children of *pf_entry_ptr for later
- * use.
- */
- if (pf_entry_ptr->fd_child_count > 0) {
- if (NULL == (fd_children = (H5C_cache_entry_t **)H5MM_calloc(
- sizeof(H5C_cache_entry_t **) * (size_t)(pf_entry_ptr->fd_child_count + 1))))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for fd child ptr array")
-
- if (H5C__destroy_pf_entry_child_flush_deps(cache_ptr, pf_entry_ptr, fd_children) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL,
- "can't destroy pf entry child flush dependency(s).")
- } /* end if */
-
- /* Since the size of the on disk image is known exactly, there is
- * no need for either a call to the get_initial_load_size() callback,
- * or retries if the H5C__CLASS_SPECULATIVE_LOAD_FLAG flag is set.
- * Similarly, there is no need to clamp possible reads beyond
- * EOF.
- */
- len = pf_entry_ptr->size;
-
- /* Deserialize the prefetched on-disk image of the entry into the
- * native memory form
- */
- if (NULL == (thing = type->deserialize(pf_entry_ptr->image_ptr, len, udata, &dirty)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, FAIL, "Can't deserialize image")
- ds_entry_ptr = (H5C_cache_entry_t *)thing;
-
- /* In general, an entry should be clean just after it is loaded.
- *
- * However, when this code is used in the metadata cache, it is
- * possible that object headers will be dirty at this point, as
- * the deserialize function will alter object headers if necessary to
- * fix an old bug.
- *
- * In the following assert:
- *
- * HDassert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6 ) );
- *
- * note that type ids 5 & 6 are associated with object headers in the
- * metadata cache.
- *
- * When we get to using H5C for other purposes, we may wish to
- * tighten up the assert so that the loophole only applies to the
- * metadata cache.
- *
- * Note that at present, dirty can't be set to true with prefetched
- * entries. However this may change, so include this functionality
- * against that possibility.
- *
- * Also, note that it is possible for a prefetched entry to be dirty --
- * hence the value assigned to ds_entry_ptr->is_dirty below.
- */
-
- HDassert((dirty == FALSE) || (type->id == 5 || type->id == 6));
-
- ds_entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
- ds_entry_ptr->cache_ptr = f->shared->cache;
- ds_entry_ptr->addr = addr;
- ds_entry_ptr->size = len;
- HDassert(ds_entry_ptr->size < H5C_MAX_ENTRY_SIZE);
- ds_entry_ptr->image_ptr = pf_entry_ptr->image_ptr;
- ds_entry_ptr->image_up_to_date = !dirty;
- ds_entry_ptr->type = type;
- ds_entry_ptr->is_dirty = dirty | pf_entry_ptr->is_dirty;
- ds_entry_ptr->dirtied = FALSE;
- ds_entry_ptr->is_protected = FALSE;
- ds_entry_ptr->is_read_only = FALSE;
- ds_entry_ptr->ro_ref_count = 0;
- ds_entry_ptr->is_pinned = FALSE;
- ds_entry_ptr->in_slist = FALSE;
- ds_entry_ptr->flush_marker = FALSE;
-#ifdef H5_HAVE_PARALLEL
- ds_entry_ptr->clear_on_unprotect = FALSE;
- ds_entry_ptr->flush_immediately = FALSE;
- ds_entry_ptr->coll_access = FALSE;
-#endif /* H5_HAVE_PARALLEL */
- ds_entry_ptr->flush_in_progress = FALSE;
- ds_entry_ptr->destroy_in_progress = FALSE;
-
- ds_entry_ptr->ring = pf_entry_ptr->ring;
-
- /* Initialize flush dependency height fields */
- ds_entry_ptr->flush_dep_parent = NULL;
- ds_entry_ptr->flush_dep_nparents = 0;
- ds_entry_ptr->flush_dep_parent_nalloc = 0;
- ds_entry_ptr->flush_dep_nchildren = 0;
- ds_entry_ptr->flush_dep_ndirty_children = 0;
- ds_entry_ptr->flush_dep_nunser_children = 0;
-
- /* Initialize fields supporting the hash table: */
- ds_entry_ptr->ht_next = NULL;
- ds_entry_ptr->ht_prev = NULL;
- ds_entry_ptr->il_next = NULL;
- ds_entry_ptr->il_prev = NULL;
-
- /* Initialize fields supporting replacement policies: */
- ds_entry_ptr->next = NULL;
- ds_entry_ptr->prev = NULL;
-#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
- ds_entry_ptr->aux_next = NULL;
- ds_entry_ptr->aux_prev = NULL;
-#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
-#ifdef H5_HAVE_PARALLEL
- pf_entry_ptr->coll_next = NULL;
- pf_entry_ptr->coll_prev = NULL;
-#endif /* H5_HAVE_PARALLEL */
-
- /* Initialize cache image related fields */
- ds_entry_ptr->include_in_image = FALSE;
- ds_entry_ptr->lru_rank = 0;
- ds_entry_ptr->image_dirty = FALSE;
- ds_entry_ptr->fd_parent_count = 0;
- ds_entry_ptr->fd_parent_addrs = NULL;
- ds_entry_ptr->fd_child_count = pf_entry_ptr->fd_child_count;
- ds_entry_ptr->fd_dirty_child_count = 0;
- ds_entry_ptr->image_fd_height = 0;
- ds_entry_ptr->prefetched = FALSE;
- ds_entry_ptr->prefetch_type_id = 0;
- ds_entry_ptr->age = 0;
- ds_entry_ptr->prefetched_dirty = pf_entry_ptr->prefetched_dirty;
-#ifndef NDEBUG /* debugging field */
- ds_entry_ptr->serialization_count = 0;
-#endif
-
- H5C__RESET_CACHE_ENTRY_STATS(ds_entry_ptr);
-
- /* Apply tag to the newly deserialized entry */
- if (H5C__tag_entry(cache_ptr, ds_entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "Cannot tag metadata entry")
-
- /* We have successfully deserialized the prefetched entry.
- *
- * Before we return a pointer to the deserialized entry, we must remove
- * the prefetched entry from the cache, discard it, and replace it with
- * the deserialized entry. Note that we do not free the prefetched
- * entry's image, as that has been transferred to the deserialized
- * entry.
- *
- * Also note that we have not yet restored any flush dependencies. This
- * must wait until the deserialized entry is inserted in the cache.
- *
- * To delete the prefetched entry from the cache:
- *
- * 1) Set pf_entry_ptr->image_ptr to NULL. Since we have already
- * transferred the buffer containing the image to *ds_entry_ptr,
- * this is not a memory leak.
- *
- * 2) Call H5C__flush_single_entry() with the H5C__FLUSH_INVALIDATE_FLAG
- * and H5C__FLUSH_CLEAR_ONLY_FLAG flags set.
- */
- pf_entry_ptr->image_ptr = NULL;
-
- if (pf_entry_ptr->is_dirty) {
- HDassert(((cache_ptr->slist_enabled) && (pf_entry_ptr->in_slist)) ||
- ((!cache_ptr->slist_enabled) && (!pf_entry_ptr->in_slist)));
-
- flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG;
- } /* end if */
-
- if (H5C__flush_single_entry(f, pf_entry_ptr, flush_flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "can't expunge prefetched entry")
-
-#ifndef NDEBUG /* verify deletion */
- H5C__SEARCH_INDEX(cache_ptr, addr, pf_entry_ptr, FAIL);
-
- HDassert(NULL == pf_entry_ptr);
-#endif
-
- /* Insert the deserialized entry into the cache. */
- H5C__INSERT_IN_INDEX(cache_ptr, ds_entry_ptr, FAIL)
-
- HDassert(!ds_entry_ptr->in_slist);
- if (ds_entry_ptr->is_dirty)
- H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, ds_entry_ptr, FAIL)
-
- H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, ds_entry_ptr, FAIL)
-
- /* Deserializing a prefetched entry is the conceptual equivalent of
- * loading it from file. If the deserialized entry has a notify callback,
- * send an "after load" notice now that the deserialized entry is fully
- * integrated into the cache.
- */
- if (ds_entry_ptr->type->notify &&
- (ds_entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_LOAD, ds_entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry loaded into cache")
-
- /* Restore flush dependencies with the flush dependency children of
- * the prefetched entry. Note that we must protect *ds_entry_ptr
- * before the call to avoid triggering sanity check failures, and
- * then unprotect it afterwards.
- */
- i = 0;
- if (fd_children != NULL) {
- H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, ds_entry_ptr, FAIL)
- ds_entry_ptr->is_protected = TRUE;
- while (fd_children[i] != NULL) {
- /* Sanity checks */
- HDassert((fd_children[i])->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert((fd_children[i])->prefetched);
- HDassert((fd_children[i])->fd_parent_count > 0);
- HDassert((fd_children[i])->fd_parent_addrs);
-
-#ifndef NDEBUG
- {
- int j;
- hbool_t found;
-
- j = 0;
- found = FALSE;
- while ((j < (int)((fd_children[i])->fd_parent_count)) && (!found)) {
- if ((fd_children[i])->fd_parent_addrs[j] == ds_entry_ptr->addr)
- found = TRUE;
-
- j++;
- } /* end while */
- HDassert(found);
- }
-#endif
-
- if (H5C_create_flush_dependency(ds_entry_ptr, fd_children[i]) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Can't restore child flush dependency")
-
- i++;
- } /* end while */
-
- H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, ds_entry_ptr, FAIL);
- ds_entry_ptr->is_protected = FALSE;
- } /* end if ( fd_children != NULL ) */
- HDassert((unsigned)i == ds_entry_ptr->fd_child_count);
-
- ds_entry_ptr->fd_child_count = 0;
- H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr)
-
- /* finally, pass ds_entry_ptr back to the caller */
- *entry_ptr_ptr = ds_entry_ptr;
-
-done:
- if (fd_children)
- fd_children = (H5C_cache_entry_t **)H5MM_xfree((void *)fd_children);
-
- /* Release resources on error */
- if (FAIL == ret_value)
- if (thing && type->free_icr(thing) < 0)
- HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "free_icr callback failed")
-
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__deserialize_prefetched_entry() */
-
-/*-------------------------------------------------------------------------
* Function: H5C__free_image_entries_array
*
* Purpose: If the image entries array exists, free the image
@@ -896,7 +514,7 @@ H5C__free_image_entries_array(H5C_t *cache_ptr)
} /* H5C__free_image_entries_array() */
/*-------------------------------------------------------------------------
- * Function: H5C_get_cache_image_config
+ * Function: H5C__get_cache_image_config
*
* Purpose: Copy the current configuration for cache image generation
* on file close into the instance of H5C_cache_image_ctl_t
@@ -910,11 +528,11 @@ H5C__free_image_entries_array(H5C_t *cache_ptr)
*-------------------------------------------------------------------------
*/
herr_t
-H5C_get_cache_image_config(const H5C_t *cache_ptr, H5C_cache_image_ctl_t *config_ptr)
+H5C__get_cache_image_config(const H5C_t *cache_ptr, H5C_cache_image_ctl_t *config_ptr)
{
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_PACKAGE
if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC))
HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad cache_ptr on entry")
@@ -925,73 +543,7 @@ H5C_get_cache_image_config(const H5C_t *cache_ptr, H5C_cache_image_ctl_t *config
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_get_cache_image_config() */
-
-/*-------------------------------------------------------------------------
- * Function: H5C_image_stats
- *
- * Purpose: Prints statistics specific to the cache image.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer
- * 10/26/15
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-#if H5C_COLLECT_CACHE_STATS
-H5C_image_stats(H5C_t *cache_ptr, hbool_t print_header)
-#else /* H5C_COLLECT_CACHE_STATS */
-H5C_image_stats(H5C_t *cache_ptr, hbool_t H5_ATTR_UNUSED print_header)
-#endif /* H5C_COLLECT_CACHE_STATS */
-{
-#if H5C_COLLECT_CACHE_STATS
- int i;
- int64_t total_hits = 0;
- int64_t total_misses = 0;
- double hit_rate;
- double prefetch_use_rate;
-#endif /* H5C_COLLECT_CACHE_STATS */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- if (!cache_ptr || cache_ptr->magic != H5C__H5C_T_MAGIC)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr")
-
-#if H5C_COLLECT_CACHE_STATS
- for (i = 0; i <= cache_ptr->max_type_id; i++) {
- total_hits += cache_ptr->hits[i];
- total_misses += cache_ptr->misses[i];
- } /* end for */
-
- if ((total_hits > 0) || (total_misses > 0))
- hit_rate = 100.0 * ((double)(total_hits)) / ((double)(total_hits + total_misses));
- else
- hit_rate = 0.0;
-
- if (cache_ptr->prefetches > 0)
- prefetch_use_rate = 100.0 * ((double)(cache_ptr->prefetch_hits)) / ((double)(cache_ptr->prefetches));
- else
- prefetch_use_rate = 0.0;
-
- if (print_header) {
- HDfprintf(stdout, "\nhit prefetches prefetch image pf hit\n");
- HDfprintf(stdout, "rate: total: dirty: hits: flshs: evct: size: rate:\n");
- } /* end if */
-
- HDfprintf(stdout, "%3.1lf %5lld %5lld %5lld %5lld %5lld %5lld %3.1lf\n", hit_rate,
- (long long)(cache_ptr->prefetches), (long long)(cache_ptr->dirty_prefetches),
- (long long)(cache_ptr->prefetch_hits),
- (long long)(cache_ptr->flushes[H5AC_PREFETCHED_ENTRY_ID]),
- (long long)(cache_ptr->evictions[H5AC_PREFETCHED_ENTRY_ID]),
- (long long)(cache_ptr->last_image_size), prefetch_use_rate);
-#endif /* H5C_COLLECT_CACHE_STATS */
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_image_stats() */
+} /* H5C__get_cache_image_config() */
/*-------------------------------------------------------------------------
* Function: H5C__read_cache_image
@@ -2010,138 +1562,6 @@ done:
#endif
/*-------------------------------------------------------------------------
- * Function: H5C__destroy_pf_entry_child_flush_deps()
- *
- * Purpose: Destroy all flush dependencies in which the supplied
- * prefetched entry is the parent. Note that the children
- * in these flush dependencies must be prefetched entries as
- * well.
- *
- * As this action is part of the process of transferring all
- * such flush dependencies to the deserialized version of the
- * prefetched entry, ensure that the data necessary to complete
- * the transfer is retained.
- *
- * Note: The current implementation of this function is
- * quite inefficient -- mostly due to the current
- * implementation of flush dependencies. This should
- * be fixed at some point.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer
- * 8/11/15
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr, H5C_cache_entry_t *pf_entry_ptr,
- H5C_cache_entry_t **fd_children)
-{
- H5C_cache_entry_t *entry_ptr;
-#ifndef NDEBUG
- unsigned entries_visited = 0;
-#endif
- int fd_children_found = 0;
- hbool_t found;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- /* Sanity checks */
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(pf_entry_ptr);
- HDassert(pf_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(pf_entry_ptr->type);
- HDassert(pf_entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID);
- HDassert(pf_entry_ptr->prefetched);
- HDassert(pf_entry_ptr->fd_child_count > 0);
- HDassert(fd_children);
-
- /* Scan each entry on the index list */
- entry_ptr = cache_ptr->il_head;
- while (entry_ptr != NULL) {
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
-
- /* Here we look at entry_ptr->flush_dep_nparents and not
- * entry_ptr->fd_parent_count as it is possible that some
- * or all of the prefetched flush dependency child relationships
- * have already been destroyed.
- */
- if (entry_ptr->prefetched && (entry_ptr->flush_dep_nparents > 0)) {
- unsigned u; /* Local index variable */
-
- /* Re-init */
- u = 0;
- found = FALSE;
-
- /* Sanity checks */
- HDassert(entry_ptr->type);
- HDassert(entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID);
- HDassert(entry_ptr->fd_parent_count >= entry_ptr->flush_dep_nparents);
- HDassert(entry_ptr->fd_parent_addrs);
- HDassert(entry_ptr->flush_dep_parent);
-
- /* Look for correct entry */
- while (!found && (u < entry_ptr->fd_parent_count)) {
- /* Sanity check entry */
- HDassert(entry_ptr->flush_dep_parent[u]);
- HDassert(entry_ptr->flush_dep_parent[u]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
-
- /* Correct entry? */
- if (pf_entry_ptr == entry_ptr->flush_dep_parent[u])
- found = TRUE;
-
- u++;
- } /* end while */
-
- if (found) {
- HDassert(NULL == fd_children[fd_children_found]);
-
- /* Remove flush dependency */
- fd_children[fd_children_found] = entry_ptr;
- fd_children_found++;
- if (H5C_destroy_flush_dependency(pf_entry_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL,
- "can't destroy pf entry child flush dependency")
-
-#ifndef NDEBUG
- /* Sanity check -- verify that the address of the parent
- * appears in entry_ptr->fd_parent_addrs. Must do a search,
- * as with flush dependency creates and destroys,
- * entry_ptr->fd_parent_addrs and entry_ptr->flush_dep_parent
- * can list parents in different order.
- */
- found = FALSE;
- u = 0;
- while (!found && u < entry_ptr->fd_parent_count) {
- if (pf_entry_ptr->addr == entry_ptr->fd_parent_addrs[u])
- found = TRUE;
- u++;
- } /* end while */
- HDassert(found);
-#endif
- } /* end if */
- } /* end if */
-
-#ifndef NDEBUG
- entries_visited++;
-#endif
- entry_ptr = entry_ptr->il_next;
- } /* end while */
-
- /* Post-op sanity checks */
- HDassert(NULL == fd_children[fd_children_found]);
- HDassert((unsigned)fd_children_found == pf_entry_ptr->fd_child_count);
- HDassert(entries_visited == cache_ptr->index_len);
- HDassert(!pf_entry_ptr->is_pinned);
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__destroy_pf_entry_child_flush_deps() */
-
-/*-------------------------------------------------------------------------
* Function: H5C__encode_cache_image_header()
*
* Purpose: Encode the metadata cache image buffer header in the
diff --git a/src/H5Cint.c b/src/H5Cint.c
new file mode 100644
index 0000000..5200ac6
--- /dev/null
+++ b/src/H5Cint.c
@@ -0,0 +1,2578 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: H5Cint.c
+ * May 8 2023
+ * Quincey Koziol
+ *
+ * Purpose: Internal routines for the cache.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/****************/
+/* Module Setup */
+/****************/
+
+#include "H5Cmodule.h" /* This source code file is part of the H5C module */
+#define H5F_FRIEND /* suppress error about including H5Fpkg */
+
+/***********/
+/* Headers */
+/***********/
+#include "H5private.h" /* Generic Functions */
+#include "H5Cpkg.h" /* Cache */
+#include "H5Eprivate.h" /* Error handling */
+#include "H5Fpkg.h" /* Files */
+#include "H5MFprivate.h" /* File memory management */
+
+/****************/
+/* Local Macros */
+/****************/
+
+/******************/
+/* Local Typedefs */
+/******************/
+
+/********************/
+/* Local Prototypes */
+/********************/
+static herr_t H5C__autoadjust__ageout(H5F_t *f, double hit_rate, enum H5C_resize_status *status_ptr,
+ size_t *new_max_cache_size_ptr, hbool_t write_permitted);
+static herr_t H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t *cache_ptr);
+static herr_t H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t *f, hbool_t write_permitted);
+static herr_t H5C__autoadjust__ageout__insert_new_marker(H5C_t *cache_ptr);
+static herr_t H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags);
+static herr_t H5C__serialize_ring(H5F_t *f, H5C_ring_t ring);
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__auto_adjust_cache_size
+ *
+ * Purpose: Obtain the current full cache hit rate, and compare it
+ * with the hit rate thresholds for modifying cache size.
+ * If one of the thresholds has been crossed, adjusts the
+ * size of the cache accordingly.
+ *
+ * The function then resets the full cache hit rate
+ * statistics, and exits.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * an attempt to flush a protected item.
+ *
+ *
+ * Programmer: John Mainzer, 10/7/04
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted)
+{
+ H5C_t *cache_ptr = f->shared->cache;
+ hbool_t reentrant_call = FALSE;
+ hbool_t inserted_epoch_marker = FALSE;
+ size_t new_max_cache_size = 0;
+ size_t old_max_cache_size = 0;
+ size_t new_min_clean_size = 0;
+ size_t old_min_clean_size = 0;
+ double hit_rate;
+ enum H5C_resize_status status = in_spec; /* will change if needed */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ HDassert(f);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->cache_accesses >= cache_ptr->resize_ctl.epoch_length);
+ HDassert(0.0 <= cache_ptr->resize_ctl.min_clean_fraction);
+ HDassert(cache_ptr->resize_ctl.min_clean_fraction <= 100.0);
+
+ /* check to see if cache_ptr->resize_in_progress is TRUE. If it is, this
+ * is a re-entrant call via a client callback called in the resize
+ * process. To avoid an infinite recursion, set reentrant_call to
+ * TRUE, and goto done.
+ */
+ if (cache_ptr->resize_in_progress) {
+ reentrant_call = TRUE;
+ HGOTO_DONE(SUCCEED)
+ } /* end if */
+
+ cache_ptr->resize_in_progress = TRUE;
+
+ if (!cache_ptr->resize_enabled)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Auto cache resize disabled")
+
+ HDassert((cache_ptr->resize_ctl.incr_mode != H5C_incr__off) ||
+ (cache_ptr->resize_ctl.decr_mode != H5C_decr__off));
+
+ if (H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate")
+
+ HDassert((0.0 <= hit_rate) && (hit_rate <= 1.0));
+
+ switch (cache_ptr->resize_ctl.incr_mode) {
+ case H5C_incr__off:
+ if (cache_ptr->size_increase_possible)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "size_increase_possible but H5C_incr__off?!?!?")
+ break;
+
+ case H5C_incr__threshold:
+ if (hit_rate < cache_ptr->resize_ctl.lower_hr_threshold) {
+ if (!cache_ptr->size_increase_possible)
+ status = increase_disabled;
+ else if (cache_ptr->max_cache_size >= cache_ptr->resize_ctl.max_size) {
+ HDassert(cache_ptr->max_cache_size == cache_ptr->resize_ctl.max_size);
+ status = at_max_size;
+ }
+ else if (!cache_ptr->cache_full)
+ status = not_full;
+ else {
+ new_max_cache_size =
+ (size_t)(((double)(cache_ptr->max_cache_size)) * cache_ptr->resize_ctl.increment);
+
+ /* clip to max size if necessary */
+ if (new_max_cache_size > cache_ptr->resize_ctl.max_size)
+ new_max_cache_size = cache_ptr->resize_ctl.max_size;
+
+ /* clip to max increment if necessary */
+ if (cache_ptr->resize_ctl.apply_max_increment &&
+ ((cache_ptr->max_cache_size + cache_ptr->resize_ctl.max_increment) <
+ new_max_cache_size))
+ new_max_cache_size = cache_ptr->max_cache_size + cache_ptr->resize_ctl.max_increment;
+
+ status = increase;
+ }
+ }
+ break;
+
+ default:
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode")
+ }
+
+ /* If the decr_mode is either age out or age out with threshold, we
+ * must run the marker maintenance code, whether we run the size
+ * reduction code or not. We do this in two places -- here we
+ * insert a new marker if the number of active epoch markers
+ * is less than the current epochs before eviction, and after
+ * the ageout call, we cycle the markers.
+ *
+ * However, we can't call the ageout code or cycle the markers
+ * unless there was a full complement of markers in place on
+ * entry. The inserted_epoch_marker flag is used to track this.
+ */
+
+ if (((cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out) ||
+ (cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out_with_threshold)) &&
+ (cache_ptr->epoch_markers_active < cache_ptr->resize_ctl.epochs_before_eviction)) {
+
+ if (H5C__autoadjust__ageout__insert_new_marker(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't insert new epoch marker")
+
+ inserted_epoch_marker = TRUE;
+ }
+
+ /* don't run the cache size decrease code unless the cache size
+ * increase code is disabled, or the size increase code sees no need
+ * for action. In either case, status == in_spec at this point.
+ */
+
+ if (status == in_spec) {
+ switch (cache_ptr->resize_ctl.decr_mode) {
+ case H5C_decr__off:
+ break;
+
+ case H5C_decr__threshold:
+ if (hit_rate > cache_ptr->resize_ctl.upper_hr_threshold) {
+ if (!cache_ptr->size_decrease_possible)
+ status = decrease_disabled;
+ else if (cache_ptr->max_cache_size <= cache_ptr->resize_ctl.min_size) {
+ HDassert(cache_ptr->max_cache_size == cache_ptr->resize_ctl.min_size);
+ status = at_min_size;
+ }
+ else {
+ new_max_cache_size =
+ (size_t)(((double)(cache_ptr->max_cache_size)) * cache_ptr->resize_ctl.decrement);
+
+ /* clip to min size if necessary */
+ if (new_max_cache_size < cache_ptr->resize_ctl.min_size)
+ new_max_cache_size = cache_ptr->resize_ctl.min_size;
+
+ /* clip to max decrement if necessary */
+ if (cache_ptr->resize_ctl.apply_max_decrement &&
+ ((cache_ptr->resize_ctl.max_decrement + new_max_cache_size) <
+ cache_ptr->max_cache_size))
+ new_max_cache_size =
+ cache_ptr->max_cache_size - cache_ptr->resize_ctl.max_decrement;
+
+ status = decrease;
+ }
+ }
+ break;
+
+ case H5C_decr__age_out_with_threshold:
+ case H5C_decr__age_out:
+ if (!inserted_epoch_marker) {
+ if (!cache_ptr->size_decrease_possible)
+ status = decrease_disabled;
+ else {
+ if (H5C__autoadjust__ageout(f, hit_rate, &status, &new_max_cache_size,
+ write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ageout code failed")
+ } /* end else */
+ } /* end if */
+ break;
+
+ default:
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode")
+ }
+ }
+
+ /* cycle the epoch markers here if appropriate */
+ if (((cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out) ||
+ (cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out_with_threshold)) &&
+ !inserted_epoch_marker)
+ /* move last epoch marker to the head of the LRU list */
+ if (H5C__autoadjust__ageout__cycle_epoch_marker(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error cycling epoch marker")
+
+ if ((status == increase) || (status == decrease)) {
+ old_max_cache_size = cache_ptr->max_cache_size;
+ old_min_clean_size = cache_ptr->min_clean_size;
+
+ new_min_clean_size =
+ (size_t)((double)new_max_cache_size * (cache_ptr->resize_ctl.min_clean_fraction));
+
+ /* new_min_clean_size is a size_t, and hence non-negative by definition. */
+ HDassert(new_min_clean_size <= new_max_cache_size);
+ HDassert(cache_ptr->resize_ctl.min_size <= new_max_cache_size);
+ HDassert(new_max_cache_size <= cache_ptr->resize_ctl.max_size);
+
+ cache_ptr->max_cache_size = new_max_cache_size;
+ cache_ptr->min_clean_size = new_min_clean_size;
+
+ if (status == increase)
+ cache_ptr->cache_full = FALSE;
+ else if (status == decrease)
+ cache_ptr->size_decreased = TRUE;
+
+ /* update flash cache size increase fields as appropriate */
+ if (cache_ptr->flash_size_increase_possible) {
+ switch (cache_ptr->resize_ctl.flash_incr_mode) {
+ case H5C_flash_incr__off:
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
+ "flash_size_increase_possible but H5C_flash_incr__off?!")
+ break;
+
+ case H5C_flash_incr__add_space:
+ cache_ptr->flash_size_increase_threshold =
+ (size_t)(((double)(cache_ptr->max_cache_size)) *
+ (cache_ptr->resize_ctl.flash_threshold));
+ break;
+
+ default: /* should be unreachable */
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
+ break;
+ }
+ }
+ }
+
+ if (cache_ptr->resize_ctl.rpt_fcn != NULL)
+ (cache_ptr->resize_ctl.rpt_fcn)(cache_ptr, H5C__CURR_AUTO_RESIZE_RPT_FCN_VER, hit_rate, status,
+ old_max_cache_size, new_max_cache_size, old_min_clean_size,
+ new_min_clean_size);
+
+ if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
+ /* this should be impossible... */
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed")
+
+done:
+ /* Sanity checks */
+ HDassert(cache_ptr->resize_in_progress);
+ if (!reentrant_call)
+ cache_ptr->resize_in_progress = FALSE;
+ HDassert((!reentrant_call) || (cache_ptr->resize_in_progress));
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__auto_adjust_cache_size() */
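+
+/* Worked example for the H5C_decr__threshold arithmetic above
+ * (illustrative only; all values are hypothetical): with
+ * max_cache_size = 4 MiB, decrement = 0.5, min_size = 1 MiB,
+ * apply_max_decrement = TRUE, and max_decrement = 1 MiB, the initial
+ * candidate size is 4 MiB * 0.5 = 2 MiB. That clears the min_size
+ * clip, but since 1 MiB + 2 MiB < 4 MiB, the max decrement clip
+ * raises the candidate to 4 MiB - 1 MiB = 3 MiB, which becomes the
+ * new maximum cache size.
+ */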
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__autoadjust__ageout
+ *
+ * Purpose: Implement the ageout automatic cache size decrement
+ * algorithm. Note that while this code evicts aged out
+ * entries, the code does not change the maximum cache size.
+ * Instead, the function simply computes the new value (if
+ * any change is indicated) and reports this value in
+ * *new_max_cache_size_ptr.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * an attempt to flush a protected item.
+ *
+ * Programmer: John Mainzer, 11/18/04
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__autoadjust__ageout(H5F_t *f, double hit_rate, enum H5C_resize_status *status_ptr,
+ size_t *new_max_cache_size_ptr, hbool_t write_permitted)
+{
+ H5C_t *cache_ptr = f->shared->cache;
+ size_t test_size;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ HDassert(f);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert((status_ptr) && (*status_ptr == in_spec));
+ HDassert((new_max_cache_size_ptr) && (*new_max_cache_size_ptr == 0));
+
+ /* remove excess epoch markers if any */
+ if (cache_ptr->epoch_markers_active > cache_ptr->resize_ctl.epochs_before_eviction)
+ if (H5C__autoadjust__ageout__remove_excess_markers(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't remove excess epoch markers")
+
+ if ((cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out) ||
+ ((cache_ptr->resize_ctl.decr_mode == H5C_decr__age_out_with_threshold) &&
+ (hit_rate >= cache_ptr->resize_ctl.upper_hr_threshold))) {
+
+ if (cache_ptr->max_cache_size > cache_ptr->resize_ctl.min_size) {
+ /* evict aged out cache entries if appropriate... */
+ if (H5C__autoadjust__ageout__evict_aged_out_entries(f, write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error flushing aged out entries")
+
+ /* ... and then reduce cache size if appropriate */
+ if (cache_ptr->index_size < cache_ptr->max_cache_size) {
+ if (cache_ptr->resize_ctl.apply_empty_reserve) {
+ test_size =
+ (size_t)(((double)cache_ptr->index_size) / (1 - cache_ptr->resize_ctl.empty_reserve));
+ if (test_size < cache_ptr->max_cache_size) {
+ *status_ptr = decrease;
+ *new_max_cache_size_ptr = test_size;
+ }
+ }
+ else {
+ *status_ptr = decrease;
+ *new_max_cache_size_ptr = cache_ptr->index_size;
+ }
+
+ if (*status_ptr == decrease) {
+ /* clip to min size if necessary */
+ if (*new_max_cache_size_ptr < cache_ptr->resize_ctl.min_size)
+ *new_max_cache_size_ptr = cache_ptr->resize_ctl.min_size;
+
+ /* clip to max decrement if necessary */
+ if ((cache_ptr->resize_ctl.apply_max_decrement) &&
+ ((cache_ptr->resize_ctl.max_decrement + *new_max_cache_size_ptr) <
+ cache_ptr->max_cache_size))
+ *new_max_cache_size_ptr =
+ cache_ptr->max_cache_size - cache_ptr->resize_ctl.max_decrement;
+ }
+ }
+ }
+ else
+ *status_ptr = at_min_size;
+ }
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__autoadjust__ageout() */
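+
+/* Worked example for the empty reserve computation above (illustrative
+ * only; values are hypothetical): with index_size = 900 KiB and
+ * empty_reserve = 0.1, test_size = 900 KiB / (1 - 0.1) = 1000 KiB.
+ * If the current max_cache_size is, say, 2 MiB, the cache is shrunk
+ * to 1000 KiB (subject to the min size and max decrement clips),
+ * leaving the configured 10% of the new maximum as empty space.
+ */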
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__autoadjust__ageout__cycle_epoch_marker
+ *
+ * Purpose: Remove the oldest epoch marker from the LRU list,
+ * and reinsert it at the head of the LRU list. Also
+ * remove the epoch marker's index from the head of the
+ * ring buffer, and re-insert it at the tail of the ring
+ * buffer.
+ *
+ * Return: SUCCEED on success/FAIL on failure.
+ *
+ * Programmer: John Mainzer, 11/22/04
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t *cache_ptr)
+{
+ int i;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ if (cache_ptr->epoch_markers_active <= 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "No active epoch markers on entry?!?!?")
+
+ /* remove the last marker from both the ring buffer and the LRU list */
+ i = cache_ptr->epoch_marker_ringbuf[cache_ptr->epoch_marker_ringbuf_first];
+ cache_ptr->epoch_marker_ringbuf_first =
+ (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
+ if (cache_ptr->epoch_marker_ringbuf_size <= 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow")
+
+ cache_ptr->epoch_marker_ringbuf_size -= 1;
+ if (cache_ptr->epoch_marker_active[i] != TRUE)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
+
+ H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr, (cache_ptr)->LRU_tail_ptr,
+ (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size, (FAIL))
+
+ /* now, re-insert it at the head of the LRU list, and at the tail of
+ * the ring buffer.
+ */
+ HDassert(cache_ptr->epoch_markers[i].addr == (haddr_t)i);
+ HDassert(cache_ptr->epoch_markers[i].next == NULL);
+ HDassert(cache_ptr->epoch_markers[i].prev == NULL);
+
+ cache_ptr->epoch_marker_ringbuf_last =
+ (cache_ptr->epoch_marker_ringbuf_last + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
+ cache_ptr->epoch_marker_ringbuf[cache_ptr->epoch_marker_ringbuf_last] = i;
+ if (cache_ptr->epoch_marker_ringbuf_size >= H5C__MAX_EPOCH_MARKERS)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow")
+
+ cache_ptr->epoch_marker_ringbuf_size += 1;
+
+ H5C__DLL_PREPEND(&(cache_ptr->epoch_markers[i]), cache_ptr->LRU_head_ptr, cache_ptr->LRU_tail_ptr,
+ cache_ptr->LRU_list_len, cache_ptr->LRU_list_size, FAIL)
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__autoadjust__ageout__cycle_epoch_marker() */
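+
+/* Illustrative sketch (not part of the cache code; the rb_* names are
+ * hypothetical): the epoch marker ring buffer manipulated above and in
+ * the insert/remove routines below is plain modular indexing over an
+ * array of H5C__MAX_EPOCH_MARKERS + 1 slots:
+ *
+ *      #define RB_SLOTS (H5C__MAX_EPOCH_MARKERS + 1)
+ *      static int rb_buf[RB_SLOTS];
+ *      static int rb_first = 0, rb_last = -1, rb_size = 0;
+ *
+ *      static void rb_push(int v)        -- the "insert at tail" steps
+ *      {
+ *          rb_last = (rb_last + 1) % RB_SLOTS;
+ *          rb_buf[rb_last] = v;
+ *          rb_size++;
+ *      }
+ *
+ *      static int rb_pop(void)           -- the "remove at head" steps
+ *      {
+ *          int v = rb_buf[rb_first];
+ *          rb_first = (rb_first + 1) % RB_SLOTS;
+ *          rb_size--;
+ *          return v;
+ *      }
+ */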
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__autoadjust__ageout__evict_aged_out_entries
+ *
+ * Purpose: Evict clean entries in the cache that haven't
+ * been accessed for at least
+ * cache_ptr->resize_ctl.epochs_before_eviction epochs,
+ * and flush dirty entries that haven't been accessed for
+ * that amount of time.
+ *
+ * Depending on configuration, the function will either
+ * flush or evict all such entries, or all such entries it
+ * encounters until it has freed the maximum amount of space
+ * allowed under the maximum decrement.
+ *
+ * If we are running in parallel mode, writes may not be
+ * permitted. If so, the function simply skips any dirty
+ * entries it may encounter.
+ *
+ * The function makes no attempt to maintain the minimum
+ * clean size, as there is no guarantee that the cache size
+ * will be changed.
+ *
+ * If there is no cache size change, the minimum clean size
+ * constraint will be met through a combination of clean
+ * entries and free space in the cache.
+ *
+ * If there is a cache size reduction, the minimum clean size
+ * will be re-calculated, and will be enforced the next time
+ * we have to make space in the cache.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 11/22/04
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t *f, hbool_t write_permitted)
+{
+ H5C_t *cache_ptr = f->shared->cache;
+ size_t eviction_size_limit;
+ size_t bytes_evicted = 0;
+ hbool_t prev_is_dirty = FALSE;
+ hbool_t restart_scan;
+ H5C_cache_entry_t *entry_ptr;
+ H5C_cache_entry_t *next_ptr;
+ H5C_cache_entry_t *prev_ptr;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ HDassert(f);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ /* if there is a limit on the amount that the cache size can be decreased
+ * in any one round of the cache size reduction algorithm, load that
+ * limit into eviction_size_limit. Otherwise, set eviction_size_limit
+ * to the equivalent of infinity. The current size of the index will
+ * do nicely.
+ */
+ if (cache_ptr->resize_ctl.apply_max_decrement)
+ eviction_size_limit = cache_ptr->resize_ctl.max_decrement;
+ else
+ eviction_size_limit = cache_ptr->index_size; /* i.e. infinity */
+
+ if (write_permitted) {
+ restart_scan = FALSE;
+ entry_ptr = cache_ptr->LRU_tail_ptr;
+ while (entry_ptr != NULL && entry_ptr->type->id != H5AC_EPOCH_MARKER_ID &&
+ bytes_evicted < eviction_size_limit) {
+ hbool_t skipping_entry = FALSE;
+
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(!(entry_ptr->is_protected));
+ HDassert(!(entry_ptr->is_read_only));
+ HDassert((entry_ptr->ro_ref_count) == 0);
+
+ next_ptr = entry_ptr->next;
+ prev_ptr = entry_ptr->prev;
+
+ if (prev_ptr != NULL)
+ prev_is_dirty = prev_ptr->is_dirty;
+
+ if (entry_ptr->is_dirty) {
+ HDassert(!entry_ptr->prefetched_dirty);
+
+ /* dirty corked entry is skipped */
+ if (entry_ptr->tag_info && entry_ptr->tag_info->corked)
+ skipping_entry = TRUE;
+ else {
+ /* reset entries_removed_counter and
+ * last_entry_removed_ptr prior to the call to
+ * H5C__flush_single_entry() so that we can spot
+ * unexpected removals of entries from the cache,
+ * and set the restart_scan flag if proceeding
+ * would be likely to cause us to scan an entry
+ * that is no longer in the cache.
+ */
+ cache_ptr->entries_removed_counter = 0;
+ cache_ptr->last_entry_removed_ptr = NULL;
+
+ if (H5C__flush_single_entry(f, entry_ptr, H5C__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
+
+ if (cache_ptr->entries_removed_counter > 1 ||
+ cache_ptr->last_entry_removed_ptr == prev_ptr)
+ restart_scan = TRUE;
+ } /* end else */
+ } /* end if */
+ else if (!entry_ptr->prefetched_dirty) {
+ bytes_evicted += entry_ptr->size;
+
+ if (H5C__flush_single_entry(
+ f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
+ } /* end else-if */
+ else {
+ HDassert(!entry_ptr->is_dirty);
+ HDassert(entry_ptr->prefetched_dirty);
+
+ skipping_entry = TRUE;
+ } /* end else */
+
+ if (prev_ptr != NULL) {
+ if (skipping_entry)
+ entry_ptr = prev_ptr;
+ else if (restart_scan || (prev_ptr->is_dirty != prev_is_dirty) ||
+ (prev_ptr->next != next_ptr) || (prev_ptr->is_protected) || (prev_ptr->is_pinned)) {
+ /* Something has happened to the LRU -- start over
+ * from the tail.
+ */
+ restart_scan = FALSE;
+ entry_ptr = cache_ptr->LRU_tail_ptr;
+
+ H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
+ } /* end else-if */
+ else
+ entry_ptr = prev_ptr;
+ } /* end if */
+ else
+ entry_ptr = NULL;
+ } /* end while */
+
+ /* for now at least, don't bother to maintain the minimum clean size,
+ * as the cache should now be less than its maximum size. Due to
+ * the vagaries of the cache size reduction algorithm, we may not
+ * reduce the size of the cache.
+ *
+ * If we do, we will calculate a new minimum clean size, which will
+ * be enforced the next time we try to make space in the cache.
+ *
+ * If we don't, no action is necessary, as we have just evicted
+ * and/or flushed a bunch of entries, and therefore the sum of the clean
+ * and free space in the cache must be greater than or equal to the
+ * min clean space requirement (assuming that requirement was met on
+ * entry).
+ */
+ } /* end if */
+ else /* ! write_permitted */ {
+ /* Since we are not allowed to write, all we can do is evict
+ * any clean entries that we may encounter before we either
+ * hit the eviction size limit, or encounter the epoch marker.
+ *
+ * If we are operating read only, this isn't an issue, as there
+ * will not be any dirty entries.
+ *
+ * If we are operating in R/W mode, all the dirty entries we
+ * skip will be flushed the next time we attempt to make space
+ * when writes are permitted. This may have some local
+ * performance implications, but it shouldn't cause any net
+ * slowdown.
+ */
+ HDassert(H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS);
+ entry_ptr = cache_ptr->LRU_tail_ptr;
+ while (entry_ptr != NULL && ((entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID) &&
+ (bytes_evicted < eviction_size_limit)) {
+ HDassert(!(entry_ptr->is_protected));
+
+ prev_ptr = entry_ptr->prev;
+
+ if (!(entry_ptr->is_dirty) && !(entry_ptr->prefetched_dirty))
+ if (H5C__flush_single_entry(
+ f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush clean entry")
+
+ /* just skip the entry if it is dirty, as we can't do
+ * anything with it now since we can't write.
+ *
+ * Since all the entries we flush are clean, serialize() will not be called,
+ * and thus we needn't test to see if the LRU has been changed
+ * out from under us.
+ */
+ entry_ptr = prev_ptr;
+ } /* end while */
+ } /* end else */
+
+ if (cache_ptr->index_size < cache_ptr->max_cache_size)
+ cache_ptr->cache_full = FALSE;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__autoadjust__ageout__evict_aged_out_entries() */
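+
+/* The LRU walk above follows a pattern that recurs in this file: since
+ * flush callbacks may unlink arbitrary entries, the scan captures
+ * prev_ptr (and its dirty state) before flushing the current entry,
+ * then checks afterwards that prev_ptr still looks like the entry it
+ * remembered, restarting from the LRU tail on any mismatch. A minimal
+ * model of the idea, with hypothetical names:
+ *
+ *      node_t *cur = tail;
+ *      while (cur != NULL) {
+ *          node_t *next       = cur->next;
+ *          node_t *prev       = cur->prev;
+ *          hbool_t prev_dirty = prev ? prev->is_dirty : FALSE;
+ *
+ *          act_on(cur);                    -- may unlink list nodes
+ *
+ *          if (prev != NULL &&
+ *              (prev->is_dirty != prev_dirty || prev->next != next))
+ *              cur = tail;                 -- restart the scan
+ *          else
+ *              cur = prev;
+ *      }
+ */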
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__autoadjust__ageout__insert_new_marker
+ *
+ * Purpose: Find an unused marker cache entry, mark it as used, and
+ * insert it at the head of the LRU list. Also add the
+ * marker's index in the epoch_markers array to the tail of
+ * the ring buffer.
+ *
+ * Return: SUCCEED on success/FAIL on failure.
+ *
+ * Programmer: John Mainzer, 11/19/04
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__autoadjust__ageout__insert_new_marker(H5C_t *cache_ptr)
+{
+ int i;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ if (cache_ptr->epoch_markers_active >= cache_ptr->resize_ctl.epochs_before_eviction)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Already have a full complement of markers")
+
+ /* find an unused marker */
+ i = 0;
+ while (i < H5C__MAX_EPOCH_MARKERS && (cache_ptr->epoch_marker_active)[i])
+ i++;
+ if (i >= H5C__MAX_EPOCH_MARKERS)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't find unused marker")
+
+ HDassert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i);
+ HDassert(((cache_ptr->epoch_markers)[i]).next == NULL);
+ HDassert(((cache_ptr->epoch_markers)[i]).prev == NULL);
+
+ (cache_ptr->epoch_marker_active)[i] = TRUE;
+
+ cache_ptr->epoch_marker_ringbuf_last =
+ (cache_ptr->epoch_marker_ringbuf_last + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
+ (cache_ptr->epoch_marker_ringbuf)[cache_ptr->epoch_marker_ringbuf_last] = i;
+ if (cache_ptr->epoch_marker_ringbuf_size >= H5C__MAX_EPOCH_MARKERS)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow")
+
+ cache_ptr->epoch_marker_ringbuf_size += 1;
+
+ H5C__DLL_PREPEND(&(cache_ptr->epoch_markers[i]), cache_ptr->LRU_head_ptr, cache_ptr->LRU_tail_ptr,
+ cache_ptr->LRU_list_len, cache_ptr->LRU_list_size, FAIL)
+
+ cache_ptr->epoch_markers_active += 1;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__autoadjust__ageout__insert_new_marker() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__autoadjust__ageout__remove_all_markers
+ *
+ * Purpose: Remove all epoch markers from the LRU list and mark them
+ * as inactive.
+ *
+ * Return: SUCCEED on success/FAIL on failure.
+ *
+ * Programmer: John Mainzer, 11/22/04
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__autoadjust__ageout__remove_all_markers(H5C_t *cache_ptr)
+{
+ int ring_buf_index;
+ int i;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ while (cache_ptr->epoch_markers_active > 0) {
+ /* get the index of the last epoch marker in the LRU list
+ * and remove it from the ring buffer.
+ */
+
+ ring_buf_index = cache_ptr->epoch_marker_ringbuf_first;
+ i = (cache_ptr->epoch_marker_ringbuf)[ring_buf_index];
+
+ cache_ptr->epoch_marker_ringbuf_first =
+ (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
+
+ if (cache_ptr->epoch_marker_ringbuf_size <= 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow")
+ cache_ptr->epoch_marker_ringbuf_size -= 1;
+
+ if (cache_ptr->epoch_marker_active[i] != TRUE)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
+
+ /* remove the epoch marker from the LRU list */
+ H5C__DLL_REMOVE(&(cache_ptr->epoch_markers[i]), cache_ptr->LRU_head_ptr, cache_ptr->LRU_tail_ptr,
+ cache_ptr->LRU_list_len, cache_ptr->LRU_list_size, FAIL)
+
+ /* mark the epoch marker as unused. */
+ cache_ptr->epoch_marker_active[i] = FALSE;
+
+ HDassert(cache_ptr->epoch_markers[i].addr == (haddr_t)i);
+ HDassert(cache_ptr->epoch_markers[i].next == NULL);
+ HDassert(cache_ptr->epoch_markers[i].prev == NULL);
+
+ /* decrement the number of active epoch markers */
+ cache_ptr->epoch_markers_active -= 1;
+
+ HDassert(cache_ptr->epoch_markers_active == cache_ptr->epoch_marker_ringbuf_size);
+ }
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__autoadjust__ageout__remove_all_markers() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__autoadjust__ageout__remove_excess_markers
+ *
+ * Purpose: Remove epoch markers from the end of the LRU list and
+ * mark them as inactive until the number of active markers
+ * equals the current value of
+ * cache_ptr->resize_ctl.epochs_before_eviction.
+ *
+ * Return: SUCCEED on success/FAIL on failure.
+ *
+ * Programmer: John Mainzer, 11/19/04
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__autoadjust__ageout__remove_excess_markers(H5C_t *cache_ptr)
+{
+ int ring_buf_index;
+ int i;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ if (cache_ptr->epoch_markers_active <= cache_ptr->resize_ctl.epochs_before_eviction)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "no excess markers on entry")
+
+ while (cache_ptr->epoch_markers_active > cache_ptr->resize_ctl.epochs_before_eviction) {
+ /* get the index of the last epoch marker in the LRU list
+ * and remove it from the ring buffer.
+ */
+ ring_buf_index = cache_ptr->epoch_marker_ringbuf_first;
+ i = (cache_ptr->epoch_marker_ringbuf)[ring_buf_index];
+
+ cache_ptr->epoch_marker_ringbuf_first =
+ (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
+
+ if (cache_ptr->epoch_marker_ringbuf_size <= 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow")
+ cache_ptr->epoch_marker_ringbuf_size -= 1;
+
+ if (cache_ptr->epoch_marker_active[i] != TRUE)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
+
+ /* remove the epoch marker from the LRU list */
+ H5C__DLL_REMOVE(&(cache_ptr->epoch_markers[i]), cache_ptr->LRU_head_ptr, cache_ptr->LRU_tail_ptr,
+ cache_ptr->LRU_list_len, cache_ptr->LRU_list_size, FAIL)
+
+ /* mark the epoch marker as unused. */
+ cache_ptr->epoch_marker_active[i] = FALSE;
+
+ HDassert(cache_ptr->epoch_markers[i].addr == (haddr_t)i);
+ HDassert(cache_ptr->epoch_markers[i].next == NULL);
+ HDassert(cache_ptr->epoch_markers[i].prev == NULL);
+
+ /* decrement the number of active epoch markers */
+ cache_ptr->epoch_markers_active -= 1;
+
+ HDassert(cache_ptr->epoch_markers_active == cache_ptr->epoch_marker_ringbuf_size);
+ }
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__autoadjust__ageout__remove_excess_markers() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__flash_increase_cache_size
+ *
+ * Purpose: If there are not at least new_entry_size - old_entry_size
+ * bytes of free space in the cache and the current
+ * max_cache_size is less than cache_ptr->resize_ctl.max_size,
+ * perform a flash increase in the cache size and then reset
+ * the full cache hit rate statistics, and exit.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 12/31/07
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__flash_increase_cache_size(H5C_t *cache_ptr, size_t old_entry_size, size_t new_entry_size)
+{
+ size_t new_max_cache_size = 0;
+ size_t old_max_cache_size = 0;
+ size_t new_min_clean_size = 0;
+ size_t old_min_clean_size = 0;
+ size_t space_needed;
+ enum H5C_resize_status status = flash_increase; /* may change */
+ double hit_rate;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->flash_size_increase_possible);
+ HDassert(new_entry_size > cache_ptr->flash_size_increase_threshold);
+ HDassert(old_entry_size < new_entry_size);
+
+ if (old_entry_size >= new_entry_size)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "old_entry_size >= new_entry_size")
+
+ space_needed = new_entry_size - old_entry_size;
+ if (((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) &&
+ (cache_ptr->max_cache_size < cache_ptr->resize_ctl.max_size)) {
+ switch (cache_ptr->resize_ctl.flash_incr_mode) {
+ case H5C_flash_incr__off:
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
+ "flash_size_increase_possible but H5C_flash_incr__off?!")
+ break;
+
+ case H5C_flash_incr__add_space:
+ if (cache_ptr->index_size < cache_ptr->max_cache_size) {
+ HDassert((cache_ptr->max_cache_size - cache_ptr->index_size) < space_needed);
+ space_needed -= cache_ptr->max_cache_size - cache_ptr->index_size;
+ }
+ space_needed = (size_t)(((double)space_needed) * cache_ptr->resize_ctl.flash_multiple);
+ new_max_cache_size = cache_ptr->max_cache_size + space_needed;
+ break;
+
+ default: /* should be unreachable */
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
+ break;
+ }
+
+ if (new_max_cache_size > cache_ptr->resize_ctl.max_size)
+ new_max_cache_size = cache_ptr->resize_ctl.max_size;
+ HDassert(new_max_cache_size > cache_ptr->max_cache_size);
+
+ new_min_clean_size = (size_t)((double)new_max_cache_size * cache_ptr->resize_ctl.min_clean_fraction);
+ HDassert(new_min_clean_size <= new_max_cache_size);
+
+ old_max_cache_size = cache_ptr->max_cache_size;
+ old_min_clean_size = cache_ptr->min_clean_size;
+
+ cache_ptr->max_cache_size = new_max_cache_size;
+ cache_ptr->min_clean_size = new_min_clean_size;
+
+ /* update flash cache size increase fields as appropriate */
+ HDassert(cache_ptr->flash_size_increase_possible);
+
+ switch (cache_ptr->resize_ctl.flash_incr_mode) {
+ case H5C_flash_incr__off:
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
+ "flash_size_increase_possible but H5C_flash_incr__off?!")
+ break;
+
+ case H5C_flash_incr__add_space:
+ cache_ptr->flash_size_increase_threshold =
+ (size_t)((double)cache_ptr->max_cache_size * cache_ptr->resize_ctl.flash_threshold);
+ break;
+
+ default: /* should be unreachable */
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
+ break;
+ }
+
+ /* note that we don't cycle the epoch markers. We can
+ * argue either way as to whether we should, but for now
+ * we don't.
+ */
+
+ if (cache_ptr->resize_ctl.rpt_fcn != NULL) {
+ /* get the hit rate for the reporting function. Should still
+ * be good as we haven't reset the hit rate statistics.
+ */
+ if (H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate")
+
+ (cache_ptr->resize_ctl.rpt_fcn)(cache_ptr, H5C__CURR_AUTO_RESIZE_RPT_FCN_VER, hit_rate, status,
+ old_max_cache_size, new_max_cache_size, old_min_clean_size,
+ new_min_clean_size);
+ }
+
+ if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
+ /* this should be impossible... */
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed")
+ }
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__flash_increase_cache_size() */
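+
+/* Worked example for the flash increase arithmetic above (illustrative
+ * only; values are hypothetical): suppose max_cache_size = 4 MiB,
+ * index_size = 3.5 MiB, old_entry_size = 512 KiB, new_entry_size =
+ * 1536 KiB, and flash_multiple = 2.0. Then space_needed starts at
+ * 1 MiB, the 0.5 MiB of existing slack is subtracted to leave 0.5 MiB,
+ * the multiple scales that to 1 MiB, and the new maximum cache size
+ * becomes 4 MiB + 1 MiB = 5 MiB (clipped to resize_ctl.max_size if
+ * necessary).
+ */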
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__flush_invalidate_cache
+ *
+ * Purpose: Flush and destroy the entries contained in the target
+ * cache.
+ *
+ * If the cache contains protected entries, the function will
+ * fail, as protected entries cannot be either flushed or
+ * destroyed. However all unprotected entries should be
+ * flushed and destroyed before the function returns failure.
+ *
+ * While pinned entries can usually be flushed, they cannot
+ * be destroyed. However, they should be unpinned when all
+ * the entries that reference them have been destroyed (thus
+ * reducing the pinned entry's reference count to 0, allowing
+ * it to be unpinned).
+ *
+ * If pinned entries are present, the function makes repeated
+ * passes through the cache, flushing all dirty entries
+ * (including the pinned dirty entries where permitted) and
+ * destroying all unpinned entries. This process is repeated
+ * until either the cache is empty, or the number of pinned
+ * entries stops decreasing on each pass.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 3/24/05
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__flush_invalidate_cache(H5F_t *f, unsigned flags)
+{
+ H5C_t *cache_ptr;
+ H5C_ring_t ring;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_PACKAGE
+
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_ptr);
+ HDassert(cache_ptr->slist_enabled);
+
+#ifdef H5C_DO_SANITY_CHECKS
+ {
+ int32_t i;
+ uint32_t index_len = 0;
+ uint32_t slist_len = 0;
+ size_t index_size = (size_t)0;
+ size_t clean_index_size = (size_t)0;
+ size_t dirty_index_size = (size_t)0;
+ size_t slist_size = (size_t)0;
+
+ HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+
+ for (i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
+ index_len += cache_ptr->index_ring_len[i];
+ index_size += cache_ptr->index_ring_size[i];
+ clean_index_size += cache_ptr->clean_index_ring_size[i];
+ dirty_index_size += cache_ptr->dirty_index_ring_size[i];
+
+ slist_len += cache_ptr->slist_ring_len[i];
+ slist_size += cache_ptr->slist_ring_size[i];
+ } /* end for */
+
+ HDassert(cache_ptr->index_len == index_len);
+ HDassert(cache_ptr->index_size == index_size);
+ HDassert(cache_ptr->clean_index_size == clean_index_size);
+ HDassert(cache_ptr->dirty_index_size == dirty_index_size);
+ HDassert(cache_ptr->slist_len == slist_len);
+ HDassert(cache_ptr->slist_size == slist_size);
+ }
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ /* remove ageout markers if present */
+ if (cache_ptr->epoch_markers_active > 0)
+ if (H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers")
+
+ /* flush invalidate each ring, starting from the outermost ring and
+ * working inward.
+ */
+ ring = H5C_RING_USER;
+ while (ring < H5C_RING_NTYPES) {
+ if (H5C__flush_invalidate_ring(f, ring, flags) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate ring failed")
+ ring++;
+ } /* end while */
+
+#ifndef NDEBUG
+ /* Invariants, after destroying all entries in the hash table */
+ if (!(flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG)) {
+ HDassert(cache_ptr->index_size == 0);
+ HDassert(cache_ptr->clean_index_size == 0);
+ HDassert(cache_ptr->pel_len == 0);
+ HDassert(cache_ptr->pel_size == 0);
+ } /* end if */
+ else {
+ H5C_cache_entry_t *entry_ptr; /* Cache entry */
+ unsigned u; /* Local index variable */
+
+ /* All rings except ring 4 should be empty now */
+ /* (Ring 4 has the superblock) */
+ for (u = H5C_RING_USER; u < H5C_RING_SB; u++) {
+ HDassert(cache_ptr->index_ring_len[u] == 0);
+ HDassert(cache_ptr->index_ring_size[u] == 0);
+ HDassert(cache_ptr->clean_index_ring_size[u] == 0);
+ } /* end for */
+
+ /* Check that any remaining pinned entries are in the superblock ring */
+ entry_ptr = cache_ptr->pel_head_ptr;
+ while (entry_ptr) {
+ /* Check ring */
+ HDassert(entry_ptr->ring == H5C_RING_SB);
+
+ /* Advance to next entry in pinned entry list */
+ entry_ptr = entry_ptr->next;
+ } /* end while */
+ } /* end else */
+
+ HDassert(cache_ptr->dirty_index_size == 0);
+ HDassert(cache_ptr->slist_len == 0);
+ HDassert(cache_ptr->slist_size == 0);
+ HDassert(cache_ptr->pl_len == 0);
+ HDassert(cache_ptr->pl_size == 0);
+ HDassert(cache_ptr->LRU_list_len == 0);
+ HDassert(cache_ptr->LRU_list_size == 0);
+#endif
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__flush_invalidate_cache() */
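+
+/* Note on ordering: the loop above visits rings from H5C_RING_USER
+ * upward, so the outermost (user metadata) ring is flushed and
+ * destroyed first, and the superblock ring (H5C_RING_SB, per the
+ * assertions above) last. The ring mechanism is designed so that
+ * flushing an entry may dirty entries in its own or in inner rings
+ * only, which is what lets each ring be emptied in a single
+ * outside-in sweep.
+ */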
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__flush_invalidate_ring
+ *
+ * Purpose: Flush and destroy the entries contained in the target
+ * cache and ring.
+ *
+ * If the ring contains protected entries, the function will
+ * fail, as protected entries cannot be either flushed or
+ * destroyed. However all unprotected entries should be
+ * flushed and destroyed before the function returns failure.
+ *
+ * While pinned entries can usually be flushed, they cannot
+ * be destroyed. However, they should be unpinned when all
+ * the entries that reference them have been destroyed (thus
+ * reducing the pinned entry's reference count to 0, allowing
+ * it to be unpinned).
+ *
+ * If pinned entries are present, the function makes repeated
+ * passes through the cache, flushing all dirty entries
+ * (including the pinned dirty entries where permitted) and
+ * destroying all unpinned entries. This process is repeated
+ * until either the cache is empty, or the number of pinned
+ * entries stops decreasing on each pass.
+ *
+ * If flush dependencies appear in the target ring, the
+ * function makes repeated passes through the cache flushing
+ * entries in flush dependency order.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 9/1/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
+{
+ H5C_t *cache_ptr;
+ hbool_t restart_slist_scan;
+ uint32_t protected_entries = 0;
+ int32_t i;
+ uint32_t cur_ring_pel_len;
+ uint32_t old_ring_pel_len;
+ unsigned cooked_flags;
+ unsigned evict_flags;
+ H5SL_node_t *node_ptr = NULL;
+ H5C_cache_entry_t *entry_ptr = NULL;
+ H5C_cache_entry_t *next_entry_ptr = NULL;
+#ifdef H5C_DO_SANITY_CHECKS
+ uint32_t initial_slist_len = 0;
+ size_t initial_slist_size = 0;
+#endif /* H5C_DO_SANITY_CHECKS */
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_PACKAGE
+
+ HDassert(f);
+ HDassert(f->shared);
+
+ cache_ptr = f->shared->cache;
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_enabled);
+ HDassert(cache_ptr->slist_ptr);
+ HDassert(ring > H5C_RING_UNDEFINED);
+ HDassert(ring < H5C_RING_NTYPES);
+
+ HDassert(cache_ptr->epoch_markers_active == 0);
+
+ /* Filter out the flags that are not relevant to the flush/invalidate.
+ */
+ cooked_flags = flags & H5C__FLUSH_CLEAR_ONLY_FLAG;
+ evict_flags = flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG;
+
+ /* The flush procedure here is a bit strange.
+ *
+ * In the outer while loop we make at least one pass through the
+ * cache, and then repeat until either all the pinned entries in
+ * the ring unpin themselves, or until the number of pinned entries
+ * in the ring stops declining. In this latter case, we scream and die.
+ *
+ * Since the fractal heap can dirty, resize, and/or move entries
+ * in its flush callback, it is possible that the cache will still
+ * contain dirty entries at this point. If so, we must make more
+ * passes through the skip list to allow it to empty.
+ *
+ * Further, since clean entries can be dirtied, resized, and/or moved
+ * as the result of a flush callback (either the entry's own, or that
+ * for some other cache entry), we can no longer promise to flush
+ * the cache entries in increasing address order.
+ *
+ * Instead, we make a pass through
+ * the skip list, and then a pass through the "clean" entries, and
+ * then repeat as needed. Thus it is quite possible that an
+ * entry will be evicted from the cache only to be re-loaded later
+ * in the flush process.
+ *
+ * The bottom line is that entries will probably be flushed in close
+ * to increasing address order, but there are no guarantees.
+ */
+
+ /* compute the number of pinned entries in this ring */
+ entry_ptr = cache_ptr->pel_head_ptr;
+ cur_ring_pel_len = 0;
+ while (entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring >= ring);
+ if (entry_ptr->ring == ring)
+ cur_ring_pel_len++;
+
+ entry_ptr = entry_ptr->next;
+ } /* end while */
+ old_ring_pel_len = cur_ring_pel_len;
+
+ while (cache_ptr->index_ring_len[ring] > 0) {
+ /* first, try to flush-destroy any dirty entries. Do this by
+ * making a scan through the slist. Note that new dirty entries
+ * may be created by the flush call backs. Thus it is possible
+ * that the slist will not be empty after we finish the scan.
+ */
+
+#ifdef H5C_DO_SANITY_CHECKS
+ /* Depending on circumstances, H5C__flush_single_entry() will
+ * remove dirty entries from the slist as it flushes them.
+ * Thus for sanity checks we must make note of the initial
+ * slist length and size before we do any flushes.
+ */
+ initial_slist_len = cache_ptr->slist_len;
+ initial_slist_size = cache_ptr->slist_size;
+
+ /* There is also the possibility that entries will be
+ * dirtied, resized, moved, and/or removed from the cache
+ * as the result of calls to the flush callbacks. We use
+ * the slist_len_increase and slist_size_increase
+ * fields in struct H5C_t to track these changes for purposes
+ * of sanity checking.
+ *
+ * To this end, we must zero these fields before we start
+ * the pass through the slist.
+ */
+ cache_ptr->slist_len_increase = 0;
+ cache_ptr->slist_size_increase = 0;
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ /* Set the cache_ptr->slist_changed to false.
+ *
+ * This flag is set to TRUE by H5C__flush_single_entry if the slist
+ * is modified by a pre_serialize, serialize, or notify callback.
+ *
+ * H5C__flush_invalidate_ring() uses this flag to detect any
+ * modifications to the slist that might corrupt the scan of
+ * the slist -- and restart the scan in this event.
+ */
+ cache_ptr->slist_changed = FALSE;
+
+ /* this done, start the scan of the slist */
+ restart_slist_scan = TRUE;
+ while (restart_slist_scan || (node_ptr != NULL)) {
+ if (restart_slist_scan) {
+ restart_slist_scan = FALSE;
+
+ /* Start at beginning of skip list */
+ node_ptr = H5SL_first(cache_ptr->slist_ptr);
+ if (node_ptr == NULL)
+ /* the slist is empty -- break out of inner loop */
+ break;
+
+ /* Get cache entry for this node */
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ if (NULL == next_entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+
+ HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(next_entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->in_slist);
+ HDassert(next_entry_ptr->ring >= ring);
+ } /* end if */
+
+ entry_ptr = next_entry_ptr;
+
+ /* It is possible that entries will be dirtied, resized,
+ * flushed, or removed from the cache via the take ownership
+ * flag as the result of pre_serialize or serialize callbacks.
+ *
+ * This in turn can corrupt the scan through the slist.
+ *
+ * We test for slist modifications in the pre_serialize
+ * and serialize callbacks, and restart the scan of the
+ * slist if we find them. However, it is best to do some extra
+ * sanity checking just in case.
+ */
+ HDassert(entry_ptr != NULL);
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->in_slist);
+ HDassert(entry_ptr->is_dirty);
+ HDassert(entry_ptr->ring >= ring);
+
+ /* increment node pointer now, before we delete its target
+ * from the slist.
+ */
+ node_ptr = H5SL_next(node_ptr);
+ if (node_ptr != NULL) {
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ if (NULL == next_entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+
+ HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(next_entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->in_slist);
+ HDassert(next_entry_ptr->ring >= ring);
+ HDassert(entry_ptr != next_entry_ptr);
+ } /* end if */
+ else
+ next_entry_ptr = NULL;
+
+ /* Note that we now remove nodes from the slist as we flush
+ * the associated entries, instead of leaving them there
+ * until we are done, and then destroying all nodes in
+ * the slist.
+ *
+ * While this optimization used to be easy, the possibility
+ * of new entries being added to the slist in the midst of the
+ * flush means we must now keep the slist in canonical form at
+ * all times.
+ */
+ if (((!entry_ptr->flush_me_last) ||
+ ((entry_ptr->flush_me_last) && (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
+ (entry_ptr->flush_dep_nchildren == 0) && (entry_ptr->ring == ring)) {
+ if (entry_ptr->is_protected) {
+ /* We have major problems -- but lets flush
+ * everything we can before we flag an error.
+ */
+ protected_entries++;
+ } /* end if */
+ else if (entry_ptr->is_pinned) {
+ if (H5C__flush_single_entry(f, entry_ptr, H5C__DURING_FLUSH_FLAG) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed")
+
+ if (cache_ptr->slist_changed) {
+ /* The slist has been modified by something
+ * other than the simple removal of
+ * the flushed entry after the flush.
+ *
+ * This has the potential to corrupt the
+ * scan through the slist, so restart it.
+ */
+ restart_slist_scan = TRUE;
+ cache_ptr->slist_changed = FALSE;
+ H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr);
+ } /* end if */
+ } /* end else-if */
+ else {
+ if (H5C__flush_single_entry(f, entry_ptr,
+ (cooked_flags | H5C__DURING_FLUSH_FLAG |
+ H5C__FLUSH_INVALIDATE_FLAG |
+ H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry flush destroy failed")
+
+ if (cache_ptr->slist_changed) {
+ /* The slist has been modified by something
+ * other than the simple removal of
+ * the flushed entry after the flush.
+ *
+ * This has the potential to corrupt the
+ * scan through the slist, so restart it.
+ */
+ restart_slist_scan = TRUE;
+ cache_ptr->slist_changed = FALSE;
+ H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
+ } /* end if */
+ } /* end else */
+ } /* end if */
+ } /* end while loop scanning skip list */
+
+#ifdef H5C_DO_SANITY_CHECKS
+ /* It is possible that entries were added to the slist during
+ * the scan, either before or after the scan pointer. The following
+ * asserts take this into account.
+ *
+ * Don't bother with the sanity checks if node_ptr != NULL, as
+ * in this case we broke out of the loop because it got changed
+ * out from under us.
+ */
+
+ if (node_ptr == NULL) {
+ HDassert(cache_ptr->slist_len ==
+ (uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase));
+ HDassert(cache_ptr->slist_size ==
+ (size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase));
+ } /* end if */
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ /* Since we are doing a destroy, we must make a pass through
+ * the hash table and try to flush-destroy all entries that
+ * remain.
+ *
+ * It used to be that all entries remaining in the cache at
+ * this point had to be clean, but with the fractal heap mods
+ * this may not be the case. If so, we will flush entries out
+ * in increasing address order.
+ *
+ * Writes to disk are possible here.
+ */
+
+ /* Reset the counters so that we can detect insertions, loads,
+ * and moves caused by the pre_serialize and serialize calls.
+ */
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
+
+ next_entry_ptr = cache_ptr->il_head;
+ while (next_entry_ptr != NULL) {
+ entry_ptr = next_entry_ptr;
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring >= ring);
+
+ next_entry_ptr = entry_ptr->il_next;
+ HDassert((next_entry_ptr == NULL) || (next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC));
+
+ if (((!entry_ptr->flush_me_last) ||
+ (entry_ptr->flush_me_last && (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
+ (entry_ptr->flush_dep_nchildren == 0) && (entry_ptr->ring == ring)) {
+
+ if (entry_ptr->is_protected) {
+ /* we have major problems -- but lets flush and
+ * destroy everything we can before we flag an
+ * error.
+ */
+ protected_entries++;
+
+ if (!entry_ptr->in_slist)
+ HDassert(!(entry_ptr->is_dirty));
+ } /* end if */
+ else if (!entry_ptr->is_pinned) {
+ /* if *entry_ptr is dirty, it is possible
+ * that one or more other entries may be
+ * either removed from the cache, loaded
+ * into the cache, or moved to a new location
+ * in the file as a side effect of the flush.
+ *
+ * It's also possible that removing a clean
+ * entry will remove the last child of a proxy
+ * entry, allowing it to be removed also and
+ * invalidating the next_entry_ptr.
+ *
+ * If either of these happen, and one of the target
+ * or proxy entries happens to be the next entry in
+ * the hash bucket, we could find ourselves
+ * either scanning a non-existent entry, scanning
+ * through a different bucket, or skipping an entry.
+ *
+ * None of these are good, so restart the
+ * scan at the head of the hash bucket
+ * after the flush if we detect that the next_entry_ptr
+ * becomes invalid.
+ *
+ * This is not as inefficient as it might seem,
+ * as hash buckets typically have at most two
+ * or three entries.
+ */
+ cache_ptr->entry_watched_for_removal = next_entry_ptr;
+ if (H5C__flush_single_entry(f, entry_ptr,
+ (cooked_flags | H5C__DURING_FLUSH_FLAG |
+ H5C__FLUSH_INVALIDATE_FLAG |
+ H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed")
+
+ /* Restart the index list scan if necessary. Must
+ * do this if the next entry is evicted, and also if
+ * one or more entries are inserted, loaded, or moved
+ * as these operations can result in part of the scan
+ * being skipped -- which can cause a spurious failure
+ * if this results in the size of the pinned entry
+ * list failing to decline during the pass.
+ */
+ if (((NULL != next_entry_ptr) && (NULL == cache_ptr->entry_watched_for_removal)) ||
+ (cache_ptr->entries_loaded_counter > 0) ||
+ (cache_ptr->entries_inserted_counter > 0) ||
+ (cache_ptr->entries_relocated_counter > 0)) {
+
+ next_entry_ptr = cache_ptr->il_head;
+
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
+
+ H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr)
+ } /* end if */
+ else
+ cache_ptr->entry_watched_for_removal = NULL;
+ } /* end if */
+ } /* end if */
+ } /* end while loop scanning index list */
+
+ /* We can't do anything if entries are pinned. The
+ * hope is that the entries will be unpinned as the
+ * result of destroys of entries that reference them.
+ *
+ * We detect this by noting the change in the number
+ * of pinned entries from pass to pass. If it stops
+ * shrinking before it hits zero, we scream and die.
+ */
+ old_ring_pel_len = cur_ring_pel_len;
+ entry_ptr = cache_ptr->pel_head_ptr;
+ cur_ring_pel_len = 0;
+
+ while (entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring >= ring);
+
+ if (entry_ptr->ring == ring)
+ cur_ring_pel_len++;
+
+ entry_ptr = entry_ptr->next;
+ } /* end while */
+
+ /* Check if the number of pinned entries in the ring is positive, and
+ * it is not declining. Scream and die if so.
+ */
+ if ((cur_ring_pel_len > 0) && (cur_ring_pel_len >= old_ring_pel_len)) {
+ /* Don't error if allowed to have pinned entries remaining */
+ if (evict_flags)
+ HGOTO_DONE(SUCCEED)
+
+ HGOTO_ERROR(
+ H5E_CACHE, H5E_CANTFLUSH, FAIL,
+ "Pinned entry count not decreasing, cur_ring_pel_len = %d, old_ring_pel_len = %d, ring = %d",
+ (int)cur_ring_pel_len, (int)old_ring_pel_len, (int)ring)
+ } /* end if */
+
+ HDassert(protected_entries == cache_ptr->pl_len);
+
+ if ((protected_entries > 0) && (protected_entries == cache_ptr->index_len))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL,
+ "Only protected entries left in cache, protected_entries = %d",
+ (int)protected_entries)
+ } /* main while loop */
+
+ /* Invariants, after destroying all entries in the ring */
+ for (i = (int)H5C_RING_UNDEFINED; i <= (int)ring; i++) {
+ HDassert(cache_ptr->index_ring_len[i] == 0);
+ HDassert(cache_ptr->index_ring_size[i] == (size_t)0);
+ HDassert(cache_ptr->clean_index_ring_size[i] == (size_t)0);
+ HDassert(cache_ptr->dirty_index_ring_size[i] == (size_t)0);
+
+ HDassert(cache_ptr->slist_ring_len[i] == 0);
+ HDassert(cache_ptr->slist_ring_size[i] == (size_t)0);
+ } /* end for */
+
+ HDassert(protected_entries <= cache_ptr->pl_len);
+
+ if (protected_entries > 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Cache has protected entries")
+ else if (cur_ring_pel_len > 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't unpin all pinned entries in ring")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__flush_invalidate_ring() */
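+
+/* The outer loop above terminates via a simple fixed-point argument:
+ * each pass either empties the ring, strictly decreases the number of
+ * pinned entries in it, or fails. Stripped of the cache machinery
+ * (names below are hypothetical), the control flow is:
+ *
+ *      old_len = count_pinned(ring);
+ *      while (ring_not_empty(ring)) {
+ *          make_one_pass(ring);         -- flush/destroy what we can
+ *          cur_len = count_pinned(ring);
+ *          if (cur_len > 0 && cur_len >= old_len)
+ *              fail();                  -- no progress: scream and die
+ *          old_len = cur_len;
+ *      }
+ */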
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__flush_ring
+ *
+ * Purpose: Flush the entries contained in the specified cache and
+ * ring. All entries in rings outside the specified ring
+ * must have been flushed on entry.
+ *
+ * If the cache contains protected entries in the specified
+ * ring, the function will fail, as protected entries cannot
+ * be flushed. However all unprotected entries in the target
+ * ring should be flushed before the function returns failure.
+ *
+ * If flush dependencies appear in the target ring, the
+ * function makes repeated passes through the slist flushing
+ * entries in flush dependency order.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 9/1/15
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
+{
+ H5C_t *cache_ptr = f->shared->cache;
+ hbool_t flushed_entries_last_pass;
+ hbool_t flush_marked_entries;
+ hbool_t ignore_protected;
+ hbool_t tried_to_flush_protected_entry = FALSE;
+ hbool_t restart_slist_scan;
+ uint32_t protected_entries = 0;
+ H5SL_node_t *node_ptr = NULL;
+ H5C_cache_entry_t *entry_ptr = NULL;
+ H5C_cache_entry_t *next_entry_ptr = NULL;
+#ifdef H5C_DO_SANITY_CHECKS
+ uint32_t initial_slist_len = 0;
+ size_t initial_slist_size = 0;
+#endif /* H5C_DO_SANITY_CHECKS */
+ int i;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_PACKAGE
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_enabled);
+ HDassert(cache_ptr->slist_ptr);
+ HDassert((flags & H5C__FLUSH_INVALIDATE_FLAG) == 0);
+ HDassert(ring > H5C_RING_UNDEFINED);
+ HDassert(ring < H5C_RING_NTYPES);
+
+#ifdef H5C_DO_EXTREME_SANITY_CHECKS
+ if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
+ H5C__validate_lru_list(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+ ignore_protected = ((flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0);
+ flush_marked_entries = ((flags & H5C__FLUSH_MARKED_ENTRIES_FLAG) != 0);
+
+ if (!flush_marked_entries)
+ for (i = (int)H5C_RING_UNDEFINED; i < (int)ring; i++)
+ HDassert(cache_ptr->slist_ring_len[i] == 0);
+
+ HDassert(cache_ptr->flush_in_progress);
+
+ /* When we are only flushing marked entries, the slist will usually
+ * still contain entries when we have flushed everything we should.
+ * Thus we track whether we have flushed any entries in the last
+ * pass, and terminate if we haven't.
+ */
+ flushed_entries_last_pass = TRUE;
+
+ /* Set the cache_ptr->slist_changed to false.
+ *
+ * This flag is set to TRUE by H5C__flush_single_entry if the
+ * slist is modified by a pre_serialize, serialize, or notify callback.
+ * H5C_flush_cache uses this flag to detect any modifications
+ * to the slist that might corrupt the scan of the slist -- and
+ * restart the scan in this event.
+ */
+ cache_ptr->slist_changed = FALSE;
+
+ while ((cache_ptr->slist_ring_len[ring] > 0) && (protected_entries == 0) && (flushed_entries_last_pass)) {
+ flushed_entries_last_pass = FALSE;
+
+#ifdef H5C_DO_SANITY_CHECKS
+ /* For sanity checking, try to verify that the skip list has
+ * the expected size and number of entries at the end of each
+ * internal while loop (see below).
+ *
+ * Doing this gets a bit tricky, as depending on flags, we may
+ * or may not flush all the entries in the slist.
+ *
+ * To make things more entertaining, with the advent of the
+ * fractal heap, the entry serialize callback can cause entries
+ * to be dirtied, resized, and/or moved. Also, the
+ * pre_serialize callback can result in an entry being
+ * removed from the cache via the take ownership flag.
+ *
+ * To deal with this, we first make note of the initial
+ * skip list length and size:
+ */
+ initial_slist_len = cache_ptr->slist_len;
+ initial_slist_size = cache_ptr->slist_size;
+
+ /* As mentioned above, there is the possibility that
+ * entries will be dirtied, resized, flushed, or removed
+ * from the cache via the take ownership flag during
+ * our pass through the skip list. To capture the number
+ * of entries added, and the skip list size delta,
+ * zero the slist_len_increase and slist_size_increase of
+ * the cache's instance of H5C_t. These fields will be
+ * updated elsewhere to account for slist insertions and/or
+ * dirty entry size changes.
+ */
+ cache_ptr->slist_len_increase = 0;
+ cache_ptr->slist_size_increase = 0;
+
+ /* at the end of the loop, use these values to compute the
+ * expected slist length and size and compare this with the
+ * value recorded in the cache's instance of H5C_t.
+ */
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ restart_slist_scan = TRUE;
+ while ((restart_slist_scan) || (node_ptr != NULL)) {
+ if (restart_slist_scan) {
+ restart_slist_scan = FALSE;
+
+ /* Start at beginning of skip list */
+ node_ptr = H5SL_first(cache_ptr->slist_ptr);
+ if (node_ptr == NULL)
+ /* the slist is empty -- break out of inner loop */
+ break;
+
+ /* Get cache entry for this node */
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ if (NULL == next_entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+
+ HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(next_entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->in_slist);
+ } /* end if */
+
+ entry_ptr = next_entry_ptr;
+
+ /* With the advent of the fractal heap, the free space
+ * manager, and the version 3 cache, it is possible
+ * that the pre-serialize or serialize callback will
+ * dirty, resize, or take ownership of other entries
+ * in the cache.
+ *
+ * To deal with this, there is code to detect any
+ * change in the skip list not directly under the control
+ * of this function. If such modifications are detected,
+ * we must re-start the scan of the skip list to avoid
+ * the possibility that the target of the next_entry_ptr
+ * may have been flushed or deleted from the cache.
+ *
+ * To verify that all such possibilities have been dealt
+ * with, we do a bit of extra sanity checking on
+ * entry_ptr.
+ */
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->in_slist);
+ HDassert(entry_ptr->is_dirty);
+
+ if (!flush_marked_entries || entry_ptr->flush_marker)
+ HDassert(entry_ptr->ring >= ring);
+
+ /* Advance node pointer now, before we delete its target
+ * from the slist.
+ */
+ node_ptr = H5SL_next(node_ptr);
+ if (node_ptr != NULL) {
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ if (NULL == next_entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+
+ HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(next_entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->in_slist);
+
+ if (!flush_marked_entries || next_entry_ptr->flush_marker)
+ HDassert(next_entry_ptr->ring >= ring);
+
+ HDassert(entry_ptr != next_entry_ptr);
+ } /* end if */
+ else
+ next_entry_ptr = NULL;
+
+ if ((!flush_marked_entries || entry_ptr->flush_marker) &&
+ ((!entry_ptr->flush_me_last) ||
+ ((entry_ptr->flush_me_last) && ((cache_ptr->num_last_entries >= cache_ptr->slist_len) ||
+ (flush_marked_entries && entry_ptr->flush_marker)))) &&
+ ((entry_ptr->flush_dep_nchildren == 0) || (entry_ptr->flush_dep_ndirty_children == 0)) &&
+ (entry_ptr->ring == ring)) {
+
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
+
+ if (entry_ptr->is_protected) {
+ /* we probably have major problems -- but lets
+ * flush everything we can before we decide
+ * whether to flag an error.
+ */
+ tried_to_flush_protected_entry = TRUE;
+ protected_entries++;
+ } /* end if */
+ else {
+ if (H5C__flush_single_entry(f, entry_ptr, (flags | H5C__DURING_FLUSH_FLAG)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry")
+
+ if (cache_ptr->slist_changed) {
+ /* The slist has been modified by something
+ * other than the simple removal of
+ * the flushed entry after the flush.
+ *
+ * This has the potential to corrupt the
+ * scan through the slist, so restart it.
+ */
+ restart_slist_scan = TRUE;
+ cache_ptr->slist_changed = FALSE;
+ H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
+ } /* end if */
+
+ flushed_entries_last_pass = TRUE;
+ } /* end else */
+ } /* end if */
+ } /* while ( ( restart_slist_scan ) || ( node_ptr != NULL ) ) */
+
+#ifdef H5C_DO_SANITY_CHECKS
+ /* Verify that the slist size and length are as expected. */
+ HDassert((uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase) ==
+ cache_ptr->slist_len);
+ HDassert((size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase) ==
+ cache_ptr->slist_size);
+#endif /* H5C_DO_SANITY_CHECKS */
+ } /* while */
+
+ HDassert(protected_entries <= cache_ptr->pl_len);
+
+ if (((cache_ptr->pl_len > 0) && !ignore_protected) || tried_to_flush_protected_entry)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "cache has protected items")
+
+#ifdef H5C_DO_SANITY_CHECKS
+ if (!flush_marked_entries) {
+ HDassert(cache_ptr->slist_ring_len[ring] == 0);
+ HDassert(cache_ptr->slist_ring_size[ring] == 0);
+ } /* end if */
+#endif /* H5C_DO_SANITY_CHECKS */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__flush_ring() */
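+
+/* Worked example for the slist bookkeeping checks above (illustrative
+ * only; numbers are hypothetical): suppose a pass starts with
+ * initial_slist_len = 10, the flushes remove 7 entries from the slist,
+ * and the serialize callbacks dirty 2 new ones. The bookkeeping then
+ * leaves slist_len_increase at 2 - 7 = -5, and the assertion requires
+ * the final slist_len to be 10 + (-5) = 5.
+ */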
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__make_space_in_cache
+ *
+ * Purpose: Attempt to evict cache entries until the index_size
+ * is at least needed_space below max_cache_size.
+ *
+ * In passing, also attempt to bring cLRU_list_size to a
+ * value greater than min_clean_size.
+ *
+ * Depending on circumstances, both of these goals may
+ * be impossible, as in parallel mode, we must avoid generating
+ * a write as part of a read (to avoid deadlock in collective
+ * I/O), and in all cases, it is possible (though hopefully
+ * highly unlikely) that the protected list may exceed the
+ * maximum size of the cache.
+ *
+ * Thus the function simply does its best, returning success
+ * unless an error is encountered.
+ *
+ * Observe that this function cannot occasion a read.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 5/14/04
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted)
+{
+ H5C_t *cache_ptr = f->shared->cache;
+#if H5C_COLLECT_CACHE_STATS
+ int32_t clean_entries_skipped = 0;
+ int32_t dirty_pf_entries_skipped = 0;
+ int32_t total_entries_scanned = 0;
+#endif /* H5C_COLLECT_CACHE_STATS */
+ uint32_t entries_examined = 0;
+ uint32_t initial_list_len;
+ size_t empty_space;
+ hbool_t reentrant_call = FALSE;
+ hbool_t prev_is_dirty = FALSE;
+ hbool_t didnt_flush_entry = FALSE;
+ hbool_t restart_scan;
+ H5C_cache_entry_t *entry_ptr;
+ H5C_cache_entry_t *prev_ptr;
+ H5C_cache_entry_t *next_ptr;
+#ifndef NDEBUG
+ uint32_t num_corked_entries = 0;
+#endif
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size));
+
+    /* Check to see if cache_ptr->msic_in_progress is TRUE. If it is, this
+     * is a re-entrant call via a client callback invoked during the make
+     * space in cache process. To avoid infinite recursion, set
+ * reentrant_call to TRUE, and goto done.
+ */
+ if (cache_ptr->msic_in_progress) {
+ reentrant_call = TRUE;
+ HGOTO_DONE(SUCCEED);
+ } /* end if */
+
+ cache_ptr->msic_in_progress = TRUE;
+
+ if (write_permitted) {
+ restart_scan = FALSE;
+ initial_list_len = cache_ptr->LRU_list_len;
+ entry_ptr = cache_ptr->LRU_tail_ptr;
+
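+        /* Compute the current empty space in the cache, guarding
+         * against unsigned underflow when the index size already
+         * exceeds the maximum cache size.
+         */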
+ if (cache_ptr->index_size >= cache_ptr->max_cache_size)
+ empty_space = 0;
+ else
+ empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
+
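+        /* Scan upwards from the tail of the LRU, flushing or evicting
+         * entries until both the space and min clean size targets are
+         * met, the scan limit (twice the initial LRU length) is
+         * reached, or the list is exhausted.
+         */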
+ while ((((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) ||
+ ((empty_space + cache_ptr->clean_index_size) < (cache_ptr->min_clean_size))) &&
+ (entries_examined <= (2 * initial_list_len)) && (entry_ptr != NULL)) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(!(entry_ptr->is_protected));
+ HDassert(!(entry_ptr->is_read_only));
+ HDassert((entry_ptr->ro_ref_count) == 0);
+
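+            /* Record the entry's current LRU neighbors, so that changes
+             * made to the list during the flush below can be detected.
+             */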
+ next_ptr = entry_ptr->next;
+ prev_ptr = entry_ptr->prev;
+
+ if (prev_ptr != NULL)
+ prev_is_dirty = prev_ptr->is_dirty;
+
+ if (entry_ptr->is_dirty && (entry_ptr->tag_info && entry_ptr->tag_info->corked)) {
+ /* Skip "dirty" corked entries. */
+#ifndef NDEBUG
+ ++num_corked_entries;
+#endif
+ didnt_flush_entry = TRUE;
+ }
+ else if ((entry_ptr->type->id != H5AC_EPOCH_MARKER_ID) && !entry_ptr->flush_in_progress &&
+ !entry_ptr->prefetched_dirty) {
+ didnt_flush_entry = FALSE;
+ if (entry_ptr->is_dirty) {
+#if H5C_COLLECT_CACHE_STATS
+ if ((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size)
+ cache_ptr->entries_scanned_to_make_space++;
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ /* reset entries_removed_counter and
+ * last_entry_removed_ptr prior to the call to
+ * H5C__flush_single_entry() so that we can spot
+ * unexpected removals of entries from the cache,
+ * and set the restart_scan flag if proceeding
+ * would be likely to cause us to scan an entry
+ * that is no longer in the cache.
+ */
+ cache_ptr->entries_removed_counter = 0;
+ cache_ptr->last_entry_removed_ptr = NULL;
+
+ if (H5C__flush_single_entry(f, entry_ptr, H5C__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
+
+ if ((cache_ptr->entries_removed_counter > 1) ||
+ (cache_ptr->last_entry_removed_ptr == prev_ptr))
+
+ restart_scan = TRUE;
+ }
+ else if ((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size
+#ifdef H5_HAVE_PARALLEL
+ && !(entry_ptr->coll_access)
+#endif /* H5_HAVE_PARALLEL */
+ ) {
+#if H5C_COLLECT_CACHE_STATS
+ cache_ptr->entries_scanned_to_make_space++;
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if (H5C__flush_single_entry(f, entry_ptr,
+ H5C__FLUSH_INVALIDATE_FLAG |
+ H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
+ }
+ else {
+                    /* We have enough space, so don't flush the clean entry. */
+#if H5C_COLLECT_CACHE_STATS
+ clean_entries_skipped++;
+#endif /* H5C_COLLECT_CACHE_STATS */
+ didnt_flush_entry = TRUE;
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ total_entries_scanned++;
+#endif /* H5C_COLLECT_CACHE_STATS */
+ }
+ else {
+
+ /* Skip epoch markers, entries that are in the process
+ * of being flushed, and entries marked as prefetched_dirty
+ * (occurs in the R/O case only).
+ */
+ didnt_flush_entry = TRUE;
+
+#if H5C_COLLECT_CACHE_STATS
+ if (entry_ptr->prefetched_dirty)
+ dirty_pf_entries_skipped++;
+#endif /* H5C_COLLECT_CACHE_STATS */
+ }
+
+ if (prev_ptr != NULL) {
+ if (didnt_flush_entry)
+ /* epoch markers don't get flushed, and we don't touch
+ * entries that are in the process of being flushed.
+ * Hence no need for sanity checks, as we haven't
+ * flushed anything. Thus just set entry_ptr to prev_ptr
+ * and go on.
+ */
+ entry_ptr = prev_ptr;
+ else if (restart_scan || prev_ptr->is_dirty != prev_is_dirty || prev_ptr->next != next_ptr ||
+ prev_ptr->is_protected || prev_ptr->is_pinned) {
+ /* something has happened to the LRU -- start over
+ * from the tail.
+ */
+ restart_scan = FALSE;
+ entry_ptr = cache_ptr->LRU_tail_ptr;
+ H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
+ }
+ else
+ entry_ptr = prev_ptr;
+ }
+ else
+ entry_ptr = NULL;
+
+ entries_examined++;
+
+ if (cache_ptr->index_size >= cache_ptr->max_cache_size)
+ empty_space = 0;
+ else
+ empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
+
+ HDassert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size));
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ cache_ptr->calls_to_msic++;
+
+ cache_ptr->total_entries_skipped_in_msic += clean_entries_skipped;
+ cache_ptr->total_dirty_pf_entries_skipped_in_msic += dirty_pf_entries_skipped;
+ cache_ptr->total_entries_scanned_in_msic += total_entries_scanned;
+
+ if (clean_entries_skipped > cache_ptr->max_entries_skipped_in_msic)
+ cache_ptr->max_entries_skipped_in_msic = clean_entries_skipped;
+
+ if (dirty_pf_entries_skipped > cache_ptr->max_dirty_pf_entries_skipped_in_msic)
+ cache_ptr->max_dirty_pf_entries_skipped_in_msic = dirty_pf_entries_skipped;
+
+ if (total_entries_scanned > cache_ptr->max_entries_scanned_in_msic)
+ cache_ptr->max_entries_scanned_in_msic = total_entries_scanned;
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ /* NEED: work on a better assert for corked entries */
+ HDassert((entries_examined > (2 * initial_list_len)) ||
+ ((cache_ptr->pl_size + cache_ptr->pel_size + cache_ptr->min_clean_size) >
+ cache_ptr->max_cache_size) ||
+ ((cache_ptr->clean_index_size + empty_space) >= cache_ptr->min_clean_size) ||
+ ((num_corked_entries)));
+#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
+
+ HDassert((entries_examined > (2 * initial_list_len)) ||
+ (cache_ptr->cLRU_list_size <= cache_ptr->clean_index_size));
+ HDassert((entries_examined > (2 * initial_list_len)) ||
+ (cache_ptr->dLRU_list_size <= cache_ptr->dirty_index_size));
+
+#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+ }
+ else {
+ HDassert(H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS);
+
+#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
+ initial_list_len = cache_ptr->cLRU_list_len;
+ entry_ptr = cache_ptr->cLRU_tail_ptr;
+
+ while (((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) &&
+ (entries_examined <= initial_list_len) && (entry_ptr != NULL)) {
+ HDassert(!(entry_ptr->is_protected));
+ HDassert(!(entry_ptr->is_read_only));
+ HDassert((entry_ptr->ro_ref_count) == 0);
+ HDassert(!(entry_ptr->is_dirty));
+
+ prev_ptr = entry_ptr->aux_prev;
+
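+            /* Evict this clean entry, unless it is a dirty prefetched
+             * entry or (in parallel builds) is participating in
+             * collective I/O.
+             */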
+ if (!entry_ptr->prefetched_dirty
+#ifdef H5_HAVE_PARALLEL
+ && !entry_ptr->coll_access
+#endif /* H5_HAVE_PARALLEL */
+ ) {
+ if (H5C__flush_single_entry(
+ f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
+ } /* end if */
+
+ /* we are scanning the clean LRU, so the serialize function
+ * will not be called on any entry -- thus there is no
+ * concern about the list being modified out from under
+ * this function.
+ */
+
+ entry_ptr = prev_ptr;
+ entries_examined++;
+ }
+#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+ }
+
+done:
+ /* Sanity checks */
+ HDassert(cache_ptr->msic_in_progress);
+ if (!reentrant_call)
+ cache_ptr->msic_in_progress = FALSE;
+ HDassert((!reentrant_call) || (cache_ptr->msic_in_progress));
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__make_space_in_cache() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__serialize_cache
+ *
+ * Purpose:     Serialize (i.e., construct an on-disk image of) all entries
+ *              in the metadata cache, including clean entries.
+ *
+ * Note that flush dependencies and "flush me last" flags
+ * must be observed in the serialization process.
+ *
+ * Note also that entries may be loaded, flushed, evicted,
+ * expunged, relocated, resized, or removed from the cache
+ * during this process, just as these actions may occur during
+ * a regular flush.
+ *
+ * However, we are given that the cache will contain no protected
+ * entries on entry to this routine (although entries may be
+ * briefly protected and then unprotected during the serialize
+ * process).
+ *
+ *              The objective of this routine is to serialize all entries
+ *              and force them into their actual locations on disk.
+ *
+ * The initial need for this routine is to settle all entries
+ * in the cache prior to construction of the metadata cache
+ * image so that the size of the cache image can be calculated.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 7/22/15
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__serialize_cache(H5F_t *f)
+{
+#ifdef H5C_DO_SANITY_CHECKS
+ int i;
+ uint32_t index_len = 0;
+ size_t index_size = (size_t)0;
+ size_t clean_index_size = (size_t)0;
+ size_t dirty_index_size = (size_t)0;
+ size_t slist_size = (size_t)0;
+ uint32_t slist_len = 0;
+#endif /* H5C_DO_SANITY_CHECKS */
+ H5C_ring_t ring;
+ H5C_t *cache_ptr;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_ptr);
+
+#ifdef H5C_DO_SANITY_CHECKS
+ HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+
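+    /* Sum the per-ring lengths and sizes, and verify that they match
+     * the cache-wide totals.
+     */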
+ for (i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
+ index_len += cache_ptr->index_ring_len[i];
+ index_size += cache_ptr->index_ring_size[i];
+ clean_index_size += cache_ptr->clean_index_ring_size[i];
+ dirty_index_size += cache_ptr->dirty_index_ring_size[i];
+
+ slist_len += cache_ptr->slist_ring_len[i];
+ slist_size += cache_ptr->slist_ring_size[i];
+ } /* end for */
+
+ HDassert(cache_ptr->index_len == index_len);
+ HDassert(cache_ptr->index_size == index_size);
+ HDassert(cache_ptr->clean_index_size == clean_index_size);
+ HDassert(cache_ptr->dirty_index_size == dirty_index_size);
+ HDassert(cache_ptr->slist_len == slist_len);
+ HDassert(cache_ptr->slist_size == slist_size);
+#endif /* H5C_DO_SANITY_CHECKS */
+
+#ifdef H5C_DO_EXTREME_SANITY_CHECKS
+ if (H5C__validate_protected_entry_list(cache_ptr) < 0 || H5C__validate_pinned_entry_list(cache_ptr) < 0 ||
+ H5C__validate_lru_list(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+#ifndef NDEBUG
+    /* If this is a debug build, set the serialization_count field of
+     * each entry in the cache to zero before we start the serialization.
+     * This allows us to detect the case in which any entry is serialized
+     * more than once (a performance issue), and more importantly, the
+     * case in which any flush dependency parent is serialized more than
+     * once (a correctness issue).
+ */
+ {
+ H5C_cache_entry_t *scan_ptr = NULL;
+
+ scan_ptr = cache_ptr->il_head;
+ while (scan_ptr != NULL) {
+ HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ scan_ptr->serialization_count = 0;
+ scan_ptr = scan_ptr->il_next;
+ } /* end while */
+ } /* end block */
+#endif
+
+    /* Set cache_ptr->serialization_in_progress to TRUE here, and set it
+     * back to FALSE at the end of the function. Must maintain this flag
+ * to support H5C_get_serialization_in_progress(), which is in
+ * turn required to support sanity checking in some cache
+ * clients.
+ */
+ HDassert(!cache_ptr->serialization_in_progress);
+ cache_ptr->serialization_in_progress = TRUE;
+
+ /* Serialize each ring, starting from the outermost ring and
+ * working inward.
+ */
+ ring = H5C_RING_USER;
+ while (ring < H5C_RING_NTYPES) {
+ HDassert(cache_ptr->close_warning_received);
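+        /* Settle the raw data and metadata free space managers as we
+         * reach their respective rings, before serializing the entries
+         * in those rings.
+         */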
+ switch (ring) {
+ case H5C_RING_USER:
+ break;
+
+ case H5C_RING_RDFSM:
+ /* Settle raw data FSM */
+ if (!cache_ptr->rdfsm_settled)
+ if (H5MF_settle_raw_data_fsm(f, &cache_ptr->rdfsm_settled) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "RD FSM settle failed")
+ break;
+
+ case H5C_RING_MDFSM:
+ /* Settle metadata FSM */
+ if (!cache_ptr->mdfsm_settled)
+ if (H5MF_settle_meta_data_fsm(f, &cache_ptr->mdfsm_settled) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "MD FSM settle failed")
+ break;
+
+ case H5C_RING_SBE:
+ case H5C_RING_SB:
+ break;
+
+ default:
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown ring?!?!")
+ break;
+ } /* end switch */
+
+ if (H5C__serialize_ring(f, ring) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "serialize ring failed")
+
+ ring++;
+ } /* end while */
+
+#ifndef NDEBUG
+ /* Verify that no entry has been serialized more than once.
+ * FD parents with multiple serializations should have been caught
+ * elsewhere, so no specific check for them here.
+ */
+ {
+ H5C_cache_entry_t *scan_ptr = NULL;
+
+ scan_ptr = cache_ptr->il_head;
+ while (scan_ptr != NULL) {
+ HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(scan_ptr->serialization_count <= 1);
+
+ scan_ptr = scan_ptr->il_next;
+ } /* end while */
+ } /* end block */
+#endif
+
+done:
+ cache_ptr->serialization_in_progress = FALSE;
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__serialize_cache() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__serialize_ring
+ *
+ * Purpose: Serialize the entries contained in the specified cache and
+ * ring. All entries in rings outside the specified ring
+ * must have been serialized on entry.
+ *
+ * If the cache contains protected entries in the specified
+ * ring, the function will fail, as protected entries cannot
+ *              be serialized. However, all unprotected entries in the
+ * target ring should be serialized before the function
+ * returns failure.
+ *
+ * If flush dependencies appear in the target ring, the
+ * function makes repeated passes through the index list
+ * serializing entries in flush dependency order.
+ *
+ * All entries outside the H5C_RING_SBE are marked for
+ * inclusion in the cache image. Entries in H5C_RING_SBE
+ * and below are marked for exclusion from the image.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 9/11/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__serialize_ring(H5F_t *f, H5C_ring_t ring)
+{
+ hbool_t done = FALSE;
+ H5C_t *cache_ptr;
+ H5C_cache_entry_t *entry_ptr;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(ring > H5C_RING_UNDEFINED);
+ HDassert(ring < H5C_RING_NTYPES);
+
+ HDassert(cache_ptr->serialization_in_progress);
+
+ /* The objective here is to serialize all entries in the cache ring
+ * in flush dependency order.
+ *
+ * The basic algorithm is to scan the cache index list looking for
+ * unserialized entries that are either not in a flush dependency
+ * relationship, or which have no unserialized children. Any such
+ * entry is serialized and its flush dependency parents (if any) are
+     * informed -- allowing them to decrement their unserialized child counts.
+ *
+ * However, this algorithm is complicated by the ability
+     * of client serialization callbacks to perform operations
+     * on the cache which can result in the insertion, deletion,
+ * relocation, resize, dirty, flush, eviction, or removal (via the
+ * take ownership flag) of entries. Changes in the flush dependency
+ * structure are also possible.
+ *
+ * On the other hand, the algorithm is simplified by the fact that
+ * we are serializing, not flushing. Thus, as long as all entries
+ * are serialized correctly, it doesn't matter if we have to go back
+ * and serialize an entry a second time.
+ *
+ * These possible actions result in the following modifications to
+ * the basic algorithm:
+ *
+ * 1) In the event of an entry expunge, eviction or removal, we must
+ * restart the scan as it is possible that the next entry in our
+ * scan is no longer in the cache. Were we to examine this entry,
+ * we would be accessing deallocated memory.
+ *
+     *    2) A resize, dirty, or insertion of an entry may result in the
+     *       increment of a flush dependency parent's dirty and/or
+     *       unserialized child count. In the context of serializing
+     *       the cache, this is a non-issue, as even if we have already
+ * serialized the parent, it will be marked dirty and its image
+ * marked out of date if appropriate when the child is serialized.
+ *
+ * However, this is a major issue for a flush, as were this to happen
+ * in a flush, it would violate the invariant that the flush dependency
+ * feature is intended to enforce. As the metadata cache has no
+ * control over the behavior of cache clients, it has no way of
+     *       preventing this behavior. However, it should detect it if at all
+ * possible.
+ *
+ * Do this by maintaining a count of the number of times each entry is
+ * serialized during a cache serialization. If any flush dependency
+ * parent is serialized more than once, throw an assertion failure.
+ *
+ * 3) An entry relocation will typically change the location of the
+ * entry in the index list. This shouldn't cause problems as we
+ * will scan the index list until we make a complete pass without
+ * finding anything to serialize -- making relocations of either
+ * the current or next entries irrelevant.
+ *
+ * Note that since a relocation may result in our skipping part of
+ * the index list, we must always do at least one more pass through
+ * the index list after an entry relocation.
+ *
+ * 4) Changes in the flush dependency structure are possible on
+     *       entry insertion, load, expunge, eviction, or removal. Destruction
+ * of a flush dependency has no effect, as it can only relax the
+ * flush dependencies. Creation of a flush dependency can create
+ * an unserialized child of a flush dependency parent where all
+ * flush dependency children were previously serialized. Should
+ * this child dirty the flush dependency parent when it is serialized,
+ * the parent will be re-serialized.
+ *
+     *       Per the discussion of 2) above, this is a non-issue for cache
+ * serialization, and a major problem for cache flush. Using the
+ * same detection mechanism, throw an assertion failure if this
+ * condition appears.
+ *
+ * Observe that either eviction or removal of entries as a result of
+ * a serialization is not a problem as long as the flush dependency
+ * tree does not change beyond the removal of a leaf.
+ */
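+    /* Make repeated passes over the index list until a complete pass
+     * encounters no unserialized entries in the target ring (other
+     * than "flush me last" entries, which are handled afterwards).
+     */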
+ while (!done) {
+ /* Reset the counters so that we can detect insertions, loads,
+ * moves, and flush dependency height changes caused by the pre_serialize
+ * and serialize callbacks.
+ */
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
+
+ done = TRUE; /* set to FALSE if any activity in inner loop */
+ entry_ptr = cache_ptr->il_head;
+ while (entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+
+ /* Verify that either the entry is already serialized, or
+ * that it is assigned to either the target or an inner
+ * ring.
+ */
+ HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));
+
+ /* Skip flush me last entries or inner ring entries */
+ if (!entry_ptr->flush_me_last && entry_ptr->ring == ring) {
+
+ /* if we encounter an unserialized entry in the current
+ * ring that is not marked flush me last, we are not done.
+ */
+ if (!entry_ptr->image_up_to_date)
+ done = FALSE;
+
+ /* Serialize the entry if its image is not up to date
+ * and it has no unserialized flush dependency children.
+ */
+ if (!entry_ptr->image_up_to_date && entry_ptr->flush_dep_nunser_children == 0) {
+ HDassert(entry_ptr->serialization_count == 0);
+
+ /* Serialize the entry */
+ if (H5C__serialize_single_entry(f, cache_ptr, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")
+
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
+ HDassert(entry_ptr->serialization_count == 0);
+
+#ifndef NDEBUG
+ /* Increment serialization counter (to detect multiple serializations) */
+ entry_ptr->serialization_count++;
+#endif
+ } /* end if */
+ } /* end if */
+
+ /* Check for the cache being perturbed during the entry serialize */
+ if ((cache_ptr->entries_loaded_counter > 0) || (cache_ptr->entries_inserted_counter > 0) ||
+ (cache_ptr->entries_relocated_counter > 0)) {
+
+#if H5C_COLLECT_CACHE_STATS
+ H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr);
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ /* Reset the counters */
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
+
+ /* Restart scan */
+ entry_ptr = cache_ptr->il_head;
+ } /* end if */
+ else
+ /* Advance to next entry */
+ entry_ptr = entry_ptr->il_next;
+ } /* while ( entry_ptr != NULL ) */
+ } /* while ( ! done ) */
+
+ /* Reset the counters so that we can detect insertions, loads,
+ * moves, and flush dependency height changes caused by the pre_serialize
+ * and serialize callbacks.
+ */
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
+
+ /* At this point, all entries not marked "flush me last" and in
+ * the current ring or outside it should be serialized and have up
+ * to date images. Scan the index list again to serialize the
+ * "flush me last" entries (if they are in the current ring) and to
+ * verify that all other entries have up to date images.
+ */
+ entry_ptr = cache_ptr->il_head;
+ while (entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring > H5C_RING_UNDEFINED);
+ HDassert(entry_ptr->ring < H5C_RING_NTYPES);
+ HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));
+
+ if (entry_ptr->ring == ring) {
+ if (entry_ptr->flush_me_last) {
+ if (!entry_ptr->image_up_to_date) {
+ HDassert(entry_ptr->serialization_count == 0);
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
+
+ /* Serialize the entry */
+ if (H5C__serialize_single_entry(f, cache_ptr, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")
+
+ /* Check for the cache changing */
+ if ((cache_ptr->entries_loaded_counter > 0) ||
+ (cache_ptr->entries_inserted_counter > 0) ||
+ (cache_ptr->entries_relocated_counter > 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
+ "flush_me_last entry serialization triggered restart")
+
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
+ HDassert(entry_ptr->serialization_count == 0);
+#ifndef NDEBUG
+ /* Increment serialization counter (to detect multiple serializations) */
+ entry_ptr->serialization_count++;
+#endif
+ } /* end if */
+ } /* end if */
+ else {
+ HDassert(entry_ptr->image_up_to_date);
+ HDassert(entry_ptr->serialization_count <= 1);
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
+ } /* end else */
+ } /* if ( entry_ptr->ring == ring ) */
+
+ entry_ptr = entry_ptr->il_next;
+ } /* while ( entry_ptr != NULL ) */
+
+done:
+ HDassert(cache_ptr->serialization_in_progress);
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__serialize_ring() */
diff --git a/src/H5Clog.c b/src/H5Clog.c
index 25b3cf0..b0eff4d 100644
--- a/src/H5Clog.c
+++ b/src/H5Clog.c
@@ -100,11 +100,11 @@ H5C_log_set_up(H5C_t *cache, const char log_location[], H5C_log_style_t style, h
/* Set up logging */
if (H5C_LOG_STYLE_JSON == style) {
- if (H5C_log_json_set_up(cache->log_info, log_location, mpi_rank) < 0)
+ if (H5C__log_json_set_up(cache->log_info, log_location, mpi_rank) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to set up json logging")
}
else if (H5C_LOG_STYLE_TRACE == style) {
- if (H5C_log_trace_set_up(cache->log_info, log_location, mpi_rank) < 0)
+ if (H5C__log_trace_set_up(cache->log_info, log_location, mpi_rank) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to set up trace logging")
}
else
diff --git a/src/H5Clog.h b/src/H5Clog.h
index b3636c6..b8ea5ed 100644
--- a/src/H5Clog.h
+++ b/src/H5Clog.h
@@ -132,7 +132,7 @@ H5_DLL herr_t H5C_log_write_remove_entry_msg(H5C_t *cache, const H5C_cache_entry
herr_t fxn_ret_value);
/* Logging-specific setup functions */
-H5_DLL herr_t H5C_log_json_set_up(H5C_log_info_t *log_info, const char log_location[], int mpi_rank);
-H5_DLL herr_t H5C_log_trace_set_up(H5C_log_info_t *log_info, const char log_location[], int mpi_rank);
+H5_DLL herr_t H5C__log_json_set_up(H5C_log_info_t *log_info, const char log_location[], int mpi_rank);
+H5_DLL herr_t H5C__log_trace_set_up(H5C_log_info_t *log_info, const char log_location[], int mpi_rank);
#endif /* H5Clog_H */
diff --git a/src/H5Clog_json.c b/src/H5Clog_json.c
index ad27c94..afb81c5 100644
--- a/src/H5Clog_json.c
+++ b/src/H5Clog_json.c
@@ -181,7 +181,7 @@ done:
} /* H5C__json_write_log_message() */
/*-------------------------------------------------------------------------
- * Function: H5C_log_json_set_up
+ * Function: H5C__log_json_set_up
*
* Purpose: Setup for metadata cache logging.
*
@@ -205,14 +205,14 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5C_log_json_set_up(H5C_log_info_t *log_info, const char log_location[], int mpi_rank)
+H5C__log_json_set_up(H5C_log_info_t *log_info, const char log_location[], int mpi_rank)
{
H5C_log_json_udata_t *json_udata = NULL;
char *file_name = NULL;
size_t n_chars;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_PACKAGE
/* Sanity checks */
HDassert(log_info);
@@ -271,7 +271,7 @@ done:
}
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_log_json_set_up() */
+} /* H5C__log_json_set_up() */
/*-------------------------------------------------------------------------
* Function: H5C__json_tear_down_logging
diff --git a/src/H5Clog_trace.c b/src/H5Clog_trace.c
index 74f33ed..b095e87 100644
--- a/src/H5Clog_trace.c
+++ b/src/H5Clog_trace.c
@@ -176,7 +176,7 @@ done:
} /* H5C__trace_write_log_message() */
/*-------------------------------------------------------------------------
- * Function: H5C_log_trace_set_up
+ * Function: H5C__log_trace_set_up
*
* Purpose: Setup for metadata cache logging.
*
@@ -200,14 +200,14 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5C_log_trace_set_up(H5C_log_info_t *log_info, const char log_location[], int mpi_rank)
+H5C__log_trace_set_up(H5C_log_info_t *log_info, const char log_location[], int mpi_rank)
{
H5C_log_trace_udata_t *trace_udata = NULL;
char *file_name = NULL;
size_t n_chars;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_PACKAGE
/* Sanity checks */
HDassert(log_info);
@@ -269,7 +269,7 @@ done:
}
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_log_trace_set_up() */
+} /* H5C__log_trace_set_up() */
/*-------------------------------------------------------------------------
* Function: H5C__trace_tear_down_logging
diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c
index 5822746..47f9b1f 100644
--- a/src/H5Cmpio.c
+++ b/src/H5Cmpio.c
@@ -397,7 +397,6 @@ done:
} /* H5C_apply_candidate_list() */
/*-------------------------------------------------------------------------
- *
* Function: H5C_construct_candidate_list__clean_cache
*
* Purpose: Construct the list of entries that should be flushed to
@@ -597,7 +596,6 @@ done:
} /* H5C_construct_candidate_list__min_clean() */
/*-------------------------------------------------------------------------
- *
* Function: H5C_mark_entries_as_clean
*
* Purpose: When the H5C code is used to implement the metadata caches
@@ -827,7 +825,6 @@ done:
} /* H5C_mark_entries_as_clean() */
/*-------------------------------------------------------------------------
- *
* Function: H5C_clear_coll_entries
*
* Purpose: Clear half or the entire list of collective entries and
@@ -879,7 +876,6 @@ done:
} /* H5C_clear_coll_entries */
/*-------------------------------------------------------------------------
- *
* Function: H5C__collective_write
*
* Purpose: Perform a collective write of a list of metadata entries.
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index 2cad464..d9203b8 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -299,6 +299,16 @@ if ((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size))) { \
#define H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr) \
(cache_ptr)->unpins[(entry_ptr)->type->id]++;
+#define H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, dirty) \
+{ \
+ (cache_ptr)->prefetches++; \
+ if (dirty) \
+ (cache_ptr)->dirty_prefetches++; \
+}
+
+#define H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr) \
+ (cache_ptr)->prefetch_hits++;
+
#define H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr) \
(cache_ptr)->slist_scan_restarts++;
@@ -512,6 +522,8 @@ if ((new_size) > (dll_size) || ((dll_len) == 1 && (new_size) != (dll_size))) { \
#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit)
#define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr)
#define H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr)
+#define H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, dirty)
+#define H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr)
#define H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
#define H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
#define H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr)
@@ -3256,19 +3268,22 @@ typedef int (*H5C_tag_iter_cb_t)(H5C_cache_entry_t *entry, void *ctx);
/* Package Private Prototypes */
/******************************/
H5_DLL herr_t H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated);
-H5_DLL herr_t H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t * cache_ptr,
- H5C_cache_entry_t** entry_ptr_ptr, const H5C_class_t * type, haddr_t addr,
- void * udata);
/* General routines */
+H5_DLL herr_t H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted);
+H5_DLL herr_t H5C__autoadjust__ageout__remove_all_markers(H5C_t *cache_ptr);
+H5_DLL herr_t H5C__autoadjust__ageout__remove_excess_markers(H5C_t *cache_ptr);
+H5_DLL herr_t H5C__flash_increase_cache_size(H5C_t *cache_ptr, size_t old_entry_size, size_t new_entry_size);
+H5_DLL herr_t H5C__flush_invalidate_cache(H5F_t *f, unsigned flags);
+H5_DLL herr_t H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags);
H5_DLL herr_t H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr,
unsigned flags);
H5_DLL herr_t H5C__generate_cache_image(H5F_t *f, H5C_t *cache_ptr);
H5_DLL herr_t H5C__load_cache_image(H5F_t *f);
H5_DLL herr_t H5C__make_space_in_cache(H5F_t * f, size_t space_needed,
hbool_t write_permitted);
-H5_DLL herr_t H5C__flush_marked_entries(H5F_t * f);
H5_DLL herr_t H5C__serialize_cache(H5F_t *f);
+H5_DLL herr_t H5C__serialize_single_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr);
H5_DLL herr_t H5C__iter_tagged_entries(H5C_t *cache, haddr_t tag, hbool_t match_global,
H5C_tag_iter_cb_t cb, void *cb_ctx);
@@ -3276,10 +3291,14 @@ H5_DLL herr_t H5C__iter_tagged_entries(H5C_t *cache, haddr_t tag, hbool_t match_
H5_DLL herr_t H5C__tag_entry(H5C_t * cache_ptr, H5C_cache_entry_t * entry_ptr);
H5_DLL herr_t H5C__untag_entry(H5C_t *cache, H5C_cache_entry_t *entry);
+/* Routines for operating on cache images */
+H5_DLL herr_t H5C__get_cache_image_config(const H5C_t *cache_ptr, H5C_cache_image_ctl_t *config_ptr);
+H5_DLL herr_t H5C__image_stats(H5C_t *cache_ptr, hbool_t print_header);
+
+/* Debugging routines */
#ifdef H5C_DO_SLIST_SANITY_CHECKS
H5_DLL hbool_t H5C__entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr);
#endif
-
#ifdef H5C_DO_EXTREME_SANITY_CHECKS
H5_DLL herr_t H5C__validate_lru_list(H5C_t *cache_ptr);
H5_DLL herr_t H5C__validate_pinned_entry_list(H5C_t *cache_ptr);
diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h
index a5330ce..ed52ed2 100644
--- a/src/H5Cprivate.h
+++ b/src/H5Cprivate.h
@@ -2215,7 +2215,6 @@ herr_t H5C_verify_tag(int id, haddr_t tag);
#endif
H5_DLL herr_t H5C_flush_to_min_clean(H5F_t *f);
H5_DLL herr_t H5C_get_cache_auto_resize_config(const H5C_t *cache_ptr, H5C_auto_size_ctl_t *config_ptr);
-H5_DLL herr_t H5C_get_cache_image_config(const H5C_t *cache_ptr, H5C_cache_image_ctl_t *config_ptr);
H5_DLL herr_t H5C_get_cache_size(const H5C_t *cache_ptr, size_t *max_size_ptr, size_t *min_clean_size_ptr,
size_t *cur_size_ptr, uint32_t *cur_num_entries_ptr);
H5_DLL herr_t H5C_get_cache_flush_in_progress(const H5C_t *cache_ptr, hbool_t *flush_in_progress_ptr);
@@ -2226,7 +2225,6 @@ H5_DLL herr_t H5C_get_entry_status(const H5F_t *f, haddr_t addr, size_t *size_pt
hbool_t *is_flush_dep_child_ptr, hbool_t *image_up_to_date_ptr);
H5_DLL herr_t H5C_get_evictions_enabled(const H5C_t *cache_ptr, hbool_t *evictions_enabled_ptr);
H5_DLL void *H5C_get_aux_ptr(const H5C_t *cache_ptr);
-H5_DLL herr_t H5C_image_stats(H5C_t *cache_ptr, hbool_t print_header);
H5_DLL herr_t H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing,
unsigned int flags);
H5_DLL herr_t H5C_load_cache_image_on_next_protect(H5F_t *f, haddr_t addr, hsize_t len, hbool_t rw);
diff --git a/src/H5Cquery.c b/src/H5Cquery.c
index db73d65..6325d1f 100644
--- a/src/H5Cquery.c
+++ b/src/H5Cquery.c
@@ -207,7 +207,6 @@ done:
} /* H5C_get_cache_hit_rate() */
/*-------------------------------------------------------------------------
- *
* Function: H5C_get_entry_status
*
* Purpose: This function is used to determine whether the cache
diff --git a/src/H5Ctag.c b/src/H5Ctag.c
index 10089e4..d821e28 100644
--- a/src/H5Ctag.c
+++ b/src/H5Ctag.c
@@ -38,8 +38,6 @@
#include "H5CXprivate.h" /* API Contexts */
#include "H5Eprivate.h" /* Error handling */
#include "H5Fpkg.h" /* Files */
-#include "H5Iprivate.h" /* IDs */
-#include "H5Pprivate.h" /* Property lists */
/****************/
/* Local Macros */
@@ -81,7 +79,9 @@ typedef struct {
/********************/
/* Local Prototypes */
/********************/
+static herr_t H5C__iter_tagged_entries_real(H5C_t *cache, haddr_t tag, H5C_tag_iter_cb_t cb, void *cb_ctx);
static herr_t H5C__mark_tagged_entries(H5C_t *cache, haddr_t tag);
+static herr_t H5C__flush_marked_entries(H5F_t *f);
/*********************/
/* Package Variables */
@@ -99,7 +99,6 @@ H5FL_EXTERN(H5C_tag_info_t);
/*******************/
/*-------------------------------------------------------------------------
- *
* Function: H5C_ignore_tags
*
* Purpose: Override all assertion frameworks associated with making
@@ -136,7 +135,6 @@ H5C_ignore_tags(H5C_t *cache)
} /* H5C_ignore_tags */
/*-------------------------------------------------------------------------
- *
* Function: H5C_get_ignore_tags
*
* Purpose: Retrieve the 'ignore_tags' field for the cache
@@ -148,7 +146,7 @@ H5C_ignore_tags(H5C_t *cache)
*
*-------------------------------------------------------------------------
*/
-hbool_t
+H5_ATTR_PURE hbool_t
H5C_get_ignore_tags(const H5C_t *cache)
{
FUNC_ENTER_NOAPI_NOERR
@@ -162,7 +160,6 @@ H5C_get_ignore_tags(const H5C_t *cache)
} /* H5C_get_ignore_tags */
/*-------------------------------------------------------------------------
- *
* Function: H5C_get_num_objs_corked
*
* Purpose: Retrieve the 'num_objs_corked' field for the cache
@@ -173,7 +170,7 @@ H5C_get_ignore_tags(const H5C_t *cache)
*
*-------------------------------------------------------------------------
*/
-uint32_t
+H5_ATTR_PURE uint32_t
H5C_get_num_objs_corked(const H5C_t *cache)
{
FUNC_ENTER_NOAPI_NOERR
@@ -187,7 +184,6 @@ H5C_get_num_objs_corked(const H5C_t *cache)
} /* H5C_get_num_objs_corked */
/*-------------------------------------------------------------------------
- *
* Function: H5C__tag_entry
*
* Purpose: Tags an entry with the provided tag (contained in the API context).
@@ -274,7 +270,6 @@ done:
} /* H5C__tag_entry */
/*-------------------------------------------------------------------------
- *
* Function: H5C__untag_entry
*
* Purpose: Removes an entry from a tag list, possibly removing the tag
@@ -333,7 +328,6 @@ H5C__untag_entry(H5C_t *cache, H5C_cache_entry_t *entry)
} /* H5C__untag_entry */
/*-------------------------------------------------------------------------
- *
* Function: H5C__iter_tagged_entries_real
*
* Purpose: Iterate over tagged entries, making a callback for matches
@@ -390,7 +384,6 @@ done:
} /* H5C__iter_tagged_entries_real() */
/*-------------------------------------------------------------------------
- *
* Function: H5C__iter_tagged_entries
*
* Purpose: Iterate over tagged entries, making a callback for matches
@@ -434,7 +427,6 @@ done:
} /* H5C__iter_tagged_entries() */
/*-------------------------------------------------------------------------
- *
* Function: H5C__evict_tagged_entries_cb
*
* Purpose: Callback for evicting tagged entries
@@ -485,7 +477,6 @@ done:
} /* H5C__evict_tagged_entries_cb() */
/*-------------------------------------------------------------------------
- *
* Function: H5C_evict_tagged_entries
*
* Purpose: Evicts all entries with the specified tag from cache
@@ -564,7 +555,6 @@ done:
} /* H5C_evict_tagged_entries() */
/*-------------------------------------------------------------------------
- *
* Function: H5C__mark_tagged_entries_cb
*
* Purpose: Callback to set the flush marker on dirty entries in the cache
@@ -594,7 +584,6 @@ H5C__mark_tagged_entries_cb(H5C_cache_entry_t *entry, void H5_ATTR_UNUSED *_ctx)
} /* H5C__mark_tagged_entries_cb() */
/*-------------------------------------------------------------------------
- *
* Function: H5C__mark_tagged_entries
*
* Purpose: Set the flush marker on dirty entries in the cache that have
@@ -629,10 +618,50 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__mark_tagged_entries() */
+/*-------------------------------------------------------------------------
+ * Function: H5C__flush_marked_entries
+ *
+ * Purpose: Flushes all marked entries in the cache.
+ *
+ * Return: FAIL if error is detected, SUCCEED otherwise.
+ *
+ * Programmer: Mike McGreevy
+ * November 3, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__flush_marked_entries(H5F_t *f)
+{
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_PACKAGE
+
+ /* Assertions */
+ HDassert(f != NULL);
+
+ /* Enable the slist, as it is needed in the flush */
+ if (H5C_set_slist_enabled(f->shared->cache, TRUE, FALSE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist enabled failed")
+
+ /* Flush all marked entries */
+ if (H5C_flush_cache(f, H5C__FLUSH_MARKED_ENTRIES_FLAG | H5C__FLUSH_IGNORE_PROTECTED_FLAG) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush cache")
+
+ /* Disable the slist. Set the clear_slist parameter to TRUE
+ * since we called H5C_flush_cache() with the
+ * H5C__FLUSH_MARKED_ENTRIES_FLAG.
+ */
+ if (H5C_set_slist_enabled(f->shared->cache, FALSE, TRUE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "disable slist failed")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__flush_marked_entries */
+
#ifdef H5C_DO_TAGGING_SANITY_CHECKS
/*-------------------------------------------------------------------------
- *
* Function: H5C_verify_tag
*
* Purpose: Performs sanity checking on an entrytype/tag pair.
@@ -703,7 +732,6 @@ done:
#endif
/*-------------------------------------------------------------------------
- *
* Function: H5C_flush_tagged_entries
*
* Purpose: Flushes all entries with the specified tag to disk.
@@ -744,7 +772,6 @@ done:
} /* H5C_flush_tagged_entries */
/*-------------------------------------------------------------------------
- *
* Function: H5C_retag_entries
*
* Purpose: Searches through cache index for all entries with the
@@ -786,7 +813,6 @@ H5C_retag_entries(H5C_t *cache, haddr_t src_tag, haddr_t dest_tag)
} /* H5C_retag_entries() */
/*-------------------------------------------------------------------------
- *
* Function: H5C__expunge_tag_type_metadata_cb
*
* Purpose: Expunge from the cache entries associated
@@ -822,7 +848,6 @@ done:
} /* H5C__expunge_tag_type_metadata_cb() */
/*-------------------------------------------------------------------------
- *
* Function: H5C_expunge_tag_type_metadata
*
* Purpose: Search and expunge from the cache entries associated
@@ -866,7 +891,6 @@ done:
} /* H5C_expunge_tag_type_metadata() */
/*-------------------------------------------------------------------------
- *
* Function: H5C_get_tag()
*
* Purpose: Get the tag for a metadata cache entry.
diff --git a/src/Makefile.am b/src/Makefile.am
index 5650c38..a1c21e3 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -43,7 +43,8 @@ libhdf5_la_SOURCES= H5.c H5checksum.c H5dbg.c H5lib_settings.c H5system.c \
H5B.c H5Bcache.c H5Bdbg.c \
H5B2.c H5B2cache.c H5B2dbg.c H5B2hdr.c H5B2int.c H5B2internal.c \
H5B2leaf.c H5B2stat.c H5B2test.c \
- H5C.c H5Cdbg.c H5Cepoch.c H5Cimage.c H5Clog.c H5Clog_json.c H5Clog_trace.c \
+ H5C.c H5Cdbg.c H5Centry.c H5Cepoch.c H5Cimage.c H5Cint.c \
+ H5Clog.c H5Clog_json.c H5Clog_trace.c \
H5Cprefetched.c H5Cquery.c H5Ctag.c H5Ctest.c \
H5CS.c \
H5CX.c \