-rw-r--r--  src/H5C.c  9064
1 files changed, 4539 insertions, 4525 deletions
diff --git a/src/H5C.c b/src/H5C.c
index ff118f5..44f561c 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -3566,575 +3566,575 @@ done:
#if H5C_DO_EXTREME_SANITY_CHECKS
if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0)) {
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
- FUNC_LEAVE_NOAPI(ret_value)
+ FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C_unprotect() */
+} /* H5C_unprotect() */
- /*-------------------------------------------------------------------------
- *
- * Function: H5C_unsettle_entry_ring
- *
- * Purpose: Advise the metadata cache that the specified entry's free space
- * manager ring is no longer settled (if it was on entry).
- *
- * If the target free space manager ring is already
- * unsettled, do nothing, and return SUCCEED.
- *
- * If the target free space manager ring is settled, and
- * we are not in the process of a file shutdown, mark
- * the ring as unsettled, and return SUCCEED.
- *
- * If the target free space manager is settled, and we
- * are in the process of a file shutdown, post an error
- * message, and return FAIL.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * January 3, 2017
- *
- *-------------------------------------------------------------------------
- */
- herr_t H5C_unsettle_entry_ring(void *_entry)
- {
- H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_entry; /* Entry whose ring to unsettle */
- H5C_t *cache; /* Cache for file */
- herr_t ret_value = SUCCEED; /* Return value */
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_unsettle_entry_ring
+ *
+ * Purpose: Advise the metadata cache that the specified entry's free space
+ * manager ring is no longer settled (if it was on entry).
+ *
+ * If the target free space manager ring is already
+ * unsettled, do nothing, and return SUCCEED.
+ *
+ * If the target free space manager ring is settled, and
+ * we are not in the process of a file shutdown, mark
+ * the ring as unsettled, and return SUCCEED.
+ *
+ * If the target free space manager is settled, and we
+ * are in the process of a file shutdown, post an error
+ * message, and return FAIL.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * January 3, 2017
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_unsettle_entry_ring(void *_entry)
+{
+ H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_entry; /* Entry whose ring to unsettle */
+ H5C_t *cache; /* Cache for file */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_NOAPI(FAIL)
- /* Sanity checks */
- HDassert(entry);
- HDassert(entry->ring != H5C_RING_UNDEFINED);
- HDassert((H5C_RING_USER == entry->ring) || (H5C_RING_RDFSM == entry->ring) ||
- (H5C_RING_MDFSM == entry->ring));
- cache = entry->cache_ptr;
- HDassert(cache);
- HDassert(cache->magic == H5C__H5C_T_MAGIC);
-
- switch (entry->ring) {
- case H5C_RING_USER:
- /* Do nothing */
- break;
+ /* Sanity checks */
+ HDassert(entry);
+ HDassert(entry->ring != H5C_RING_UNDEFINED);
+ HDassert((H5C_RING_USER == entry->ring) || (H5C_RING_RDFSM == entry->ring) ||
+ (H5C_RING_MDFSM == entry->ring));
+ cache = entry->cache_ptr;
+ HDassert(cache);
+ HDassert(cache->magic == H5C__H5C_T_MAGIC);
+
+ switch (entry->ring) {
+ case H5C_RING_USER:
+ /* Do nothing */
+ break;
- case H5C_RING_RDFSM:
- if (cache->rdfsm_settled) {
- if (cache->flush_in_progress || cache->close_warning_received)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected rdfsm ring unsettle")
- cache->rdfsm_settled = FALSE;
- } /* end if */
- break;
+ case H5C_RING_RDFSM:
+ if (cache->rdfsm_settled) {
+ if (cache->flush_in_progress || cache->close_warning_received)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected rdfsm ring unsettle")
+ cache->rdfsm_settled = FALSE;
+ } /* end if */
+ break;
- case H5C_RING_MDFSM:
- if (cache->mdfsm_settled) {
- if (cache->flush_in_progress || cache->close_warning_received)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected mdfsm ring unsettle")
- cache->mdfsm_settled = FALSE;
- } /* end if */
- break;
+ case H5C_RING_MDFSM:
+ if (cache->mdfsm_settled) {
+ if (cache->flush_in_progress || cache->close_warning_received)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected mdfsm ring unsettle")
+ cache->mdfsm_settled = FALSE;
+ } /* end if */
+ break;
- default:
- HDassert(FALSE); /* this should be un-reachable */
- break;
- } /* end switch */
+ default:
+ HDassert(FALSE); /* this should be un-reachable */
+ break;
+ } /* end switch */
done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C_unsettle_entry_ring() */
-
- /*-------------------------------------------------------------------------
- * Function: H5C_unsettle_ring()
- *
- * Purpose: Advise the metadata cache that the specified free space
- * manager ring is no longer settled (if it was on entry).
- *
- * If the target free space manager ring is already
- * unsettled, do nothing, and return SUCCEED.
- *
- * If the target free space manager ring is settled, and
- * we are not in the process of a file shutdown, mark
- * the ring as unsettled, and return SUCCEED.
- *
- * If the target free space manager is settled, and we
- * are in the process of a file shutdown, post an error
- * message, and return FAIL.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer
- * 10/15/16
- *
- *-------------------------------------------------------------------------
- */
- herr_t H5C_unsettle_ring(H5F_t * f, H5C_ring_t ring)
- {
- H5C_t *cache_ptr;
- herr_t ret_value = SUCCEED; /* Return value */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_unsettle_entry_ring() */
- FUNC_ENTER_NOAPI(FAIL)
+/*-------------------------------------------------------------------------
+ * Function: H5C_unsettle_ring()
+ *
+ * Purpose: Advise the metadata cache that the specified free space
+ * manager ring is no longer settled (if it was on entry).
+ *
+ * If the target free space manager ring is already
+ * unsettled, do nothing, and return SUCCEED.
+ *
+ * If the target free space manager ring is settled, and
+ * we are not in the process of a file shutdown, mark
+ * the ring as unsettled, and return SUCCEED.
+ *
+ * If the target free space manager is settled, and we
+ * are in the process of a file shutdown, post an error
+ * message, and return FAIL.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 10/15/16
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_unsettle_ring(H5F_t *f, H5C_ring_t ring)
+{
+ H5C_t *cache_ptr;
+ herr_t ret_value = SUCCEED; /* Return value */
- /* Sanity checks */
- HDassert(f);
- HDassert(f->shared);
- HDassert(f->shared->cache);
- HDassert((H5C_RING_RDFSM == ring) || (H5C_RING_MDFSM == ring));
- cache_ptr = f->shared->cache;
- HDassert(H5C__H5C_T_MAGIC == cache_ptr->magic);
+ FUNC_ENTER_NOAPI(FAIL)
- switch (ring) {
- case H5C_RING_RDFSM:
- if (cache_ptr->rdfsm_settled) {
- if (cache_ptr->close_warning_received)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected rdfsm ring unsettle")
- cache_ptr->rdfsm_settled = FALSE;
- } /* end if */
- break;
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->cache);
+ HDassert((H5C_RING_RDFSM == ring) || (H5C_RING_MDFSM == ring));
+ cache_ptr = f->shared->cache;
+ HDassert(H5C__H5C_T_MAGIC == cache_ptr->magic);
+
+ switch (ring) {
+ case H5C_RING_RDFSM:
+ if (cache_ptr->rdfsm_settled) {
+ if (cache_ptr->close_warning_received)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected rdfsm ring unsettle")
+ cache_ptr->rdfsm_settled = FALSE;
+ } /* end if */
+ break;
- case H5C_RING_MDFSM:
- if (cache_ptr->mdfsm_settled) {
- if (cache_ptr->close_warning_received)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected mdfsm ring unsettle")
- cache_ptr->mdfsm_settled = FALSE;
- } /* end if */
- break;
+ case H5C_RING_MDFSM:
+ if (cache_ptr->mdfsm_settled) {
+ if (cache_ptr->close_warning_received)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected mdfsm ring unsettle")
+ cache_ptr->mdfsm_settled = FALSE;
+ } /* end if */
+ break;
- default:
- HDassert(FALSE); /* this should be un-reachable */
- break;
- } /* end switch */
+ default:
+ HDassert(FALSE); /* this should be un-reachable */
+ break;
+ } /* end switch */
done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C_unsettle_ring() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_unsettle_ring() */
- /*-------------------------------------------------------------------------
- * Function: H5C_validate_resize_config()
- *
- * Purpose: Run a sanity check on the specified sections of the
- * provided instance of struct H5C_auto_size_ctl_t.
- *
- * Do nothing and return SUCCEED if no errors are detected,
- * and flag an error and return FAIL otherwise.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer
- * 3/23/05
- *
- *-------------------------------------------------------------------------
- */
- herr_t H5C_validate_resize_config(H5C_auto_size_ctl_t * config_ptr, unsigned int tests)
- {
- herr_t ret_value = SUCCEED; /* Return value */
+/*-------------------------------------------------------------------------
+ * Function: H5C_validate_resize_config()
+ *
+ * Purpose: Run a sanity check on the specified sections of the
+ * provided instance of struct H5C_auto_size_ctl_t.
+ *
+ * Do nothing and return SUCCEED if no errors are detected,
+ * and flag an error and return FAIL otherwise.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 3/23/05
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_validate_resize_config(H5C_auto_size_ctl_t *config_ptr, unsigned int tests)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_NOAPI(FAIL)
- if (config_ptr == NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry")
+ if (config_ptr == NULL)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry")
- if (config_ptr->version != H5C__CURR_AUTO_SIZE_CTL_VER)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown config version")
+ if (config_ptr->version != H5C__CURR_AUTO_SIZE_CTL_VER)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown config version")
- if ((tests & H5C_RESIZE_CFG__VALIDATE_GENERAL) != 0) {
+ if ((tests & H5C_RESIZE_CFG__VALIDATE_GENERAL) != 0) {
- if (config_ptr->max_size > H5C__MAX_MAX_CACHE_SIZE)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "max_size too big")
+ if (config_ptr->max_size > H5C__MAX_MAX_CACHE_SIZE)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "max_size too big")
- if (config_ptr->min_size < H5C__MIN_MAX_CACHE_SIZE)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_size too small")
+ if (config_ptr->min_size < H5C__MIN_MAX_CACHE_SIZE)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_size too small")
- if (config_ptr->min_size > config_ptr->max_size)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_size > max_size")
+ if (config_ptr->min_size > config_ptr->max_size)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_size > max_size")
- if (config_ptr->set_initial_size && ((config_ptr->initial_size < config_ptr->min_size) ||
- (config_ptr->initial_size > config_ptr->max_size)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
- "initial_size must be in the interval [min_size, max_size]")
+ if (config_ptr->set_initial_size && ((config_ptr->initial_size < config_ptr->min_size) ||
+ (config_ptr->initial_size > config_ptr->max_size)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+ "initial_size must be in the interval [min_size, max_size]")
- if ((config_ptr->min_clean_fraction < 0.0) || (config_ptr->min_clean_fraction > 1.0))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
- "min_clean_fraction must be in the interval [0.0, 1.0]")
+ if ((config_ptr->min_clean_fraction < 0.0) || (config_ptr->min_clean_fraction > 1.0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_clean_fraction must be in the interval [0.0, 1.0]")
- if (config_ptr->epoch_length < H5C__MIN_AR_EPOCH_LENGTH)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too small")
+ if (config_ptr->epoch_length < H5C__MIN_AR_EPOCH_LENGTH)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too small")
- if (config_ptr->epoch_length > H5C__MAX_AR_EPOCH_LENGTH)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too big")
- } /* H5C_RESIZE_CFG__VALIDATE_GENERAL */
+ if (config_ptr->epoch_length > H5C__MAX_AR_EPOCH_LENGTH)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too big")
+ } /* H5C_RESIZE_CFG__VALIDATE_GENERAL */
- if ((tests & H5C_RESIZE_CFG__VALIDATE_INCREMENT) != 0) {
- if ((config_ptr->incr_mode != H5C_incr__off) && (config_ptr->incr_mode != H5C_incr__threshold))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid incr_mode")
+ if ((tests & H5C_RESIZE_CFG__VALIDATE_INCREMENT) != 0) {
+ if ((config_ptr->incr_mode != H5C_incr__off) && (config_ptr->incr_mode != H5C_incr__threshold))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid incr_mode")
- if (config_ptr->incr_mode == H5C_incr__threshold) {
- if ((config_ptr->lower_hr_threshold < 0.0) || (config_ptr->lower_hr_threshold > 1.0))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
- "lower_hr_threshold must be in the range [0.0, 1.0]")
+ if (config_ptr->incr_mode == H5C_incr__threshold) {
+ if ((config_ptr->lower_hr_threshold < 0.0) || (config_ptr->lower_hr_threshold > 1.0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+ "lower_hr_threshold must be in the range [0.0, 1.0]")
- if (config_ptr->increment < 1.0)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
- "increment must be greater than or equal to 1.0")
+ if (config_ptr->increment < 1.0)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "increment must be greater than or equal to 1.0")
- /* no need to check max_increment, as it is a size_t,
- * and thus must be non-negative.
- */
- } /* H5C_incr__threshold */
+ /* no need to check max_increment, as it is a size_t,
+ * and thus must be non-negative.
+ */
+ } /* H5C_incr__threshold */
- switch (config_ptr->flash_incr_mode) {
- case H5C_flash_incr__off:
- /* nothing to do here */
- break;
+ switch (config_ptr->flash_incr_mode) {
+ case H5C_flash_incr__off:
+ /* nothing to do here */
+ break;
- case H5C_flash_incr__add_space:
- if ((config_ptr->flash_multiple < 0.1) || (config_ptr->flash_multiple > 10.0))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
- "flash_multiple must be in the range [0.1, 10.0]")
- if ((config_ptr->flash_threshold < 0.1) || (config_ptr->flash_threshold > 1.0))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
- "flash_threshold must be in the range [0.1, 1.0]")
- break;
+ case H5C_flash_incr__add_space:
+ if ((config_ptr->flash_multiple < 0.1) || (config_ptr->flash_multiple > 10.0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+ "flash_multiple must be in the range [0.1, 10.0]")
+ if ((config_ptr->flash_threshold < 0.1) || (config_ptr->flash_threshold > 1.0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+ "flash_threshold must be in the range [0.1, 1.0]")
+ break;
- default:
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid flash_incr_mode")
- break;
- } /* end switch */
- } /* H5C_RESIZE_CFG__VALIDATE_INCREMENT */
+ default:
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid flash_incr_mode")
+ break;
+ } /* end switch */
+ } /* H5C_RESIZE_CFG__VALIDATE_INCREMENT */
- if ((tests & H5C_RESIZE_CFG__VALIDATE_DECREMENT) != 0) {
+ if ((tests & H5C_RESIZE_CFG__VALIDATE_DECREMENT) != 0) {
- if ((config_ptr->decr_mode != H5C_decr__off) && (config_ptr->decr_mode != H5C_decr__threshold) &&
- (config_ptr->decr_mode != H5C_decr__age_out) &&
- (config_ptr->decr_mode != H5C_decr__age_out_with_threshold)) {
+ if ((config_ptr->decr_mode != H5C_decr__off) && (config_ptr->decr_mode != H5C_decr__threshold) &&
+ (config_ptr->decr_mode != H5C_decr__age_out) &&
+ (config_ptr->decr_mode != H5C_decr__age_out_with_threshold)) {
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid decr_mode")
- }
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid decr_mode")
+ }
- if (config_ptr->decr_mode == H5C_decr__threshold) {
- if (config_ptr->upper_hr_threshold > 1.0)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "upper_hr_threshold must be <= 1.0")
+ if (config_ptr->decr_mode == H5C_decr__threshold) {
+ if (config_ptr->upper_hr_threshold > 1.0)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "upper_hr_threshold must be <= 1.0")
- if ((config_ptr->decrement > 1.0) || (config_ptr->decrement < 0.0))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "decrement must be in the interval [0.0, 1.0]")
+ if ((config_ptr->decrement > 1.0) || (config_ptr->decrement < 0.0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "decrement must be in the interval [0.0, 1.0]")
- /* no need to check max_decrement as it is a size_t
- * and thus must be non-negative.
- */
- } /* H5C_decr__threshold */
+ /* no need to check max_decrement as it is a size_t
+ * and thus must be non-negative.
+ */
+ } /* H5C_decr__threshold */
- if ((config_ptr->decr_mode == H5C_decr__age_out) ||
- (config_ptr->decr_mode == H5C_decr__age_out_with_threshold)) {
+ if ((config_ptr->decr_mode == H5C_decr__age_out) ||
+ (config_ptr->decr_mode == H5C_decr__age_out_with_threshold)) {
- if (config_ptr->epochs_before_eviction < 1)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epochs_before_eviction must be positive")
- if (config_ptr->epochs_before_eviction > H5C__MAX_EPOCH_MARKERS)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epochs_before_eviction too big")
+ if (config_ptr->epochs_before_eviction < 1)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epochs_before_eviction must be positive")
+ if (config_ptr->epochs_before_eviction > H5C__MAX_EPOCH_MARKERS)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epochs_before_eviction too big")
- if ((config_ptr->apply_empty_reserve) &&
- ((config_ptr->empty_reserve > 1.0) || (config_ptr->empty_reserve < 0.0)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
- "empty_reserve must be in the interval [0.0, 1.0]")
+ if ((config_ptr->apply_empty_reserve) &&
+ ((config_ptr->empty_reserve > 1.0) || (config_ptr->empty_reserve < 0.0)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "empty_reserve must be in the interval [0.0, 1.0]")
- /* no need to check max_decrement as it is a size_t
- * and thus must be non-negative.
- */
- } /* H5C_decr__age_out || H5C_decr__age_out_with_threshold */
+ /* no need to check max_decrement as it is a size_t
+ * and thus must be non-negative.
+ */
+ } /* H5C_decr__age_out || H5C_decr__age_out_with_threshold */
- if (config_ptr->decr_mode == H5C_decr__age_out_with_threshold) {
- if ((config_ptr->upper_hr_threshold > 1.0) || (config_ptr->upper_hr_threshold < 0.0))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
- "upper_hr_threshold must be in the interval [0.0, 1.0]")
- } /* H5C_decr__age_out_with_threshold */
- } /* H5C_RESIZE_CFG__VALIDATE_DECREMENT */
-
- if ((tests & H5C_RESIZE_CFG__VALIDATE_INTERACTIONS) != 0) {
- if ((config_ptr->incr_mode == H5C_incr__threshold) &&
- ((config_ptr->decr_mode == H5C_decr__threshold) ||
- (config_ptr->decr_mode == H5C_decr__age_out_with_threshold)) &&
- (config_ptr->lower_hr_threshold >= config_ptr->upper_hr_threshold))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "conflicting threshold fields in config")
- } /* H5C_RESIZE_CFG__VALIDATE_INTERACTIONS */
+ if (config_ptr->decr_mode == H5C_decr__age_out_with_threshold) {
+ if ((config_ptr->upper_hr_threshold > 1.0) || (config_ptr->upper_hr_threshold < 0.0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+ "upper_hr_threshold must be in the interval [0.0, 1.0]")
+ } /* H5C_decr__age_out_with_threshold */
+ } /* H5C_RESIZE_CFG__VALIDATE_DECREMENT */
+
+ if ((tests & H5C_RESIZE_CFG__VALIDATE_INTERACTIONS) != 0) {
+ if ((config_ptr->incr_mode == H5C_incr__threshold) &&
+ ((config_ptr->decr_mode == H5C_decr__threshold) ||
+ (config_ptr->decr_mode == H5C_decr__age_out_with_threshold)) &&
+ (config_ptr->lower_hr_threshold >= config_ptr->upper_hr_threshold))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "conflicting threshold fields in config")
+ } /* H5C_RESIZE_CFG__VALIDATE_INTERACTIONS */
done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C_validate_resize_config() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_validate_resize_config() */
- /*-------------------------------------------------------------------------
- * Function: H5C_create_flush_dependency()
- *
- * Purpose: Initiates a parent<->child entry flush dependency. The parent
- * entry must be pinned or protected at the time of call, and must
- * have all dependencies removed before the cache can shut down.
- *
- * Note: Flush dependencies in the cache indicate that a child entry
- * must be flushed to the file before its parent. (This is
- * currently used to implement Single-Writer/Multiple-Reader (SWMR)
- * I/O access for data structures in the file).
- *
- * Creating a flush dependency between two entries will also pin
- * the parent entry.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * 3/05/09
- *
- *-------------------------------------------------------------------------
- */
- herr_t H5C_create_flush_dependency(void *parent_thing, void *child_thing)
- {
- H5C_t *cache_ptr;
- H5C_cache_entry_t *parent_entry = (H5C_cache_entry_t *)parent_thing; /* Ptr to parent thing's entry */
- H5C_cache_entry_t *child_entry = (H5C_cache_entry_t *)child_thing; /* Ptr to child thing's entry */
- herr_t ret_value = SUCCEED; /* Return value */
+/*-------------------------------------------------------------------------
+ * Function: H5C_create_flush_dependency()
+ *
+ * Purpose: Initiates a parent<->child entry flush dependency. The parent
+ * entry must be pinned or protected at the time of call, and must
+ * have all dependencies removed before the cache can shut down.
+ *
+ * Note: Flush dependencies in the cache indicate that a child entry
+ * must be flushed to the file before its parent. (This is
+ * currently used to implement Single-Writer/Multiple-Reader (SWMR)
+ * I/O access for data structures in the file).
+ *
+ * Creating a flush dependency between two entries will also pin
+ * the parent entry.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * 3/05/09
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_create_flush_dependency(void *parent_thing, void *child_thing)
+{
+ H5C_t *cache_ptr;
+ H5C_cache_entry_t *parent_entry = (H5C_cache_entry_t *)parent_thing; /* Ptr to parent thing's entry */
+ H5C_cache_entry_t *child_entry = (H5C_cache_entry_t *)child_thing; /* Ptr to child thing's entry */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_NOAPI(FAIL)
- /* Sanity checks */
- HDassert(parent_entry);
- HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(H5F_addr_defined(parent_entry->addr));
- HDassert(child_entry);
- HDassert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(H5F_addr_defined(child_entry->addr));
- cache_ptr = parent_entry->cache_ptr;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr == child_entry->cache_ptr);
+ /* Sanity checks */
+ HDassert(parent_entry);
+ HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(H5F_addr_defined(parent_entry->addr));
+ HDassert(child_entry);
+ HDassert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(H5F_addr_defined(child_entry->addr));
+ cache_ptr = parent_entry->cache_ptr;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr == child_entry->cache_ptr);
#ifndef NDEBUG
- /* Make sure the parent is not already a parent */
- {
- unsigned u;
+ /* Make sure the parent is not already a parent */
+ {
+ unsigned u;
- for (u = 0; u < child_entry->flush_dep_nparents; u++)
- HDassert(child_entry->flush_dep_parent[u] != parent_entry);
- } /* end block */
-#endif /* NDEBUG */
+ for (u = 0; u < child_entry->flush_dep_nparents; u++)
+ HDassert(child_entry->flush_dep_parent[u] != parent_entry);
+ } /* end block */
+#endif /* NDEBUG */
- /* More sanity checks */
- if (child_entry == parent_entry)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL,
- "Child entry flush dependency parent can't be itself")
- if (!(parent_entry->is_protected || parent_entry->is_pinned))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Parent entry isn't pinned or protected")
+ /* More sanity checks */
+ if (child_entry == parent_entry)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Child entry flush dependency parent can't be itself")
+ if (!(parent_entry->is_protected || parent_entry->is_pinned))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Parent entry isn't pinned or protected")
- /* Check for parent not pinned */
- if (!parent_entry->is_pinned) {
- /* Sanity check */
- HDassert(parent_entry->flush_dep_nchildren == 0);
- HDassert(!parent_entry->pinned_from_client);
- HDassert(!parent_entry->pinned_from_cache);
+ /* Check for parent not pinned */
+ if (!parent_entry->is_pinned) {
+ /* Sanity check */
+ HDassert(parent_entry->flush_dep_nchildren == 0);
+ HDassert(!parent_entry->pinned_from_client);
+ HDassert(!parent_entry->pinned_from_cache);
- /* Pin the parent entry */
- parent_entry->is_pinned = TRUE;
- H5C__UPDATE_STATS_FOR_PIN(cache_ptr, parent_entry)
- } /* end else */
+ /* Pin the parent entry */
+ parent_entry->is_pinned = TRUE;
+ H5C__UPDATE_STATS_FOR_PIN(cache_ptr, parent_entry)
+ } /* end else */
- /* Mark the entry as pinned from the cache's action (possibly redundantly) */
- parent_entry->pinned_from_cache = TRUE;
+ /* Mark the entry as pinned from the cache's action (possibly redundantly) */
+ parent_entry->pinned_from_cache = TRUE;
- /* Check if we need to resize the child's parent array */
- if (child_entry->flush_dep_nparents >= child_entry->flush_dep_parent_nalloc) {
- if (child_entry->flush_dep_parent_nalloc == 0) {
- /* Array does not exist yet, allocate it */
- HDassert(!child_entry->flush_dep_parent);
+ /* Check if we need to resize the child's parent array */
+ if (child_entry->flush_dep_nparents >= child_entry->flush_dep_parent_nalloc) {
+ if (child_entry->flush_dep_parent_nalloc == 0) {
+ /* Array does not exist yet, allocate it */
+ HDassert(!child_entry->flush_dep_parent);
- if (NULL == (child_entry->flush_dep_parent =
- H5FL_SEQ_MALLOC(H5C_cache_entry_ptr_t, H5C_FLUSH_DEP_PARENT_INIT)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
- "memory allocation failed for flush dependency parent list")
- child_entry->flush_dep_parent_nalloc = H5C_FLUSH_DEP_PARENT_INIT;
- } /* end if */
- else {
- /* Resize existing array */
- HDassert(child_entry->flush_dep_parent);
-
- if (NULL == (child_entry->flush_dep_parent =
- H5FL_SEQ_REALLOC(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent,
- 2 * child_entry->flush_dep_parent_nalloc)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
- "memory allocation failed for flush dependency parent list")
- child_entry->flush_dep_parent_nalloc *= 2;
- } /* end else */
- cache_ptr->entry_fd_height_change_counter++;
+ if (NULL == (child_entry->flush_dep_parent =
+ H5FL_SEQ_MALLOC(H5C_cache_entry_ptr_t, H5C_FLUSH_DEP_PARENT_INIT)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
+ "memory allocation failed for flush dependency parent list")
+ child_entry->flush_dep_parent_nalloc = H5C_FLUSH_DEP_PARENT_INIT;
} /* end if */
+ else {
+ /* Resize existing array */
+ HDassert(child_entry->flush_dep_parent);
+
+ if (NULL == (child_entry->flush_dep_parent =
+ H5FL_SEQ_REALLOC(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent,
+ 2 * child_entry->flush_dep_parent_nalloc)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
+ "memory allocation failed for flush dependency parent list")
+ child_entry->flush_dep_parent_nalloc *= 2;
+ } /* end else */
+ cache_ptr->entry_fd_height_change_counter++;
+ } /* end if */
- /* Add the dependency to the child's parent array */
- child_entry->flush_dep_parent[child_entry->flush_dep_nparents] = parent_entry;
- child_entry->flush_dep_nparents++;
+ /* Add the dependency to the child's parent array */
+ child_entry->flush_dep_parent[child_entry->flush_dep_nparents] = parent_entry;
+ child_entry->flush_dep_nparents++;
- /* Increment parent's number of children */
- parent_entry->flush_dep_nchildren++;
+ /* Increment parent's number of children */
+ parent_entry->flush_dep_nchildren++;
- /* Adjust the number of dirty children */
- if (child_entry->is_dirty) {
- /* Sanity check */
- HDassert(parent_entry->flush_dep_ndirty_children < parent_entry->flush_dep_nchildren);
+ /* Adjust the number of dirty children */
+ if (child_entry->is_dirty) {
+ /* Sanity check */
+ HDassert(parent_entry->flush_dep_ndirty_children < parent_entry->flush_dep_nchildren);
- parent_entry->flush_dep_ndirty_children++;
+ parent_entry->flush_dep_ndirty_children++;
- /* If the parent has a 'notify' callback, send a 'child entry dirtied' notice */
- if (parent_entry->type->notify &&
- (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_DIRTIED, parent_entry) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify parent about child entry dirty flag set")
- } /* end if */
+ /* If the parent has a 'notify' callback, send a 'child entry dirtied' notice */
+ if (parent_entry->type->notify &&
+ (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_DIRTIED, parent_entry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify parent about child entry dirty flag set")
+ } /* end if */
- /* adjust the parent's number of unserialized children. Note
- * that it is possible for and entry to be clean and unserialized.
- */
- if (!child_entry->image_up_to_date) {
- HDassert(parent_entry->flush_dep_nunser_children < parent_entry->flush_dep_nchildren);
+ /* adjust the parent's number of unserialized children. Note
+ * that it is possible for and entry to be clean and unserialized.
+ */
+ if (!child_entry->image_up_to_date) {
+ HDassert(parent_entry->flush_dep_nunser_children < parent_entry->flush_dep_nchildren);
- parent_entry->flush_dep_nunser_children++;
+ parent_entry->flush_dep_nunser_children++;
- /* If the parent has a 'notify' callback, send a 'child entry unserialized' notice */
- if (parent_entry->type->notify &&
- (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED, parent_entry) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify parent about child entry serialized flag reset")
- } /* end if */
+ /* If the parent has a 'notify' callback, send a 'child entry unserialized' notice */
+ if (parent_entry->type->notify &&
+ (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED, parent_entry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify parent about child entry serialized flag reset")
+ } /* end if */
- /* Post-conditions, for successful operation */
- HDassert(parent_entry->is_pinned);
- HDassert(parent_entry->flush_dep_nchildren > 0);
- HDassert(child_entry->flush_dep_parent);
- HDassert(child_entry->flush_dep_nparents > 0);
- HDassert(child_entry->flush_dep_parent_nalloc > 0);
+ /* Post-conditions, for successful operation */
+ HDassert(parent_entry->is_pinned);
+ HDassert(parent_entry->flush_dep_nchildren > 0);
+ HDassert(child_entry->flush_dep_parent);
+ HDassert(child_entry->flush_dep_nparents > 0);
+ HDassert(child_entry->flush_dep_parent_nalloc > 0);
#ifndef NDEBUG
- H5C__assert_flush_dep_nocycle(parent_entry, child_entry);
+ H5C__assert_flush_dep_nocycle(parent_entry, child_entry);
#endif /* NDEBUG */
done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C_create_flush_dependency() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_create_flush_dependency() */
- /*-------------------------------------------------------------------------
- * Function: H5C_destroy_flush_dependency()
- *
- * Purpose: Terminates a parent<-> child entry flush dependency. The
- * parent entry must be pinned.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * 3/05/09
- *
- *-------------------------------------------------------------------------
- */
- herr_t H5C_destroy_flush_dependency(void *parent_thing, void *child_thing)
- {
- H5C_t *cache_ptr;
- H5C_cache_entry_t *parent_entry = (H5C_cache_entry_t *)parent_thing; /* Ptr to parent entry */
- H5C_cache_entry_t *child_entry = (H5C_cache_entry_t *)child_thing; /* Ptr to child entry */
- unsigned u; /* Local index variable */
- herr_t ret_value = SUCCEED; /* Return value */
+/*-------------------------------------------------------------------------
+ * Function: H5C_destroy_flush_dependency()
+ *
+ * Purpose: Terminates a parent<-> child entry flush dependency. The
+ * parent entry must be pinned.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * 3/05/09
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_destroy_flush_dependency(void *parent_thing, void *child_thing)
+{
+ H5C_t *cache_ptr;
+ H5C_cache_entry_t *parent_entry = (H5C_cache_entry_t *)parent_thing; /* Ptr to parent entry */
+ H5C_cache_entry_t *child_entry = (H5C_cache_entry_t *)child_thing; /* Ptr to child entry */
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_NOAPI(FAIL)
- /* Sanity checks */
- HDassert(parent_entry);
- HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(H5F_addr_defined(parent_entry->addr));
- HDassert(child_entry);
- HDassert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(H5F_addr_defined(child_entry->addr));
- cache_ptr = parent_entry->cache_ptr;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr == child_entry->cache_ptr);
-
- /* Usage checks */
- if (!parent_entry->is_pinned)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry isn't pinned")
- if (NULL == child_entry->flush_dep_parent)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL,
- "Child entry doesn't have a flush dependency parent array")
- if (0 == parent_entry->flush_dep_nchildren)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL,
- "Parent entry flush dependency ref. count has no child dependencies")
-
- /* Search for parent in child's parent array. This is a linear search
- * because we do not expect large numbers of parents. If this changes, we
- * may wish to change the parent array to a skip list */
- for (u = 0; u < child_entry->flush_dep_nparents; u++)
- if (child_entry->flush_dep_parent[u] == parent_entry)
- break;
- if (u == child_entry->flush_dep_nparents)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL,
- "Parent entry isn't a flush dependency parent for child entry")
-
- /* Remove parent entry from child's parent array */
- if (u < (child_entry->flush_dep_nparents - 1))
- HDmemmove(&child_entry->flush_dep_parent[u], &child_entry->flush_dep_parent[u + 1],
- (child_entry->flush_dep_nparents - u - 1) * sizeof(child_entry->flush_dep_parent[0]));
- child_entry->flush_dep_nparents--;
-
- /* Adjust parent entry's nchildren and unpin parent if it goes to zero */
- parent_entry->flush_dep_nchildren--;
- if (0 == parent_entry->flush_dep_nchildren) {
- /* Sanity check */
- HDassert(parent_entry->pinned_from_cache);
+ /* Sanity checks */
+ HDassert(parent_entry);
+ HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(H5F_addr_defined(parent_entry->addr));
+ HDassert(child_entry);
+ HDassert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(H5F_addr_defined(child_entry->addr));
+ cache_ptr = parent_entry->cache_ptr;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr == child_entry->cache_ptr);
+
+ /* Usage checks */
+ if (!parent_entry->is_pinned)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry isn't pinned")
+ if (NULL == child_entry->flush_dep_parent)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL,
+ "Child entry doesn't have a flush dependency parent array")
+ if (0 == parent_entry->flush_dep_nchildren)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL,
+ "Parent entry flush dependency ref. count has no child dependencies")
+
+ /* Search for parent in child's parent array. This is a linear search
+ * because we do not expect large numbers of parents. If this changes, we
+ * may wish to change the parent array to a skip list */
+ for (u = 0; u < child_entry->flush_dep_nparents; u++)
+ if (child_entry->flush_dep_parent[u] == parent_entry)
+ break;
+ if (u == child_entry->flush_dep_nparents)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL,
+ "Parent entry isn't a flush dependency parent for child entry")
+
+ /* Remove parent entry from child's parent array */
+ if (u < (child_entry->flush_dep_nparents - 1))
+ HDmemmove(&child_entry->flush_dep_parent[u], &child_entry->flush_dep_parent[u + 1],
+ (child_entry->flush_dep_nparents - u - 1) * sizeof(child_entry->flush_dep_parent[0]));
+ child_entry->flush_dep_nparents--;
+
+ /* Adjust parent entry's nchildren and unpin parent if it goes to zero */
+ parent_entry->flush_dep_nchildren--;
+ if (0 == parent_entry->flush_dep_nchildren) {
+ /* Sanity check */
+ HDassert(parent_entry->pinned_from_cache);
- /* Check if we should unpin parent entry now */
- if (!parent_entry->pinned_from_client)
- if (H5C__unpin_entry_real(cache_ptr, parent_entry, TRUE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry")
+ /* Check if we should unpin parent entry now */
+ if (!parent_entry->pinned_from_client)
+ if (H5C__unpin_entry_real(cache_ptr, parent_entry, TRUE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry")
- /* Mark the entry as unpinned from the cache's action */
- parent_entry->pinned_from_cache = FALSE;
- } /* end if */
+ /* Mark the entry as unpinned from the cache's action */
+ parent_entry->pinned_from_cache = FALSE;
+ } /* end if */
- /* Adjust parent entry's ndirty_children */
- if (child_entry->is_dirty) {
- /* Sanity check */
- HDassert(parent_entry->flush_dep_ndirty_children > 0);
+ /* Adjust parent entry's ndirty_children */
+ if (child_entry->is_dirty) {
+ /* Sanity check */
+ HDassert(parent_entry->flush_dep_ndirty_children > 0);
- parent_entry->flush_dep_ndirty_children--;
+ parent_entry->flush_dep_ndirty_children--;
- /* If the parent has a 'notify' callback, send a 'child entry cleaned' notice */
- if (parent_entry->type->notify &&
- (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_CLEANED, parent_entry) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify parent about child entry dirty flag reset")
- } /* end if */
+ /* If the parent has a 'notify' callback, send a 'child entry cleaned' notice */
+ if (parent_entry->type->notify &&
+ (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_CLEANED, parent_entry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify parent about child entry dirty flag reset")
+ } /* end if */
- /* adjust parent entry's number of unserialized children */
- if (!child_entry->image_up_to_date) {
- HDassert(parent_entry->flush_dep_nunser_children > 0);
+ /* adjust parent entry's number of unserialized children */
+ if (!child_entry->image_up_to_date) {
+ HDassert(parent_entry->flush_dep_nunser_children > 0);
- parent_entry->flush_dep_nunser_children--;
+ parent_entry->flush_dep_nunser_children--;
- /* If the parent has a 'notify' callback, send a 'child entry serialized' notice */
- if (parent_entry->type->notify &&
- (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_SERIALIZED, parent_entry) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify parent about child entry serialized flag set")
- } /* end if */
+ /* If the parent has a 'notify' callback, send a 'child entry serialized' notice */
+ if (parent_entry->type->notify &&
+ (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_SERIALIZED, parent_entry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify parent about child entry serialized flag set")
+ } /* end if */
- /* Shrink or free the parent array if appropriate */
- if (child_entry->flush_dep_nparents == 0) {
- child_entry->flush_dep_parent =
- H5FL_SEQ_FREE(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent);
- child_entry->flush_dep_parent_nalloc = 0;
- } /* end if */
- else if (child_entry->flush_dep_parent_nalloc > H5C_FLUSH_DEP_PARENT_INIT &&
- child_entry->flush_dep_nparents <= (child_entry->flush_dep_parent_nalloc / 4)) {
- if (NULL == (child_entry->flush_dep_parent =
- H5FL_SEQ_REALLOC(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent,
- child_entry->flush_dep_parent_nalloc / 4)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
- "memory allocation failed for flush dependency parent list")
- child_entry->flush_dep_parent_nalloc /= 4;
- } /* end if */
+ /* Shrink or free the parent array if appropriate */
+ if (child_entry->flush_dep_nparents == 0) {
+ child_entry->flush_dep_parent = H5FL_SEQ_FREE(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent);
+ child_entry->flush_dep_parent_nalloc = 0;
+ } /* end if */
+ else if (child_entry->flush_dep_parent_nalloc > H5C_FLUSH_DEP_PARENT_INIT &&
+ child_entry->flush_dep_nparents <= (child_entry->flush_dep_parent_nalloc / 4)) {
+ if (NULL == (child_entry->flush_dep_parent =
+ H5FL_SEQ_REALLOC(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent,
+ child_entry->flush_dep_parent_nalloc / 4)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
+ "memory allocation failed for flush dependency parent list")
+ child_entry->flush_dep_parent_nalloc /= 4;
+ } /* end if */
done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C_destroy_flush_dependency() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_destroy_flush_dependency() */
/*************************************************************************/
/**************************** Private Functions: *************************/
@@ -4153,3622 +4153,3623 @@ done:
*-------------------------------------------------------------------------
*/
#if H5C_COLLECT_CACHE_STATS
- static herr_t H5C__pin_entry_from_client(H5C_t * cache_ptr, H5C_cache_entry_t * entry_ptr)
+static herr_t
+H5C__pin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
#else
static herr_t
H5C__pin_entry_from_client(H5C_t H5_ATTR_UNUSED *cache_ptr, H5C_cache_entry_t *entry_ptr)
#endif
- {
- herr_t ret_value = SUCCEED; /* Return value */
+{
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_STATIC
+ FUNC_ENTER_STATIC
- /* Sanity checks */
- HDassert(cache_ptr);
- HDassert(entry_ptr);
- HDassert(entry_ptr->is_protected);
+ /* Sanity checks */
+ HDassert(cache_ptr);
+ HDassert(entry_ptr);
+ HDassert(entry_ptr->is_protected);
- /* Check if the entry is already pinned */
- if (entry_ptr->is_pinned) {
- /* Check if the entry was pinned through an explicit pin from a client */
- if (entry_ptr->pinned_from_client)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "entry is already pinned")
- } /* end if */
- else {
- entry_ptr->is_pinned = TRUE;
+ /* Check if the entry is already pinned */
+ if (entry_ptr->is_pinned) {
+ /* Check if the entry was pinned through an explicit pin from a client */
+ if (entry_ptr->pinned_from_client)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "entry is already pinned")
+ } /* end if */
+ else {
+ entry_ptr->is_pinned = TRUE;
- H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr)
- } /* end else */
+ H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr)
+ } /* end else */
- /* Mark that the entry was pinned through an explicit pin from a client */
- entry_ptr->pinned_from_client = TRUE;
+ /* Mark that the entry was pinned through an explicit pin from a client */
+ entry_ptr->pinned_from_client = TRUE;
done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__pin_entry_from_client() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__pin_entry_from_client() */
- /*-------------------------------------------------------------------------
- * Function: H5C__unpin_entry_real()
- *
- * Purpose: Internal routine to unpin a cache entry.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * 1/6/18
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__unpin_entry_real(H5C_t * cache_ptr, H5C_cache_entry_t * entry_ptr, hbool_t update_rp)
- {
- herr_t ret_value = SUCCEED; /* Return value */
+/*-------------------------------------------------------------------------
+ * Function: H5C__unpin_entry_real()
+ *
+ * Purpose: Internal routine to unpin a cache entry.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * 1/6/18
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__unpin_entry_real(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_STATIC
+ FUNC_ENTER_STATIC
- /* Sanity checking */
- HDassert(cache_ptr);
- HDassert(entry_ptr);
- HDassert(entry_ptr->is_pinned);
+ /* Sanity checking */
+ HDassert(cache_ptr);
+ HDassert(entry_ptr);
+ HDassert(entry_ptr->is_pinned);
- /* If requested, update the replacement policy if the entry is not protected */
- if (update_rp && !entry_ptr->is_protected)
- H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, FAIL)
+ /* If requested, update the replacement policy if the entry is not protected */
+ if (update_rp && !entry_ptr->is_protected)
+ H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, FAIL)
- /* Unpin the entry now */
- entry_ptr->is_pinned = FALSE;
+ /* Unpin the entry now */
+ entry_ptr->is_pinned = FALSE;
- /* Update the stats for an unpin operation */
- H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr)
+ /* Update the stats for an unpin operation */
+ H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr)
done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__unpin_entry_real() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__unpin_entry_real() */
- /*-------------------------------------------------------------------------
- * Function: H5C__unpin_entry_from_client()
- *
- * Purpose: Internal routine to unpin a cache entry from a client action.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * 3/24/09
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__unpin_entry_from_client(H5C_t * cache_ptr, H5C_cache_entry_t * entry_ptr,
- hbool_t update_rp)
- {
- herr_t ret_value = SUCCEED; /* Return value */
+/*-------------------------------------------------------------------------
+ * Function: H5C__unpin_entry_from_client()
+ *
+ * Purpose: Internal routine to unpin a cache entry from a client action.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * 3/24/09
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__unpin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_STATIC
+ FUNC_ENTER_STATIC
- /* Sanity checking */
- HDassert(cache_ptr);
- HDassert(entry_ptr);
+ /* Sanity checking */
+ HDassert(cache_ptr);
+ HDassert(entry_ptr);
- /* Error checking (should be sanity checks?) */
- if (!entry_ptr->is_pinned)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "entry isn't pinned")
- if (!entry_ptr->pinned_from_client)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "entry wasn't pinned by cache client")
+ /* Error checking (should be sanity checks?) */
+ if (!entry_ptr->is_pinned)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "entry isn't pinned")
+ if (!entry_ptr->pinned_from_client)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "entry wasn't pinned by cache client")
- /* Check if the entry is not pinned from a flush dependency */
- if (!entry_ptr->pinned_from_cache)
- if (H5C__unpin_entry_real(cache_ptr, entry_ptr, update_rp) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "can't unpin entry")
+ /* Check if the entry is not pinned from a flush dependency */
+ if (!entry_ptr->pinned_from_cache)
+ if (H5C__unpin_entry_real(cache_ptr, entry_ptr, update_rp) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "can't unpin entry")
- /* Mark the entry as explicitly unpinned by the client */
- entry_ptr->pinned_from_client = FALSE;
+ /* Mark the entry as explicitly unpinned by the client */
+ entry_ptr->pinned_from_client = FALSE;
done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__unpin_entry_from_client() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__unpin_entry_from_client() */
- /*-------------------------------------------------------------------------
- *
- * Function: H5C__auto_adjust_cache_size
- *
- * Purpose: Obtain the current full cache hit rate, and compare it
- * with the hit rate thresholds for modifying cache size.
- * If one of the thresholds has been crossed, adjusts the
- * size of the cache accordingly.
- *
- * The function then resets the full cache hit rate
- * statistics, and exits.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * an attempt to flush a protected item.
- *
- *
- * Programmer: John Mainzer, 10/7/04
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__auto_adjust_cache_size(H5F_t * f, hbool_t write_permitted)
- {
- H5C_t *cache_ptr = f->shared->cache;
- hbool_t reentrant_call = FALSE;
- hbool_t inserted_epoch_marker = FALSE;
- size_t new_max_cache_size = 0;
- size_t old_max_cache_size = 0;
- size_t new_min_clean_size = 0;
- size_t old_min_clean_size = 0;
- double hit_rate;
- enum H5C_resize_status status = in_spec; /* will change if needed */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI_NOINIT
-
- HDassert(f);
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->cache_accesses >= (cache_ptr->resize_ctl).epoch_length);
- HDassert((double)0.0f <= (cache_ptr->resize_ctl).min_clean_fraction);
- HDassert((cache_ptr->resize_ctl).min_clean_fraction <= (double)100.0f);
-
- /* check to see if cache_ptr->resize_in_progress is TRUE. If it, this
- * is a re-entrant call via a client callback called in the resize
- * process. To avoid an infinite recursion, set reentrant_call to
- * TRUE, and goto done.
- */
- if (cache_ptr->resize_in_progress) {
- reentrant_call = TRUE;
- HGOTO_DONE(SUCCEED)
- } /* end if */
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__auto_adjust_cache_size
+ *
+ * Purpose: Obtain the current full cache hit rate, and compare it
+ * with the hit rate thresholds for modifying cache size.
+ * If one of the thresholds has been crossed, adjusts the
+ * size of the cache accordingly.
+ *
+ * The function then resets the full cache hit rate
+ * statistics, and exits.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * an attempt to flush a protected item.
+ *
+ *
+ * Programmer: John Mainzer, 10/7/04
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted)
+{
+ H5C_t *cache_ptr = f->shared->cache;
+ hbool_t reentrant_call = FALSE;
+ hbool_t inserted_epoch_marker = FALSE;
+ size_t new_max_cache_size = 0;
+ size_t old_max_cache_size = 0;
+ size_t new_min_clean_size = 0;
+ size_t old_min_clean_size = 0;
+ double hit_rate;
+ enum H5C_resize_status status = in_spec; /* will change if needed */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
- cache_ptr->resize_in_progress = TRUE;
+ HDassert(f);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->cache_accesses >= (cache_ptr->resize_ctl).epoch_length);
+ HDassert((double)0.0f <= (cache_ptr->resize_ctl).min_clean_fraction);
+ HDassert((cache_ptr->resize_ctl).min_clean_fraction <= (double)100.0f);
+
+ /* check to see if cache_ptr->resize_in_progress is TRUE. If it, this
+ * is a re-entrant call via a client callback called in the resize
+ * process. To avoid an infinite recursion, set reentrant_call to
+ * TRUE, and goto done.
+ */
+ if (cache_ptr->resize_in_progress) {
+ reentrant_call = TRUE;
+ HGOTO_DONE(SUCCEED)
+ } /* end if */
- if (!cache_ptr->resize_enabled)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Auto cache resize disabled")
+ cache_ptr->resize_in_progress = TRUE;
- HDassert(((cache_ptr->resize_ctl).incr_mode != H5C_incr__off) ||
- ((cache_ptr->resize_ctl).decr_mode != H5C_decr__off));
+ if (!cache_ptr->resize_enabled)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Auto cache resize disabled")
- if (H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate")
+ HDassert(((cache_ptr->resize_ctl).incr_mode != H5C_incr__off) ||
+ ((cache_ptr->resize_ctl).decr_mode != H5C_decr__off));
- HDassert(((double)0.0f <= hit_rate) && (hit_rate <= (double)1.0f));
+ if (H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate")
- switch ((cache_ptr->resize_ctl).incr_mode) {
- case H5C_incr__off:
- if (cache_ptr->size_increase_possible)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "size_increase_possible but H5C_incr__off?!?!?")
- break;
+ HDassert(((double)0.0f <= hit_rate) && (hit_rate <= (double)1.0f));
- case H5C_incr__threshold:
- if (hit_rate < (cache_ptr->resize_ctl).lower_hr_threshold) {
+ switch ((cache_ptr->resize_ctl).incr_mode) {
+ case H5C_incr__off:
+ if (cache_ptr->size_increase_possible)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "size_increase_possible but H5C_incr__off?!?!?")
+ break;
- if (!cache_ptr->size_increase_possible) {
+ case H5C_incr__threshold:
+ if (hit_rate < (cache_ptr->resize_ctl).lower_hr_threshold) {
- status = increase_disabled;
- }
- else if (cache_ptr->max_cache_size >= (cache_ptr->resize_ctl).max_size) {
+ if (!cache_ptr->size_increase_possible) {
- HDassert(cache_ptr->max_cache_size == (cache_ptr->resize_ctl).max_size);
- status = at_max_size;
- }
- else if (!cache_ptr->cache_full) {
+ status = increase_disabled;
+ }
+ else if (cache_ptr->max_cache_size >= (cache_ptr->resize_ctl).max_size) {
- status = not_full;
- }
- else {
+ HDassert(cache_ptr->max_cache_size == (cache_ptr->resize_ctl).max_size);
+ status = at_max_size;
+ }
+ else if (!cache_ptr->cache_full) {
- new_max_cache_size = (size_t)(((double)(cache_ptr->max_cache_size)) *
- (cache_ptr->resize_ctl).increment);
+ status = not_full;
+ }
+ else {
- /* clip to max size if necessary */
- if (new_max_cache_size > (cache_ptr->resize_ctl).max_size) {
+ new_max_cache_size =
+ (size_t)(((double)(cache_ptr->max_cache_size)) * (cache_ptr->resize_ctl).increment);
- new_max_cache_size = (cache_ptr->resize_ctl).max_size;
- }
+ /* clip to max size if necessary */
+ if (new_max_cache_size > (cache_ptr->resize_ctl).max_size) {
- /* clip to max increment if necessary */
- if (((cache_ptr->resize_ctl).apply_max_increment) &&
- ((cache_ptr->max_cache_size + (cache_ptr->resize_ctl).max_increment) <
- new_max_cache_size)) {
+ new_max_cache_size = (cache_ptr->resize_ctl).max_size;
+ }
- new_max_cache_size =
- cache_ptr->max_cache_size + (cache_ptr->resize_ctl).max_increment;
- }
+ /* clip to max increment if necessary */
+ if (((cache_ptr->resize_ctl).apply_max_increment) &&
+ ((cache_ptr->max_cache_size + (cache_ptr->resize_ctl).max_increment) <
+ new_max_cache_size)) {
- status = increase;
+ new_max_cache_size =
+ cache_ptr->max_cache_size + (cache_ptr->resize_ctl).max_increment;
}
- }
- break;
- default:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode")
- }
+ status = increase;
+ }
+ }
+ break;
- /* If the decr_mode is either age out or age out with threshold, we
- * must run the marker maintenance code, whether we run the size
- * reduction code or not. We do this in two places -- here we
- * insert a new marker if the number of active epoch markers is
- * is less than the the current epochs before eviction, and after
- * the ageout call, we cycle the markers.
- *
- * However, we can't call the ageout code or cycle the markers
- * unless there was a full complement of markers in place on
- * entry. The inserted_epoch_marker flag is used to track this.
- */
+ default:
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode")
+ }
- if ((((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out) ||
- ((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out_with_threshold)) &&
- (cache_ptr->epoch_markers_active < (cache_ptr->resize_ctl).epochs_before_eviction)) {
+ /* If the decr_mode is either age out or age out with threshold, we
+ * must run the marker maintenance code, whether we run the size
+ * reduction code or not. We do this in two places -- here we
+ * insert a new marker if the number of active epoch markers
+ * is less than the current epochs before eviction, and after
+ * the ageout call, we cycle the markers.
+ *
+ * However, we can't call the ageout code or cycle the markers
+ * unless there was a full complement of markers in place on
+ * entry. The inserted_epoch_marker flag is used to track this.
+ */
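+ /* Editorial note: a "full complement" of markers means one active marker
+ * per tracked epoch, i.e. resize_ctl.epochs_before_eviction of them; the
+ * ring buffer below records their insertion order so that the oldest
+ * marker can be located and recycled.
+ */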
- if (H5C__autoadjust__ageout__insert_new_marker(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't insert new epoch marker")
+ if ((((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out) ||
+ ((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out_with_threshold)) &&
+ (cache_ptr->epoch_markers_active < (cache_ptr->resize_ctl).epochs_before_eviction)) {
- inserted_epoch_marker = TRUE;
- }
+ if (H5C__autoadjust__ageout__insert_new_marker(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't insert new epoch marker")
- /* don't run the cache size decrease code unless the cache size
- * increase code is disabled, or the size increase code sees no need
- * for action. In either case, status == in_spec at this point.
- */
+ inserted_epoch_marker = TRUE;
+ }
- if (status == in_spec) {
+ /* don't run the cache size decrease code unless the cache size
+ * increase code is disabled, or the size increase code sees no need
+ * for action. In either case, status == in_spec at this point.
+ */
- switch ((cache_ptr->resize_ctl).decr_mode) {
- case H5C_decr__off:
- break;
+ if (status == in_spec) {
- case H5C_decr__threshold:
- if (hit_rate > (cache_ptr->resize_ctl).upper_hr_threshold) {
+ switch ((cache_ptr->resize_ctl).decr_mode) {
+ case H5C_decr__off:
+ break;
- if (!cache_ptr->size_decrease_possible) {
+ case H5C_decr__threshold:
+ if (hit_rate > (cache_ptr->resize_ctl).upper_hr_threshold) {
- status = decrease_disabled;
- }
- else if (cache_ptr->max_cache_size <= (cache_ptr->resize_ctl).min_size) {
+ if (!cache_ptr->size_decrease_possible) {
- HDassert(cache_ptr->max_cache_size == (cache_ptr->resize_ctl).min_size);
- status = at_min_size;
- }
- else {
+ status = decrease_disabled;
+ }
+ else if (cache_ptr->max_cache_size <= (cache_ptr->resize_ctl).min_size) {
- new_max_cache_size = (size_t)(((double)(cache_ptr->max_cache_size)) *
- (cache_ptr->resize_ctl).decrement);
+ HDassert(cache_ptr->max_cache_size == (cache_ptr->resize_ctl).min_size);
+ status = at_min_size;
+ }
+ else {
- /* clip to min size if necessary */
- if (new_max_cache_size < (cache_ptr->resize_ctl).min_size) {
+ new_max_cache_size = (size_t)(((double)(cache_ptr->max_cache_size)) *
+ (cache_ptr->resize_ctl).decrement);
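+ /* Editorial note (illustration): with decrement = 0.5 and a current
+ * max_cache_size of 8 MB the candidate new size is 4 MB; the code below
+ * then raises it to resize_ctl.min_size if necessary and, when
+ * apply_max_decrement is set, limits the reduction to max_decrement.
+ */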
- new_max_cache_size = (cache_ptr->resize_ctl).min_size;
- }
+ /* clip to min size if necessary */
+ if (new_max_cache_size < (cache_ptr->resize_ctl).min_size) {
- /* clip to max decrement if necessary */
- if (((cache_ptr->resize_ctl).apply_max_decrement) &&
- (((cache_ptr->resize_ctl).max_decrement + new_max_cache_size) <
- cache_ptr->max_cache_size)) {
+ new_max_cache_size = (cache_ptr->resize_ctl).min_size;
+ }
- new_max_cache_size =
- cache_ptr->max_cache_size - (cache_ptr->resize_ctl).max_decrement;
- }
+ /* clip to max decrement if necessary */
+ if (((cache_ptr->resize_ctl).apply_max_decrement) &&
+ (((cache_ptr->resize_ctl).max_decrement + new_max_cache_size) <
+ cache_ptr->max_cache_size)) {
- status = decrease;
+ new_max_cache_size =
+ cache_ptr->max_cache_size - (cache_ptr->resize_ctl).max_decrement;
}
+
+ status = decrease;
}
- break;
+ }
+ break;
- case H5C_decr__age_out_with_threshold:
- case H5C_decr__age_out:
- if (!inserted_epoch_marker) {
- if (!cache_ptr->size_decrease_possible)
- status = decrease_disabled;
- else {
- if (H5C__autoadjust__ageout(f, hit_rate, &status, &new_max_cache_size,
- write_permitted) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ageout code failed")
- } /* end else */
- } /* end if */
- break;
+ case H5C_decr__age_out_with_threshold:
+ case H5C_decr__age_out:
+ if (!inserted_epoch_marker) {
+ if (!cache_ptr->size_decrease_possible)
+ status = decrease_disabled;
+ else {
+ if (H5C__autoadjust__ageout(f, hit_rate, &status, &new_max_cache_size,
+ write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ageout code failed")
+ } /* end else */
+ } /* end if */
+ break;
- default:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode")
- }
+ default:
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode")
}
+ }
- /* cycle the epoch markers here if appropriate */
- if ((((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out) ||
- ((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out_with_threshold)) &&
- (!inserted_epoch_marker)) {
+ /* cycle the epoch markers here if appropriate */
+ if ((((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out) ||
+ ((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out_with_threshold)) &&
+ (!inserted_epoch_marker)) {
- /* move last epoch marker to the head of the LRU list */
- if (H5C__autoadjust__ageout__cycle_epoch_marker(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error cycling epoch marker")
- }
+ /* move last epoch marker to the head of the LRU list */
+ if (H5C__autoadjust__ageout__cycle_epoch_marker(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error cycling epoch marker")
+ }
- if ((status == increase) || (status == decrease)) {
+ if ((status == increase) || (status == decrease)) {
- old_max_cache_size = cache_ptr->max_cache_size;
- old_min_clean_size = cache_ptr->min_clean_size;
+ old_max_cache_size = cache_ptr->max_cache_size;
+ old_min_clean_size = cache_ptr->min_clean_size;
- new_min_clean_size =
- (size_t)((double)new_max_cache_size * ((cache_ptr->resize_ctl).min_clean_fraction));
+ new_min_clean_size =
+ (size_t)((double)new_max_cache_size * ((cache_ptr->resize_ctl).min_clean_fraction));
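+ /* Editorial note (illustration): the minimum clean size is kept
+ * proportional to the new maximum -- e.g. min_clean_fraction = 0.5 and
+ * new_max_cache_size = 8 MB give a 4 MB minimum clean size.
+ */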
- /* new_min_clean_size is of size_t, and thus must be non-negative.
- * Hence we have
- *
- * ( 0 <= new_min_clean_size ).
- *
- * by definition.
- */
- HDassert(new_min_clean_size <= new_max_cache_size);
- HDassert((cache_ptr->resize_ctl).min_size <= new_max_cache_size);
- HDassert(new_max_cache_size <= (cache_ptr->resize_ctl).max_size);
+ /* new_min_clean_size is of type size_t, and thus must be non-negative.
+ * Hence we have
+ *
+ * ( 0 <= new_min_clean_size ).
+ *
+ * by definition.
+ */
+ HDassert(new_min_clean_size <= new_max_cache_size);
+ HDassert((cache_ptr->resize_ctl).min_size <= new_max_cache_size);
+ HDassert(new_max_cache_size <= (cache_ptr->resize_ctl).max_size);
- cache_ptr->max_cache_size = new_max_cache_size;
- cache_ptr->min_clean_size = new_min_clean_size;
+ cache_ptr->max_cache_size = new_max_cache_size;
+ cache_ptr->min_clean_size = new_min_clean_size;
- if (status == increase) {
+ if (status == increase) {
- cache_ptr->cache_full = FALSE;
- }
- else if (status == decrease) {
+ cache_ptr->cache_full = FALSE;
+ }
+ else if (status == decrease) {
- cache_ptr->size_decreased = TRUE;
- }
+ cache_ptr->size_decreased = TRUE;
+ }
- /* update flash cache size increase fields as appropriate */
- if (cache_ptr->flash_size_increase_possible) {
+ /* update flash cache size increase fields as appropriate */
+ if (cache_ptr->flash_size_increase_possible) {
- switch ((cache_ptr->resize_ctl).flash_incr_mode) {
- case H5C_flash_incr__off:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
- "flash_size_increase_possible but H5C_flash_incr__off?!")
- break;
+ switch ((cache_ptr->resize_ctl).flash_incr_mode) {
+ case H5C_flash_incr__off:
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
+ "flash_size_increase_possible but H5C_flash_incr__off?!")
+ break;
- case H5C_flash_incr__add_space:
- cache_ptr->flash_size_increase_threshold =
- (size_t)(((double)(cache_ptr->max_cache_size)) *
- ((cache_ptr->resize_ctl).flash_threshold));
- break;
+ case H5C_flash_incr__add_space:
+ cache_ptr->flash_size_increase_threshold =
+ (size_t)(((double)(cache_ptr->max_cache_size)) *
+ ((cache_ptr->resize_ctl).flash_threshold));
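+ /* Editorial note: the flash-increase trigger is recomputed as
+ * flash_threshold times the new max_cache_size, so an entry insertion or
+ * resize whose size exceeds that threshold can still trigger a flash
+ * size increase (see H5C__flash_increase_cache_size below).
+ */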
+ break;
- default: /* should be unreachable */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
- break;
- }
+ default: /* should be unreachable */
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
+ break;
}
}
+ }
- if ((cache_ptr->resize_ctl).rpt_fcn != NULL) {
- (*((cache_ptr->resize_ctl).rpt_fcn))(cache_ptr, H5C__CURR_AUTO_RESIZE_RPT_FCN_VER, hit_rate,
- status, old_max_cache_size, new_max_cache_size,
- old_min_clean_size, new_min_clean_size);
- }
+ if ((cache_ptr->resize_ctl).rpt_fcn != NULL) {
+ (*((cache_ptr->resize_ctl).rpt_fcn))(cache_ptr, H5C__CURR_AUTO_RESIZE_RPT_FCN_VER, hit_rate, status,
+ old_max_cache_size, new_max_cache_size, old_min_clean_size,
+ new_min_clean_size);
+ }
- if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
- /* this should be impossible... */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed")
+ if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
+ /* this should be impossible... */
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed")
done:
- /* Sanity checks */
- HDassert(cache_ptr->resize_in_progress);
- if (!reentrant_call)
- cache_ptr->resize_in_progress = FALSE;
- HDassert((!reentrant_call) || (cache_ptr->resize_in_progress));
-
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__auto_adjust_cache_size() */
+ /* Sanity checks */
+ HDassert(cache_ptr->resize_in_progress);
+ if (!reentrant_call)
+ cache_ptr->resize_in_progress = FALSE;
+ HDassert((!reentrant_call) || (cache_ptr->resize_in_progress));
- /*-------------------------------------------------------------------------
- *
- * Function: H5C__autoadjust__ageout
- *
- * Purpose: Implement the ageout automatic cache size decrement
- * algorithm. Note that while this code evicts aged out
- * entries, the code does not change the maximum cache size.
- * Instead, the function simply computes the new value (if
- * any change is indicated) and reports this value in
- * *new_max_cache_size_ptr.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * an attempt to flush a protected item.
- *
- *
- * Programmer: John Mainzer, 11/18/04
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__autoadjust__ageout(H5F_t * f, double hit_rate, enum H5C_resize_status *status_ptr,
- size_t *new_max_cache_size_ptr, hbool_t write_permitted)
- {
- H5C_t *cache_ptr = f->shared->cache;
- size_t test_size;
- herr_t ret_value = SUCCEED; /* Return value */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__auto_adjust_cache_size() */
- FUNC_ENTER_NOAPI_NOINIT
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__autoadjust__ageout
+ *
+ * Purpose: Implement the ageout automatic cache size decrement
+ * algorithm. Note that while this code evicts aged out
+ * entries, the code does not change the maximum cache size.
+ * Instead, the function simply computes the new value (if
+ * any change is indicated) and reports this value in
+ * *new_max_cache_size_ptr.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * an attempt to flush a protected item.
+ *
+ *
+ * Programmer: John Mainzer, 11/18/04
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__autoadjust__ageout(H5F_t *f, double hit_rate, enum H5C_resize_status *status_ptr,
+ size_t *new_max_cache_size_ptr, hbool_t write_permitted)
+{
+ H5C_t *cache_ptr = f->shared->cache;
+ size_t test_size;
+ herr_t ret_value = SUCCEED; /* Return value */
- HDassert(f);
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert((status_ptr) && (*status_ptr == in_spec));
- HDassert((new_max_cache_size_ptr) && (*new_max_cache_size_ptr == 0));
+ FUNC_ENTER_NOAPI_NOINIT
- /* remove excess epoch markers if any */
- if (cache_ptr->epoch_markers_active > (cache_ptr->resize_ctl).epochs_before_eviction)
- if (H5C__autoadjust__ageout__remove_excess_markers(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't remove excess epoch markers")
+ HDassert(f);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert((status_ptr) && (*status_ptr == in_spec));
+ HDassert((new_max_cache_size_ptr) && (*new_max_cache_size_ptr == 0));
- if (((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out) ||
- (((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out_with_threshold) &&
- (hit_rate >= (cache_ptr->resize_ctl).upper_hr_threshold))) {
+ /* remove excess epoch markers if any */
+ if (cache_ptr->epoch_markers_active > (cache_ptr->resize_ctl).epochs_before_eviction)
+ if (H5C__autoadjust__ageout__remove_excess_markers(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't remove excess epoch markers")
- if (cache_ptr->max_cache_size > (cache_ptr->resize_ctl).min_size) {
+ if (((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out) ||
+ (((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out_with_threshold) &&
+ (hit_rate >= (cache_ptr->resize_ctl).upper_hr_threshold))) {
- /* evict aged out cache entries if appropriate... */
- if (H5C__autoadjust__ageout__evict_aged_out_entries(f, write_permitted) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error flushing aged out entries")
+ if (cache_ptr->max_cache_size > (cache_ptr->resize_ctl).min_size) {
- /* ... and then reduce cache size if appropriate */
- if (cache_ptr->index_size < cache_ptr->max_cache_size) {
+ /* evict aged out cache entries if appropriate... */
+ if (H5C__autoadjust__ageout__evict_aged_out_entries(f, write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error flushing aged out entries")
- if ((cache_ptr->resize_ctl).apply_empty_reserve) {
+ /* ... and then reduce cache size if appropriate */
+ if (cache_ptr->index_size < cache_ptr->max_cache_size) {
- test_size = (size_t)(((double)cache_ptr->index_size) /
- (1 - (cache_ptr->resize_ctl).empty_reserve));
+ if ((cache_ptr->resize_ctl).apply_empty_reserve) {
- if (test_size < cache_ptr->max_cache_size) {
+ test_size = (size_t)(((double)cache_ptr->index_size) /
+ (1 - (cache_ptr->resize_ctl).empty_reserve));
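+ /* Editorial note: this is the smallest max_cache_size that still leaves
+ * the configured empty_reserve fraction of the cache unused at the
+ * current index_size -- e.g. 3 MB in use with a 0.1 reserve yields a
+ * target of ~3.33 MB.
+ */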
- *status_ptr = decrease;
- *new_max_cache_size_ptr = test_size;
- }
- }
- else {
+ if (test_size < cache_ptr->max_cache_size) {
*status_ptr = decrease;
- *new_max_cache_size_ptr = cache_ptr->index_size;
+ *new_max_cache_size_ptr = test_size;
}
+ }
+ else {
- if (*status_ptr == decrease) {
+ *status_ptr = decrease;
+ *new_max_cache_size_ptr = cache_ptr->index_size;
+ }
- /* clip to min size if necessary */
- if (*new_max_cache_size_ptr < (cache_ptr->resize_ctl).min_size) {
+ if (*status_ptr == decrease) {
- *new_max_cache_size_ptr = (cache_ptr->resize_ctl).min_size;
- }
+ /* clip to min size if necessary */
+ if (*new_max_cache_size_ptr < (cache_ptr->resize_ctl).min_size) {
- /* clip to max decrement if necessary */
- if (((cache_ptr->resize_ctl).apply_max_decrement) &&
- (((cache_ptr->resize_ctl).max_decrement + *new_max_cache_size_ptr) <
- cache_ptr->max_cache_size)) {
+ *new_max_cache_size_ptr = (cache_ptr->resize_ctl).min_size;
+ }
- *new_max_cache_size_ptr =
- cache_ptr->max_cache_size - (cache_ptr->resize_ctl).max_decrement;
- }
+ /* clip to max decrement if necessary */
+ if (((cache_ptr->resize_ctl).apply_max_decrement) &&
+ (((cache_ptr->resize_ctl).max_decrement + *new_max_cache_size_ptr) <
+ cache_ptr->max_cache_size)) {
+
+ *new_max_cache_size_ptr =
+ cache_ptr->max_cache_size - (cache_ptr->resize_ctl).max_decrement;
}
}
}
- else {
+ }
+ else {
- *status_ptr = at_min_size;
- }
+ *status_ptr = at_min_size;
}
+ }
done:
- FUNC_LEAVE_NOAPI(ret_value)
+ FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__autoadjust__ageout() */
+} /* H5C__autoadjust__ageout() */
- /*-------------------------------------------------------------------------
- *
- * Function: H5C__autoadjust__ageout__cycle_epoch_marker
- *
- * Purpose: Remove the oldest epoch marker from the LRU list,
- * and reinsert it at the head of the LRU list. Also
- * remove the epoch marker's index from the head of the
- * ring buffer, and re-insert it at the tail of the ring
- * buffer.
- *
- * Return: SUCCEED on success/FAIL on failure.
- *
- * Programmer: John Mainzer, 11/22/04
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t * cache_ptr)
- {
- herr_t ret_value = SUCCEED; /* Return value */
- int i;
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__autoadjust__ageout__cycle_epoch_marker
+ *
+ * Purpose: Remove the oldest epoch marker from the LRU list,
+ * and reinsert it at the head of the LRU list. Also
+ * remove the epoch marker's index from the head of the
+ * ring buffer, and re-insert it at the tail of the ring
+ * buffer.
+ *
+ * Return: SUCCEED on success/FAIL on failure.
+ *
+ * Programmer: John Mainzer, 11/22/04
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t *cache_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+ int i;
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_NOAPI_NOINIT
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- if (cache_ptr->epoch_markers_active <= 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "No active epoch markers on entry?!?!?")
+ if (cache_ptr->epoch_markers_active <= 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "No active epoch markers on entry?!?!?")
- /* remove the last marker from both the ring buffer and the LRU list */
+ /* remove the last marker from both the ring buffer and the LRU list */
- i = cache_ptr->epoch_marker_ringbuf[cache_ptr->epoch_marker_ringbuf_first];
+ i = cache_ptr->epoch_marker_ringbuf[cache_ptr->epoch_marker_ringbuf_first];
- cache_ptr->epoch_marker_ringbuf_first =
- (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
+ cache_ptr->epoch_marker_ringbuf_first =
+ (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
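+ /* Editorial note: ring buffer indices advance modulo
+ * H5C__MAX_EPOCH_MARKERS + 1, so incrementing the first/last index past
+ * the end of the buffer wraps it back to slot 0.
+ */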
- cache_ptr->epoch_marker_ringbuf_size -= 1;
+ cache_ptr->epoch_marker_ringbuf_size -= 1;
- if (cache_ptr->epoch_marker_ringbuf_size < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow")
- if ((cache_ptr->epoch_marker_active)[i] != TRUE)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
+ if (cache_ptr->epoch_marker_ringbuf_size < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow")
+ if ((cache_ptr->epoch_marker_active)[i] != TRUE)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
- H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr,
- (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size,
- (FAIL))
+ H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr, (cache_ptr)->LRU_tail_ptr,
+ (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size, (FAIL))
- /* now, re-insert it at the head of the LRU list, and at the tail of
- * the ring buffer.
- */
+ /* now, re-insert it at the head of the LRU list, and at the tail of
+ * the ring buffer.
+ */
- HDassert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i);
- HDassert(((cache_ptr->epoch_markers)[i]).next == NULL);
- HDassert(((cache_ptr->epoch_markers)[i]).prev == NULL);
+ HDassert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i);
+ HDassert(((cache_ptr->epoch_markers)[i]).next == NULL);
+ HDassert(((cache_ptr->epoch_markers)[i]).prev == NULL);
- cache_ptr->epoch_marker_ringbuf_last =
- (cache_ptr->epoch_marker_ringbuf_last + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
+ cache_ptr->epoch_marker_ringbuf_last =
+ (cache_ptr->epoch_marker_ringbuf_last + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
- (cache_ptr->epoch_marker_ringbuf)[cache_ptr->epoch_marker_ringbuf_last] = i;
+ (cache_ptr->epoch_marker_ringbuf)[cache_ptr->epoch_marker_ringbuf_last] = i;
- cache_ptr->epoch_marker_ringbuf_size += 1;
+ cache_ptr->epoch_marker_ringbuf_size += 1;
- if (cache_ptr->epoch_marker_ringbuf_size > H5C__MAX_EPOCH_MARKERS)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow")
+ if (cache_ptr->epoch_marker_ringbuf_size > H5C__MAX_EPOCH_MARKERS)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow")
- H5C__DLL_PREPEND((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr,
- (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size,
- (FAIL))
+ H5C__DLL_PREPEND((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr, (cache_ptr)->LRU_tail_ptr,
+ (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size, (FAIL))
done:
- FUNC_LEAVE_NOAPI(ret_value)
+ FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__autoadjust__ageout__cycle_epoch_marker() */
+} /* H5C__autoadjust__ageout__cycle_epoch_marker() */
- /*-------------------------------------------------------------------------
- *
- * Function: H5C__autoadjust__ageout__evict_aged_out_entries
- *
- * Purpose: Evict clean entries in the cache that haven't
- * been accessed for at least
- * (cache_ptr->resize_ctl).epochs_before_eviction epochs,
- * and flush dirty entries that haven't been accessed for
- * that amount of time.
- *
- * Depending on configuration, the function will either
- * flush or evict all such entries, or all such entries it
- * encounters until it has freed the maximum amount of space
- * allowed under the maximum decrement.
- *
- * If we are running in parallel mode, writes may not be
- * permitted. If so, the function simply skips any dirty
- * entries it may encounter.
- *
- * The function makes no attempt to maintain the minimum
- * clean size, as there is no guarantee that the cache size
- * will be changed.
- *
- * If there is no cache size change, the minimum clean size
- * constraint will be met through a combination of clean
- * entries and free space in the cache.
- *
- * If there is a cache size reduction, the minimum clean size
- * will be re-calculated, and will be enforced the next time
- * we have to make space in the cache.
- *
- * Observe that this function cannot occasion a read.
- *
- * Return: Non-negative on success/Negative on failure.
- *
- * Programmer: John Mainzer, 11/22/04
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f, hbool_t write_permitted)
- {
- H5C_t *cache_ptr = f->shared->cache;
- size_t eviction_size_limit;
- size_t bytes_evicted = 0;
- hbool_t prev_is_dirty = FALSE;
- hbool_t restart_scan;
- H5C_cache_entry_t *entry_ptr;
- H5C_cache_entry_t *next_ptr;
- H5C_cache_entry_t *prev_ptr;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI_NOINIT
-
- HDassert(f);
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
- /* if there is a limit on the amount that the cache size can be decrease
- * in any one round of the cache size reduction algorithm, load that
- * limit into eviction_size_limit. Otherwise, set eviction_size_limit
- * to the equivalent of infinity. The current size of the index will
- * do nicely.
- */
- if ((cache_ptr->resize_ctl).apply_max_decrement) {
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__autoadjust__ageout__evict_aged_out_entries
+ *
+ * Purpose: Evict clean entries in the cache that haven't
+ * been accessed for at least
+ * (cache_ptr->resize_ctl).epochs_before_eviction epochs,
+ * and flush dirty entries that haven't been accessed for
+ * that amount of time.
+ *
+ * Depending on configuration, the function will either
+ * flush or evict all such entries, or all such entries it
+ * encounters until it has freed the maximum amount of space
+ * allowed under the maximum decrement.
+ *
+ * If we are running in parallel mode, writes may not be
+ * permitted. If so, the function simply skips any dirty
+ * entries it may encounter.
+ *
+ * The function makes no attempt to maintain the minimum
+ * clean size, as there is no guarantee that the cache size
+ * will be changed.
+ *
+ * If there is no cache size change, the minimum clean size
+ * constraint will be met through a combination of clean
+ * entries and free space in the cache.
+ *
+ * If there is a cache size reduction, the minimum clean size
+ * will be re-calculated, and will be enforced the next time
+ * we have to make space in the cache.
+ *
+ * Observe that this function cannot occasion a read.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 11/22/04
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t *f, hbool_t write_permitted)
+{
+ H5C_t *cache_ptr = f->shared->cache;
+ size_t eviction_size_limit;
+ size_t bytes_evicted = 0;
+ hbool_t prev_is_dirty = FALSE;
+ hbool_t restart_scan;
+ H5C_cache_entry_t *entry_ptr;
+ H5C_cache_entry_t *next_ptr;
+ H5C_cache_entry_t *prev_ptr;
+ herr_t ret_value = SUCCEED; /* Return value */
- eviction_size_limit = (cache_ptr->resize_ctl).max_decrement;
- }
- else {
+ FUNC_ENTER_NOAPI_NOINIT
- eviction_size_limit = cache_ptr->index_size; /* i.e. infinity */
- }
+ HDassert(f);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- if (write_permitted) {
+ /* if there is a limit on the amount that the cache size can be decreased
+ * in any one round of the cache size reduction algorithm, load that
+ * limit into eviction_size_limit. Otherwise, set eviction_size_limit
+ * to the equivalent of infinity. The current size of the index will
+ * do nicely.
+ */
+ if ((cache_ptr->resize_ctl).apply_max_decrement) {
- restart_scan = FALSE;
- entry_ptr = cache_ptr->LRU_tail_ptr;
+ eviction_size_limit = (cache_ptr->resize_ctl).max_decrement;
+ }
+ else {
- while ((entry_ptr != NULL) && ((entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID) &&
- (bytes_evicted < eviction_size_limit)) {
- hbool_t skipping_entry = FALSE;
+ eviction_size_limit = cache_ptr->index_size; /* i.e. infinity */
+ }
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(!(entry_ptr->is_protected));
- HDassert(!(entry_ptr->is_read_only));
- HDassert((entry_ptr->ro_ref_count) == 0);
+ if (write_permitted) {
- next_ptr = entry_ptr->next;
- prev_ptr = entry_ptr->prev;
+ restart_scan = FALSE;
+ entry_ptr = cache_ptr->LRU_tail_ptr;
- if (prev_ptr != NULL)
- prev_is_dirty = prev_ptr->is_dirty;
+ while ((entry_ptr != NULL) && ((entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID) &&
+ (bytes_evicted < eviction_size_limit)) {
+ hbool_t skipping_entry = FALSE;
- if (entry_ptr->is_dirty) {
- HDassert(!entry_ptr->prefetched_dirty);
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(!(entry_ptr->is_protected));
+ HDassert(!(entry_ptr->is_read_only));
+ HDassert((entry_ptr->ro_ref_count) == 0);
- /* dirty corked entry is skipped */
- if (entry_ptr->tag_info && entry_ptr->tag_info->corked)
- skipping_entry = TRUE;
- else {
- /* reset entries_removed_counter and
- * last_entry_removed_ptr prior to the call to
- * H5C__flush_single_entry() so that we can spot
- * unexpected removals of entries from the cache,
- * and set the restart_scan flag if proceeding
- * would be likely to cause us to scan an entry
- * that is no longer in the cache.
- */
- cache_ptr->entries_removed_counter = 0;
- cache_ptr->last_entry_removed_ptr = NULL;
+ next_ptr = entry_ptr->next;
+ prev_ptr = entry_ptr->prev;
- if (H5C__flush_single_entry(f, entry_ptr, H5C__NO_FLAGS_SET) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
+ if (prev_ptr != NULL)
+ prev_is_dirty = prev_ptr->is_dirty;
- if (cache_ptr->entries_removed_counter > 1 ||
- cache_ptr->last_entry_removed_ptr == prev_ptr)
- restart_scan = TRUE;
- } /* end else */
- } /* end if */
- else if (!entry_ptr->prefetched_dirty) {
+ if (entry_ptr->is_dirty) {
+ HDassert(!entry_ptr->prefetched_dirty);
- bytes_evicted += entry_ptr->size;
+ /* dirty corked entry is skipped */
+ if (entry_ptr->tag_info && entry_ptr->tag_info->corked)
+ skipping_entry = TRUE;
+ else {
+ /* reset entries_removed_counter and
+ * last_entry_removed_ptr prior to the call to
+ * H5C__flush_single_entry() so that we can spot
+ * unexpected removals of entries from the cache,
+ * and set the restart_scan flag if proceeding
+ * would be likely to cause us to scan an entry
+ * that is no longer in the cache.
+ */
+ cache_ptr->entries_removed_counter = 0;
+ cache_ptr->last_entry_removed_ptr = NULL;
- if (H5C__flush_single_entry(f, entry_ptr,
- H5C__FLUSH_INVALIDATE_FLAG |
- H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
+ if (H5C__flush_single_entry(f, entry_ptr, H5C__NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
- } /* end else-if */
- else {
- HDassert(!entry_ptr->is_dirty);
- HDassert(entry_ptr->prefetched_dirty);
- skipping_entry = TRUE;
+ if (cache_ptr->entries_removed_counter > 1 ||
+ cache_ptr->last_entry_removed_ptr == prev_ptr)
+ restart_scan = TRUE;
} /* end else */
+ } /* end if */
+ else if (!entry_ptr->prefetched_dirty) {
- if (prev_ptr != NULL) {
- if (skipping_entry)
- entry_ptr = prev_ptr;
- else if (restart_scan || (prev_ptr->is_dirty != prev_is_dirty) ||
- (prev_ptr->next != next_ptr) || (prev_ptr->is_protected) ||
- (prev_ptr->is_pinned)) {
- /* Something has happened to the LRU -- start over
- * from the tail.
- */
- restart_scan = FALSE;
- entry_ptr = cache_ptr->LRU_tail_ptr;
+ bytes_evicted += entry_ptr->size;
- H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
- } /* end else-if */
- else
- entry_ptr = prev_ptr;
- } /* end if */
+ if (H5C__flush_single_entry(
+ f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
+ } /* end else-if */
+ else {
+ HDassert(!entry_ptr->is_dirty);
+ HDassert(entry_ptr->prefetched_dirty);
+
+ skipping_entry = TRUE;
+ } /* end else */
+
+ if (prev_ptr != NULL) {
+ if (skipping_entry)
+ entry_ptr = prev_ptr;
+ else if (restart_scan || (prev_ptr->is_dirty != prev_is_dirty) ||
+ (prev_ptr->next != next_ptr) || (prev_ptr->is_protected) || (prev_ptr->is_pinned)) {
+ /* Something has happened to the LRU -- start over
+ * from the tail.
+ */
+ restart_scan = FALSE;
+ entry_ptr = cache_ptr->LRU_tail_ptr;
+
+ H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
+ } /* end else-if */
else
- entry_ptr = NULL;
- } /* end while */
+ entry_ptr = prev_ptr;
+ } /* end if */
+ else
+ entry_ptr = NULL;
+ } /* end while */
- /* for now at least, don't bother to maintain the minimum clean size,
- * as the cache should now be less than its maximum size. Due to
- * the vaguries of the cache size reduction algorithm, we may not
- * reduce the size of the cache.
- *
- * If we do, we will calculate a new minimum clean size, which will
- * be enforced the next time we try to make space in the cache.
- *
- * If we don't, no action is necessary, as we have just evicted and/or
- * or flushed a bunch of entries and therefore the sum of the clean
- * and free space in the cache must be greater than or equal to the
- * min clean space requirement (assuming that requirement was met on
- * entry).
- */
+ /* for now at least, don't bother to maintain the minimum clean size,
+ * as the cache should now be less than its maximum size. Due to
+ * the vagaries of the cache size reduction algorithm, we may not
+ * reduce the size of the cache.
+ *
+ * If we do, we will calculate a new minimum clean size, which will
+ * be enforced the next time we try to make space in the cache.
+ *
+ * If we don't, no action is necessary, as we have just evicted and/or
+ * flushed a bunch of entries and therefore the sum of the clean
+ * and free space in the cache must be greater than or equal to the
+ * min clean space requirement (assuming that requirement was met on
+ * entry).
+ */
- } /* end if */
- else /* ! write_permitted */ {
- /* Since we are not allowed to write, all we can do is evict
- * any clean entries that we may encounter before we either
- * hit the eviction size limit, or encounter the epoch marker.
- *
- * If we are operating read only, this isn't an issue, as there
- * will not be any dirty entries.
- *
- * If we are operating in R/W mode, all the dirty entries we
- * skip will be flushed the next time we attempt to make space
- * when writes are permitted. This may have some local
- * performance implications, but it shouldn't cause any net
- * slowdown.
- */
- HDassert(H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS);
- entry_ptr = cache_ptr->LRU_tail_ptr;
- while (entry_ptr != NULL && ((entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID) &&
- (bytes_evicted < eviction_size_limit)) {
- HDassert(!(entry_ptr->is_protected));
+ } /* end if */
+ else /* ! write_permitted */ {
+ /* Since we are not allowed to write, all we can do is evict
+ * any clean entries that we may encounter before we either
+ * hit the eviction size limit, or encounter the epoch marker.
+ *
+ * If we are operating read only, this isn't an issue, as there
+ * will not be any dirty entries.
+ *
+ * If we are operating in R/W mode, all the dirty entries we
+ * skip will be flushed the next time we attempt to make space
+ * when writes are permitted. This may have some local
+ * performance implications, but it shouldn't cause any net
+ * slowdown.
+ */
+ HDassert(H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS);
+ entry_ptr = cache_ptr->LRU_tail_ptr;
+ while (entry_ptr != NULL && ((entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID) &&
+ (bytes_evicted < eviction_size_limit)) {
+ HDassert(!(entry_ptr->is_protected));
- prev_ptr = entry_ptr->prev;
+ prev_ptr = entry_ptr->prev;
- if (!(entry_ptr->is_dirty) && !(entry_ptr->prefetched_dirty))
- if (H5C__flush_single_entry(f, entry_ptr,
- H5C__FLUSH_INVALIDATE_FLAG |
- H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush clean entry")
-
- /* just skip the entry if it is dirty, as we can't do
- * anything with it now since we can't write.
- *
- * Since all entries are clean, serialize() will not be called,
- * and thus we needn't test to see if the LRU has been changed
- * out from under us.
- */
- entry_ptr = prev_ptr;
- } /* end while */
- } /* end else */
+ if (!(entry_ptr->is_dirty) && !(entry_ptr->prefetched_dirty))
+ if (H5C__flush_single_entry(
+ f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush clean entry")
- if (cache_ptr->index_size < cache_ptr->max_cache_size)
- cache_ptr->cache_full = FALSE;
+ /* just skip the entry if it is dirty, as we can't do
+ * anything with it now since we can't write.
+ *
+ * Since every entry we evict here is clean, serialize() will not be called,
+ * and thus we needn't test to see if the LRU has been changed
+ * out from under us.
+ */
+ entry_ptr = prev_ptr;
+ } /* end while */
+ } /* end else */
+
+ if (cache_ptr->index_size < cache_ptr->max_cache_size)
+ cache_ptr->cache_full = FALSE;
done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__autoadjust__ageout__evict_aged_out_entries() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__autoadjust__ageout__evict_aged_out_entries() */
- /*-------------------------------------------------------------------------
- *
- * Function: H5C__autoadjust__ageout__insert_new_marker
- *
- * Purpose: Find an unused marker cache entry, mark it as used, and
- * insert it at the head of the LRU list. Also add the
- * marker's index in the epoch_markers array.
- *
- * Return: SUCCEED on success/FAIL on failure.
- *
- * Programmer: John Mainzer, 11/19/04
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__autoadjust__ageout__insert_new_marker(H5C_t * cache_ptr)
- {
- herr_t ret_value = SUCCEED; /* Return value */
- int i;
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__autoadjust__ageout__insert_new_marker
+ *
+ * Purpose: Find an unused marker cache entry, mark it as used, and
+ * insert it at the head of the LRU list. Also add the
+ * marker's index in the epoch_markers array to the tail of the ring buffer.
+ *
+ * Return: SUCCEED on success/FAIL on failure.
+ *
+ * Programmer: John Mainzer, 11/19/04
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__autoadjust__ageout__insert_new_marker(H5C_t *cache_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+ int i;
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_NOAPI_NOINIT
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- if (cache_ptr->epoch_markers_active >= (cache_ptr->resize_ctl).epochs_before_eviction)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Already have a full complement of markers")
+ if (cache_ptr->epoch_markers_active >= (cache_ptr->resize_ctl).epochs_before_eviction)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Already have a full complement of markers")
- /* find an unused marker */
- i = 0;
- while ((cache_ptr->epoch_marker_active)[i] && i < H5C__MAX_EPOCH_MARKERS)
- i++;
+ /* find an unused marker */
+ i = 0;
+ while ((cache_ptr->epoch_marker_active)[i] && i < H5C__MAX_EPOCH_MARKERS)
+ i++;
- if (i >= H5C__MAX_EPOCH_MARKERS)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't find unused marker")
+ if (i >= H5C__MAX_EPOCH_MARKERS)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't find unused marker")
- HDassert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i);
- HDassert(((cache_ptr->epoch_markers)[i]).next == NULL);
- HDassert(((cache_ptr->epoch_markers)[i]).prev == NULL);
+ HDassert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i);
+ HDassert(((cache_ptr->epoch_markers)[i]).next == NULL);
+ HDassert(((cache_ptr->epoch_markers)[i]).prev == NULL);
- (cache_ptr->epoch_marker_active)[i] = TRUE;
+ (cache_ptr->epoch_marker_active)[i] = TRUE;
- cache_ptr->epoch_marker_ringbuf_last =
- (cache_ptr->epoch_marker_ringbuf_last + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
+ cache_ptr->epoch_marker_ringbuf_last =
+ (cache_ptr->epoch_marker_ringbuf_last + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
- (cache_ptr->epoch_marker_ringbuf)[cache_ptr->epoch_marker_ringbuf_last] = i;
+ (cache_ptr->epoch_marker_ringbuf)[cache_ptr->epoch_marker_ringbuf_last] = i;
- cache_ptr->epoch_marker_ringbuf_size += 1;
+ cache_ptr->epoch_marker_ringbuf_size += 1;
- if (cache_ptr->epoch_marker_ringbuf_size > H5C__MAX_EPOCH_MARKERS) {
+ if (cache_ptr->epoch_marker_ringbuf_size > H5C__MAX_EPOCH_MARKERS) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow")
- }
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow")
+ }
- H5C__DLL_PREPEND((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr,
- (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size,
- (FAIL))
+ H5C__DLL_PREPEND((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr, (cache_ptr)->LRU_tail_ptr,
+ (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size, (FAIL))
- cache_ptr->epoch_markers_active += 1;
+ cache_ptr->epoch_markers_active += 1;
done:
- FUNC_LEAVE_NOAPI(ret_value)
+ FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__autoadjust__ageout__insert_new_marker() */
+} /* H5C__autoadjust__ageout__insert_new_marker() */
- /*-------------------------------------------------------------------------
- *
- * Function: H5C__autoadjust__ageout__remove_all_markers
- *
- * Purpose: Remove all epoch markers from the LRU list and mark them
- * as inactive.
- *
- * Return: SUCCEED on success/FAIL on failure.
- *
- * Programmer: John Mainzer, 11/22/04
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__autoadjust__ageout__remove_all_markers(H5C_t * cache_ptr)
- {
- herr_t ret_value = SUCCEED; /* Return value */
- int i;
- int ring_buf_index;
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__autoadjust__ageout__remove_all_markers
+ *
+ * Purpose: Remove all epoch markers from the LRU list and mark them
+ * as inactive.
+ *
+ * Return: SUCCEED on success/FAIL on failure.
+ *
+ * Programmer: John Mainzer, 11/22/04
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__autoadjust__ageout__remove_all_markers(H5C_t *cache_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+ int i;
+ int ring_buf_index;
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_NOAPI_NOINIT
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- while (cache_ptr->epoch_markers_active > 0) {
- /* get the index of the last epoch marker in the LRU list
- * and remove it from the ring buffer.
- */
+ while (cache_ptr->epoch_markers_active > 0) {
+ /* get the index of the last epoch marker in the LRU list
+ * and remove it from the ring buffer.
+ */
- ring_buf_index = cache_ptr->epoch_marker_ringbuf_first;
- i = (cache_ptr->epoch_marker_ringbuf)[ring_buf_index];
+ ring_buf_index = cache_ptr->epoch_marker_ringbuf_first;
+ i = (cache_ptr->epoch_marker_ringbuf)[ring_buf_index];
- cache_ptr->epoch_marker_ringbuf_first =
- (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
+ cache_ptr->epoch_marker_ringbuf_first =
+ (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
- cache_ptr->epoch_marker_ringbuf_size -= 1;
+ cache_ptr->epoch_marker_ringbuf_size -= 1;
- if (cache_ptr->epoch_marker_ringbuf_size < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow")
+ if (cache_ptr->epoch_marker_ringbuf_size < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow")
- if ((cache_ptr->epoch_marker_active)[i] != TRUE)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
+ if ((cache_ptr->epoch_marker_active)[i] != TRUE)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
- /* remove the epoch marker from the LRU list */
- H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr,
- (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size,
- (FAIL))
+ /* remove the epoch marker from the LRU list */
+ H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr,
+ (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size,
+ (FAIL))
- /* mark the epoch marker as unused. */
- (cache_ptr->epoch_marker_active)[i] = FALSE;
+ /* mark the epoch marker as unused. */
+ (cache_ptr->epoch_marker_active)[i] = FALSE;
- HDassert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i);
- HDassert(((cache_ptr->epoch_markers)[i]).next == NULL);
- HDassert(((cache_ptr->epoch_markers)[i]).prev == NULL);
+ HDassert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i);
+ HDassert(((cache_ptr->epoch_markers)[i]).next == NULL);
+ HDassert(((cache_ptr->epoch_markers)[i]).prev == NULL);
- /* decrement the number of active epoch markers */
- cache_ptr->epoch_markers_active -= 1;
+ /* decrement the number of active epoch markers */
+ cache_ptr->epoch_markers_active -= 1;
- HDassert(cache_ptr->epoch_markers_active == cache_ptr->epoch_marker_ringbuf_size);
- }
+ HDassert(cache_ptr->epoch_markers_active == cache_ptr->epoch_marker_ringbuf_size);
+ }
done:
- FUNC_LEAVE_NOAPI(ret_value)
+ FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__autoadjust__ageout__remove_all_markers() */
+} /* H5C__autoadjust__ageout__remove_all_markers() */
- /*-------------------------------------------------------------------------
- *
- * Function: H5C__autoadjust__ageout__remove_excess_markers
- *
- * Purpose: Remove epoch markers from the end of the LRU list and
- * mark them as inactive until the number of active markers
- * equals the the current value of
- * (cache_ptr->resize_ctl).epochs_before_eviction.
- *
- * Return: SUCCEED on success/FAIL on failure.
- *
- * Programmer: John Mainzer, 11/19/04
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__autoadjust__ageout__remove_excess_markers(H5C_t * cache_ptr)
- {
- herr_t ret_value = SUCCEED; /* Return value */
- int i;
- int ring_buf_index;
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__autoadjust__ageout__remove_excess_markers
+ *
+ * Purpose: Remove epoch markers from the end of the LRU list and
+ * mark them as inactive until the number of active markers
+ * equals the current value of
+ * (cache_ptr->resize_ctl).epochs_before_eviction.
+ *
+ * Return: SUCCEED on success/FAIL on failure.
+ *
+ * Programmer: John Mainzer, 11/19/04
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__autoadjust__ageout__remove_excess_markers(H5C_t *cache_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+ int i;
+ int ring_buf_index;
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_NOAPI_NOINIT
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- if (cache_ptr->epoch_markers_active <= (cache_ptr->resize_ctl).epochs_before_eviction)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "no excess markers on entry")
+ if (cache_ptr->epoch_markers_active <= (cache_ptr->resize_ctl).epochs_before_eviction)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "no excess markers on entry")
- while (cache_ptr->epoch_markers_active > (cache_ptr->resize_ctl).epochs_before_eviction) {
- /* get the index of the last epoch marker in the LRU list
- * and remove it from the ring buffer.
- */
+ while (cache_ptr->epoch_markers_active > (cache_ptr->resize_ctl).epochs_before_eviction) {
+ /* get the index of the last epoch marker in the LRU list
+ * and remove it from the ring buffer.
+ */
- ring_buf_index = cache_ptr->epoch_marker_ringbuf_first;
- i = (cache_ptr->epoch_marker_ringbuf)[ring_buf_index];
+ ring_buf_index = cache_ptr->epoch_marker_ringbuf_first;
+ i = (cache_ptr->epoch_marker_ringbuf)[ring_buf_index];
- cache_ptr->epoch_marker_ringbuf_first =
- (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
+ cache_ptr->epoch_marker_ringbuf_first =
+ (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
- cache_ptr->epoch_marker_ringbuf_size -= 1;
+ cache_ptr->epoch_marker_ringbuf_size -= 1;
- if (cache_ptr->epoch_marker_ringbuf_size < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow")
- if ((cache_ptr->epoch_marker_active)[i] != TRUE)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
+ if (cache_ptr->epoch_marker_ringbuf_size < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow")
+ if ((cache_ptr->epoch_marker_active)[i] != TRUE)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
- /* remove the epoch marker from the LRU list */
- H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr,
- (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size,
- (FAIL))
+ /* remove the epoch marker from the LRU list */
+ H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr,
+ (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size,
+ (FAIL))
- /* mark the epoch marker as unused. */
- (cache_ptr->epoch_marker_active)[i] = FALSE;
+ /* mark the epoch marker as unused. */
+ (cache_ptr->epoch_marker_active)[i] = FALSE;
- HDassert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i);
- HDassert(((cache_ptr->epoch_markers)[i]).next == NULL);
- HDassert(((cache_ptr->epoch_markers)[i]).prev == NULL);
+ HDassert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i);
+ HDassert(((cache_ptr->epoch_markers)[i]).next == NULL);
+ HDassert(((cache_ptr->epoch_markers)[i]).prev == NULL);
- /* decrement the number of active epoch markers */
- cache_ptr->epoch_markers_active -= 1;
+ /* decrement the number of active epoch markers */
+ cache_ptr->epoch_markers_active -= 1;
- HDassert(cache_ptr->epoch_markers_active == cache_ptr->epoch_marker_ringbuf_size);
- }
+ HDassert(cache_ptr->epoch_markers_active == cache_ptr->epoch_marker_ringbuf_size);
+ }
done:
- FUNC_LEAVE_NOAPI(ret_value)
+ FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__autoadjust__ageout__remove_excess_markers() */
+} /* H5C__autoadjust__ageout__remove_excess_markers() */
- /*-------------------------------------------------------------------------
- *
- * Function: H5C__flash_increase_cache_size
- *
- * Purpose: If there is not at least new_entry_size - old_entry_size
- * bytes of free space in the cache and the current
- * max_cache_size is less than (cache_ptr->resize_ctl).max_size,
- * perform a flash increase in the cache size and then reset
- * the full cache hit rate statistics, and exit.
- *
- * Return: Non-negative on success/Negative on failure.
- *
- * Programmer: John Mainzer, 12/31/07
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__flash_increase_cache_size(H5C_t * cache_ptr, size_t old_entry_size,
- size_t new_entry_size)
- {
- size_t new_max_cache_size = 0;
- size_t old_max_cache_size = 0;
- size_t new_min_clean_size = 0;
- size_t old_min_clean_size = 0;
- size_t space_needed;
- enum H5C_resize_status status = flash_increase; /* may change */
- double hit_rate;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI_NOINIT
-
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->flash_size_increase_possible);
- HDassert(new_entry_size > cache_ptr->flash_size_increase_threshold);
- HDassert(old_entry_size < new_entry_size);
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__flash_increase_cache_size
+ *
+ * Purpose: If there is not at least new_entry_size - old_entry_size
+ * bytes of free space in the cache and the current
+ * max_cache_size is less than (cache_ptr->resize_ctl).max_size,
+ * perform a flash increase in the cache size and then reset
+ * the full cache hit rate statistics, and exit.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 12/31/07
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__flash_increase_cache_size(H5C_t *cache_ptr, size_t old_entry_size, size_t new_entry_size)
+{
+ size_t new_max_cache_size = 0;
+ size_t old_max_cache_size = 0;
+ size_t new_min_clean_size = 0;
+ size_t old_min_clean_size = 0;
+ size_t space_needed;
+ enum H5C_resize_status status = flash_increase; /* may change */
+ double hit_rate;
+ herr_t ret_value = SUCCEED; /* Return value */
- if (old_entry_size >= new_entry_size)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "old_entry_size >= new_entry_size")
+ FUNC_ENTER_NOAPI_NOINIT
- space_needed = new_entry_size - old_entry_size;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->flash_size_increase_possible);
+ HDassert(new_entry_size > cache_ptr->flash_size_increase_threshold);
+ HDassert(old_entry_size < new_entry_size);
- if (((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) &&
- (cache_ptr->max_cache_size < (cache_ptr->resize_ctl).max_size)) {
+ if (old_entry_size >= new_entry_size)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "old_entry_size >= new_entry_size")
- /* we have work to do */
+ space_needed = new_entry_size - old_entry_size;
- switch ((cache_ptr->resize_ctl).flash_incr_mode) {
- case H5C_flash_incr__off:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
- "flash_size_increase_possible but H5C_flash_incr__off?!")
- break;
+ if (((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) &&
+ (cache_ptr->max_cache_size < (cache_ptr->resize_ctl).max_size)) {
- case H5C_flash_incr__add_space:
- if (cache_ptr->index_size < cache_ptr->max_cache_size) {
+ /* we have work to do */
- HDassert((cache_ptr->max_cache_size - cache_ptr->index_size) < space_needed);
- space_needed -= cache_ptr->max_cache_size - cache_ptr->index_size;
- }
- space_needed = (size_t)(((double)space_needed) * (cache_ptr->resize_ctl).flash_multiple);
+ switch ((cache_ptr->resize_ctl).flash_incr_mode) {
+ case H5C_flash_incr__off:
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
+ "flash_size_increase_possible but H5C_flash_incr__off?!")
+ break;
- new_max_cache_size = cache_ptr->max_cache_size + space_needed;
+ case H5C_flash_incr__add_space:
+ if (cache_ptr->index_size < cache_ptr->max_cache_size) {
- break;
+ HDassert((cache_ptr->max_cache_size - cache_ptr->index_size) < space_needed);
+ space_needed -= cache_ptr->max_cache_size - cache_ptr->index_size;
+ }
+ space_needed = (size_t)(((double)space_needed) * (cache_ptr->resize_ctl).flash_multiple);
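+ /* Editorial note (illustration): only the shortfall beyond the cache's
+ * current free space is counted, then scaled by flash_multiple -- e.g. a
+ * 1 MB shortfall with flash_multiple = 1.4 requests 1.4 MB of extra
+ * space, subject to the resize_ctl.max_size clip below.
+ */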
- default: /* should be unreachable */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
- break;
- }
+ new_max_cache_size = cache_ptr->max_cache_size + space_needed;
- if (new_max_cache_size > (cache_ptr->resize_ctl).max_size) {
+ break;
- new_max_cache_size = (cache_ptr->resize_ctl).max_size;
- }
+ default: /* should be unreachable */
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
+ break;
+ }
- HDassert(new_max_cache_size > cache_ptr->max_cache_size);
+ if (new_max_cache_size > (cache_ptr->resize_ctl).max_size) {
- new_min_clean_size =
- (size_t)((double)new_max_cache_size * ((cache_ptr->resize_ctl).min_clean_fraction));
+ new_max_cache_size = (cache_ptr->resize_ctl).max_size;
+ }
- HDassert(new_min_clean_size <= new_max_cache_size);
+ HDassert(new_max_cache_size > cache_ptr->max_cache_size);
- old_max_cache_size = cache_ptr->max_cache_size;
- old_min_clean_size = cache_ptr->min_clean_size;
+ new_min_clean_size =
+ (size_t)((double)new_max_cache_size * ((cache_ptr->resize_ctl).min_clean_fraction));
- cache_ptr->max_cache_size = new_max_cache_size;
- cache_ptr->min_clean_size = new_min_clean_size;
+ HDassert(new_min_clean_size <= new_max_cache_size);
- /* update flash cache size increase fields as appropriate */
- HDassert(cache_ptr->flash_size_increase_possible);
+ old_max_cache_size = cache_ptr->max_cache_size;
+ old_min_clean_size = cache_ptr->min_clean_size;
- switch ((cache_ptr->resize_ctl).flash_incr_mode) {
- case H5C_flash_incr__off:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
- "flash_size_increase_possible but H5C_flash_incr__off?!")
- break;
+ cache_ptr->max_cache_size = new_max_cache_size;
+ cache_ptr->min_clean_size = new_min_clean_size;
- case H5C_flash_incr__add_space:
- cache_ptr->flash_size_increase_threshold =
- (size_t)(((double)(cache_ptr->max_cache_size)) *
- ((cache_ptr->resize_ctl).flash_threshold));
- break;
+ /* update flash cache size increase fields as appropriate */
+ HDassert(cache_ptr->flash_size_increase_possible);
- default: /* should be unreachable */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
- break;
- }
+ switch ((cache_ptr->resize_ctl).flash_incr_mode) {
+ case H5C_flash_incr__off:
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
+ "flash_size_increase_possible but H5C_flash_incr__off?!")
+ break;
- /* note that we don't cycle the epoch markers. We can
- * argue either way as to whether we should, but for now
- * we don't.
- */
+ case H5C_flash_incr__add_space:
+ cache_ptr->flash_size_increase_threshold =
+ (size_t)(((double)(cache_ptr->max_cache_size)) *
+ ((cache_ptr->resize_ctl).flash_threshold));
+ break;
- if ((cache_ptr->resize_ctl).rpt_fcn != NULL) {
+ default: /* should be unreachable */
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
+ break;
+ }
- /* get the hit rate for the reporting function. Should still
- * be good as we haven't reset the hit rate statistics.
- */
- if (H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate")
+ /* note that we don't cycle the epoch markers. We can
+ * argue either way as to whether we should, but for now
+ * we don't.
+ */
- (*((cache_ptr->resize_ctl).rpt_fcn))(cache_ptr, H5C__CURR_AUTO_RESIZE_RPT_FCN_VER, hit_rate,
- status, old_max_cache_size, new_max_cache_size,
- old_min_clean_size, new_min_clean_size);
- }
+ if ((cache_ptr->resize_ctl).rpt_fcn != NULL) {
+
+ /* get the hit rate for the reporting function. Should still
+ * be good as we haven't reset the hit rate statistics.
+ */
+ if (H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate")
- if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
- /* this should be impossible... */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed")
+ (*((cache_ptr->resize_ctl).rpt_fcn))(cache_ptr, H5C__CURR_AUTO_RESIZE_RPT_FCN_VER, hit_rate,
+ status, old_max_cache_size, new_max_cache_size,
+ old_min_clean_size, new_min_clean_size);
}
+ if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
+ /* this should be impossible... */
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed")
+ }
+
done:
- FUNC_LEAVE_NOAPI(ret_value)
+ FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__flash_increase_cache_size() */
+} /* H5C__flash_increase_cache_size() */
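
As a rough worked example of the add-space arithmetic above (every number here is invented for illustration; none are HDF5 defaults):

    /* Assume max_cache_size = 4 MiB, min_clean_fraction = 0.5, and a resize
     * configuration with flash_multiple = 1.0, flash_threshold = 0.25, and
     * max_size = 16 MiB, and assume the add-space case computes
     * space_needed = 2 MiB before the flash_multiple scaling.  Then:
     *
     *   space_needed                  = 2 MiB * 1.0   = 2 MiB
     *   new_max_cache_size            = 4 MiB + 2 MiB = 6 MiB  (below max_size, so no clamp)
     *   new_min_clean_size            = 6 MiB * 0.5   = 3 MiB
     *   flash_size_increase_threshold = 6 MiB * 0.25  = 1.5 MiB
     */
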
- /*-------------------------------------------------------------------------
- * Function: H5C__flush_invalidate_cache
- *
- * Purpose: Flush and destroy the entries contained in the target
- * cache.
- *
- * If the cache contains protected entries, the function will
- * fail, as protected entries cannot be either flushed or
- * destroyed. However all unprotected entries should be
- * flushed and destroyed before the function returns failure.
- *
- * While pinned entries can usually be flushed, they cannot
- * be destroyed. However, they should be unpinned when all
- * the entries that reference them have been destroyed (thus
- * reduding the pinned entry's reference count to 0, allowing
- * it to be unpinned).
- *
- * If pinned entries are present, the function makes repeated
- * passes through the cache, flushing all dirty entries
- * (including the pinned dirty entries where permitted) and
- * destroying all unpinned entries. This process is repeated
- * until either the cache is empty, or the number of pinned
- * entries stops decreasing on each pass.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * a request to flush all items and something was protected.
- *
- * Programmer: John Mainzer
- * 3/24/065
- *
- * Modifications:
- *
- * To support the fractal heap, the cache must now deal with
- * entries being dirtied, resized, and/or renamed inside
- * flush callbacks. Updated function to support this.
- *
- * -- JRM 8/27/06
- *
- * Added code to detect and manage the case in which a
- * flush callback changes the s-list out from under
- * the function. The only way I can think of in which this
- * can happen is if a flush function loads an entry
- * into the cache that isn't there already. Quincey tells
- * me that this will never happen, but I'm not sure I
- * believe him.
- *
- * Note that this is a pretty bad scenario if it ever
- * happens. The code I have added should allow us to
- * handle the situation under all but the worst conditions,
- * but one can argue that we should just scream and die if
- * we ever detect the condition.
- *
- * -- JRM 10/13/07
- *
- * Missing entries?
- *
- *
- * Added support for the H5C__EVICT_ALLOW_LAST_PINS_FLAG.
- * This flag is used to flush and evict all entries in
- * the metadata cache that are not pinned -- typically,
- * everything other than the superblock.
- *
- * ??? -- ??/??/??
- *
- * Added sanity checks to verify that the skip list is
- * enabled on entry. On the face of it, it would make
- * sense to enable the slist on entry, and disable it
- * on exit, as this function is not called repeatedly.
- * However, since this function can be called from
- * H5C_flush_cache(), this would create cases in the test
- * code where we would have to check the flags to determine
- * whether we must setup and take down the slist.
- *
- * JRM -- 5/5/20
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__flush_invalidate_cache(H5F_t * f, unsigned flags)
- {
- H5C_t *cache_ptr;
- H5C_ring_t ring;
- herr_t ret_value = SUCCEED;
+/*-------------------------------------------------------------------------
+ * Function: H5C__flush_invalidate_cache
+ *
+ * Purpose: Flush and destroy the entries contained in the target
+ * cache.
+ *
+ * If the cache contains protected entries, the function will
+ * fail, as protected entries cannot be either flushed or
+ * destroyed. However all unprotected entries should be
+ * flushed and destroyed before the function returns failure.
+ *
+ * While pinned entries can usually be flushed, they cannot
+ * be destroyed. However, they should be unpinned when all
+ * the entries that reference them have been destroyed (thus
+ * reducing the pinned entry's reference count to 0, allowing
+ * it to be unpinned).
+ *
+ * If pinned entries are present, the function makes repeated
+ * passes through the cache, flushing all dirty entries
+ * (including the pinned dirty entries where permitted) and
+ * destroying all unpinned entries. This process is repeated
+ * until either the cache is empty, or the number of pinned
+ * entries stops decreasing on each pass.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 3/24/05
+ *
+ * Modifications:
+ *
+ * To support the fractal heap, the cache must now deal with
+ * entries being dirtied, resized, and/or renamed inside
+ * flush callbacks. Updated function to support this.
+ *
+ * -- JRM 8/27/06
+ *
+ * Added code to detect and manage the case in which a
+ * flush callback changes the s-list out from under
+ * the function. The only way I can think of in which this
+ * can happen is if a flush function loads an entry
+ * into the cache that isn't there already. Quincey tells
+ * me that this will never happen, but I'm not sure I
+ * believe him.
+ *
+ * Note that this is a pretty bad scenario if it ever
+ * happens. The code I have added should allow us to
+ * handle the situation under all but the worst conditions,
+ * but one can argue that we should just scream and die if
+ * we ever detect the condition.
+ *
+ * -- JRM 10/13/07
+ *
+ * Missing entries?
+ *
+ *
+ * Added support for the H5C__EVICT_ALLOW_LAST_PINS_FLAG.
+ * This flag is used to flush and evict all entries in
+ * the metadata cache that are not pinned -- typically,
+ * everything other than the superblock.
+ *
+ * ??? -- ??/??/??
+ *
+ * Added sanity checks to verify that the skip list is
+ * enabled on entry. On the face of it, it would make
+ * sense to enable the slist on entry, and disable it
+ * on exit, as this function is not called repeatedly.
+ * However, since this function can be called from
+ * H5C_flush_cache(), this would create cases in the test
+ * code where we would have to check the flags to determine
+ * whether we must setup and take down the slist.
+ *
+ * JRM -- 5/5/20
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__flush_invalidate_cache(H5F_t *f, unsigned flags)
+{
+ H5C_t *cache_ptr;
+ H5C_ring_t ring;
+ herr_t ret_value = SUCCEED;
- FUNC_ENTER_STATIC
+ FUNC_ENTER_STATIC
- HDassert(f);
- HDassert(f->shared);
- cache_ptr = f->shared->cache;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->slist_ptr);
- HDassert(cache_ptr->slist_enabled);
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_ptr);
+ HDassert(cache_ptr->slist_enabled);
#if H5C_DO_SANITY_CHECKS
- {
- int32_t i;
- uint32_t index_len = 0;
- uint32_t slist_len = 0;
- size_t index_size = (size_t)0;
- size_t clean_index_size = (size_t)0;
- size_t dirty_index_size = (size_t)0;
- size_t slist_size = (size_t)0;
-
- HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
- HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
- HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
-
- for (i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
-
- index_len += cache_ptr->index_ring_len[i];
- index_size += cache_ptr->index_ring_size[i];
- clean_index_size += cache_ptr->clean_index_ring_size[i];
- dirty_index_size += cache_ptr->dirty_index_ring_size[i];
-
- slist_len += cache_ptr->slist_ring_len[i];
- slist_size += cache_ptr->slist_ring_size[i];
-
- } /* end for */
-
- HDassert(cache_ptr->index_len == index_len);
- HDassert(cache_ptr->index_size == index_size);
- HDassert(cache_ptr->clean_index_size == clean_index_size);
- HDassert(cache_ptr->dirty_index_size == dirty_index_size);
- HDassert(cache_ptr->slist_len == slist_len);
- HDassert(cache_ptr->slist_size == slist_size);
- }
+ {
+ int32_t i;
+ uint32_t index_len = 0;
+ uint32_t slist_len = 0;
+ size_t index_size = (size_t)0;
+ size_t clean_index_size = (size_t)0;
+ size_t dirty_index_size = (size_t)0;
+ size_t slist_size = (size_t)0;
+
+ HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+
+ for (i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
+
+ index_len += cache_ptr->index_ring_len[i];
+ index_size += cache_ptr->index_ring_size[i];
+ clean_index_size += cache_ptr->clean_index_ring_size[i];
+ dirty_index_size += cache_ptr->dirty_index_ring_size[i];
+
+ slist_len += cache_ptr->slist_ring_len[i];
+ slist_size += cache_ptr->slist_ring_size[i];
+
+ } /* end for */
+
+ HDassert(cache_ptr->index_len == index_len);
+ HDassert(cache_ptr->index_size == index_size);
+ HDassert(cache_ptr->clean_index_size == clean_index_size);
+ HDassert(cache_ptr->dirty_index_size == dirty_index_size);
+ HDassert(cache_ptr->slist_len == slist_len);
+ HDassert(cache_ptr->slist_size == slist_size);
+ }
#endif /* H5C_DO_SANITY_CHECKS */
- /* remove ageout markers if present */
- if (cache_ptr->epoch_markers_active > 0) {
+ /* remove ageout markers if present */
+ if (cache_ptr->epoch_markers_active > 0) {
- if (H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0)
+ if (H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers")
- }
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers")
+ }
- /* flush invalidate each ring, starting from the outermost ring and
- * working inward.
- */
- ring = H5C_RING_USER;
+ /* flush invalidate each ring, starting from the outermost ring and
+ * working inward.
+ */
+ ring = H5C_RING_USER;
- while (ring < H5C_RING_NTYPES) {
+ while (ring < H5C_RING_NTYPES) {
- if (H5C_flush_invalidate_ring(f, ring, flags) < 0)
+ if (H5C_flush_invalidate_ring(f, ring, flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate ring failed")
- ring++;
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate ring failed")
+ ring++;
- } /* end while */
+ } /* end while */
- /* Invariants, after destroying all entries in the hash table */
- if (!(flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG)) {
+ /* Invariants, after destroying all entries in the hash table */
+ if (!(flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG)) {
- HDassert(cache_ptr->index_size == 0);
- HDassert(cache_ptr->clean_index_size == 0);
- HDassert(cache_ptr->pel_len == 0);
- HDassert(cache_ptr->pel_size == 0);
+ HDassert(cache_ptr->index_size == 0);
+ HDassert(cache_ptr->clean_index_size == 0);
+ HDassert(cache_ptr->pel_len == 0);
+ HDassert(cache_ptr->pel_size == 0);
- } /* end if */
- else {
+ } /* end if */
+ else {
- H5C_cache_entry_t *entry_ptr; /* Cache entry */
- unsigned u; /* Local index variable */
+ H5C_cache_entry_t *entry_ptr; /* Cache entry */
+ unsigned u; /* Local index variable */
- /* All rings except ring 4 should be empty now */
- /* (Ring 4 has the superblock) */
- for (u = H5C_RING_USER; u < H5C_RING_SB; u++) {
+ /* All rings except ring 4 should be empty now */
+ /* (Ring 4 has the superblock) */
+ for (u = H5C_RING_USER; u < H5C_RING_SB; u++) {
- HDassert(cache_ptr->index_ring_len[u] == 0);
- HDassert(cache_ptr->index_ring_size[u] == 0);
- HDassert(cache_ptr->clean_index_ring_size[u] == 0);
+ HDassert(cache_ptr->index_ring_len[u] == 0);
+ HDassert(cache_ptr->index_ring_size[u] == 0);
+ HDassert(cache_ptr->clean_index_ring_size[u] == 0);
- } /* end for */
+ } /* end for */
- /* Check that any remaining pinned entries are in the superblock ring */
+ /* Check that any remaining pinned entries are in the superblock ring */
- entry_ptr = cache_ptr->pel_head_ptr;
+ entry_ptr = cache_ptr->pel_head_ptr;
- while (entry_ptr) {
+ while (entry_ptr) {
- /* Check ring */
- HDassert(entry_ptr->ring == H5C_RING_SB);
+ /* Check ring */
+ HDassert(entry_ptr->ring == H5C_RING_SB);
- /* Advance to next entry in pinned entry list */
- entry_ptr = entry_ptr->next;
+ /* Advance to next entry in pinned entry list */
+ entry_ptr = entry_ptr->next;
- } /* end while */
- } /* end else */
+ } /* end while */
+ } /* end else */
- HDassert(cache_ptr->dirty_index_size == 0);
- HDassert(cache_ptr->slist_len == 0);
- HDassert(cache_ptr->slist_size == 0);
- HDassert(cache_ptr->pl_len == 0);
- HDassert(cache_ptr->pl_size == 0);
- HDassert(cache_ptr->LRU_list_len == 0);
- HDassert(cache_ptr->LRU_list_size == 0);
+ HDassert(cache_ptr->dirty_index_size == 0);
+ HDassert(cache_ptr->slist_len == 0);
+ HDassert(cache_ptr->slist_size == 0);
+ HDassert(cache_ptr->pl_len == 0);
+ HDassert(cache_ptr->pl_size == 0);
+ HDassert(cache_ptr->LRU_list_len == 0);
+ HDassert(cache_ptr->LRU_list_size == 0);
done:
- FUNC_LEAVE_NOAPI(ret_value)
+ FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__flush_invalidate_cache() */
+} /* H5C__flush_invalidate_cache() */
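
The H5C__EVICT_ALLOW_LAST_PINS_FLAG behavior described in the header comment can be pictured with a hypothetical call site. The function and flag names are the ones used in this file, but the caller below is illustrative only and not part of the patch:

    /* Hypothetical caller sketch: flush and evict everything that can be
     * evicted, but tolerate pinned entries (typically the superblock)
     * remaining in the cache.
     */
    if (H5C__flush_invalidate_cache(f, H5C__EVICT_ALLOW_LAST_PINS_FLAG) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush and evict of cache failed")
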
- /*-------------------------------------------------------------------------
- * Function: H5C_flush_invalidate_ring
- *
- * Purpose: Flush and destroy the entries contained in the target
- * cache and ring.
- *
- * If the ring contains protected entries, the function will
- * fail, as protected entries cannot be either flushed or
- * destroyed. However all unprotected entries should be
- * flushed and destroyed before the function returns failure.
- *
- * While pinned entries can usually be flushed, they cannot
- * be destroyed. However, they should be unpinned when all
- * the entries that reference them have been destroyed (thus
- * reduding the pinned entry's reference count to 0, allowing
- * it to be unpinned).
- *
- * If pinned entries are present, the function makes repeated
- * passes through the cache, flushing all dirty entries
- * (including the pinned dirty entries where permitted) and
- * destroying all unpinned entries. This process is repeated
- * until either the cache is empty, or the number of pinned
- * entries stops decreasing on each pass.
- *
- * If flush dependencies appear in the target ring, the
- * function makes repeated passes through the cache flushing
- * entries in flush dependency order.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * a request to flush all items and something was protected.
- *
- * Programmer: John Mainzer
- * 9/1/15
- *
- * Changes: Added support for the H5C__EVICT_ALLOW_LAST_PINS_FLAG.
- * This flag is used to flush and evict all entries in
- * the metadata cache that are not pinned -- typically,
- * everything other than the superblock.
- *
- * ??? -- ??/??/??
- *
- * A recent optimization turns off the slist unless a flush
- * is in progress. This should not effect this function, as
- * it is only called during a flush. Added an assertion to
- * verify this.
- *
- * JRM -- 5/6/20
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
- {
- H5C_t *cache_ptr;
- hbool_t restart_slist_scan;
- uint32_t protected_entries = 0;
- int32_t i;
- int32_t cur_ring_pel_len;
- int32_t old_ring_pel_len;
- unsigned cooked_flags;
- unsigned evict_flags;
- H5SL_node_t *node_ptr = NULL;
- H5C_cache_entry_t *entry_ptr = NULL;
- H5C_cache_entry_t *next_entry_ptr = NULL;
+/*-------------------------------------------------------------------------
+ * Function: H5C_flush_invalidate_ring
+ *
+ * Purpose: Flush and destroy the entries contained in the target
+ * cache and ring.
+ *
+ * If the ring contains protected entries, the function will
+ * fail, as protected entries cannot be either flushed or
+ * destroyed. However all unprotected entries should be
+ * flushed and destroyed before the function returns failure.
+ *
+ * While pinned entries can usually be flushed, they cannot
+ * be destroyed. However, they should be unpinned when all
+ * the entries that reference them have been destroyed (thus
+ * reducing the pinned entry's reference count to 0, allowing
+ * it to be unpinned).
+ *
+ * If pinned entries are present, the function makes repeated
+ * passes through the cache, flushing all dirty entries
+ * (including the pinned dirty entries where permitted) and
+ * destroying all unpinned entries. This process is repeated
+ * until either the cache is empty, or the number of pinned
+ * entries stops decreasing on each pass.
+ *
+ * If flush dependencies appear in the target ring, the
+ * function makes repeated passes through the cache flushing
+ * entries in flush dependency order.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 9/1/15
+ *
+ * Changes: Added support for the H5C__EVICT_ALLOW_LAST_PINS_FLAG.
+ * This flag is used to flush and evict all entries in
+ * the metadata cache that are not pinned -- typically,
+ * everything other than the superblock.
+ *
+ * ??? -- ??/??/??
+ *
+ * A recent optimization turns off the slist unless a flush
+ * is in progress. This should not affect this function, as
+ * it is only called during a flush. Added an assertion to
+ * verify this.
+ *
+ * JRM -- 5/6/20
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C_flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
+{
+ H5C_t *cache_ptr;
+ hbool_t restart_slist_scan;
+ uint32_t protected_entries = 0;
+ int32_t i;
+ int32_t cur_ring_pel_len;
+ int32_t old_ring_pel_len;
+ unsigned cooked_flags;
+ unsigned evict_flags;
+ H5SL_node_t *node_ptr = NULL;
+ H5C_cache_entry_t *entry_ptr = NULL;
+ H5C_cache_entry_t *next_entry_ptr = NULL;
#if H5C_DO_SANITY_CHECKS
- uint32_t initial_slist_len = 0;
- size_t initial_slist_size = 0;
+ uint32_t initial_slist_len = 0;
+ size_t initial_slist_size = 0;
#endif /* H5C_DO_SANITY_CHECKS */
- herr_t ret_value = SUCCEED;
-
- FUNC_ENTER_NOAPI(FAIL)
+ herr_t ret_value = SUCCEED;
- HDassert(f);
- HDassert(f->shared);
+ FUNC_ENTER_NOAPI(FAIL)
- cache_ptr = f->shared->cache;
+ HDassert(f);
+ HDassert(f->shared);
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->slist_enabled);
- HDassert(cache_ptr->slist_ptr);
- HDassert(ring > H5C_RING_UNDEFINED);
- HDassert(ring < H5C_RING_NTYPES);
+ cache_ptr = f->shared->cache;
- HDassert(cache_ptr->epoch_markers_active == 0);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_enabled);
+ HDassert(cache_ptr->slist_ptr);
+ HDassert(ring > H5C_RING_UNDEFINED);
+ HDassert(ring < H5C_RING_NTYPES);
- /* Filter out the flags that are not relevant to the flush/invalidate.
- */
- cooked_flags = flags & H5C__FLUSH_CLEAR_ONLY_FLAG;
- evict_flags = flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG;
+ HDassert(cache_ptr->epoch_markers_active == 0);
- /* The flush procedure here is a bit strange.
- *
- * In the outer while loop we make at least one pass through the
- * cache, and then repeat until either all the pinned entries in
- * the ring unpin themselves, or until the number of pinned entries
- * in the ring stops declining. In this later case, we scream and die.
- *
- * Since the fractal heap can dirty, resize, and/or move entries
- * in is flush callback, it is possible that the cache will still
- * contain dirty entries at this point. If so, we must make more
- * passes through the skip list to allow it to empty.
- *
- * Further, since clean entries can be dirtied, resized, and/or moved
- * as the result of a flush call back (either the entries own, or that
- * for some other cache entry), we can no longer promise to flush
- * the cache entries in increasing address order.
- *
- * Instead, we just do the best we can -- making a pass through
- * the skip list, and then a pass through the "clean" entries, and
- * then repeating as needed. Thus it is quite possible that an
- * entry will be evicted from the cache only to be re-loaded later
- * in the flush process (From what Quincey tells me, the pin
- * mechanism makes this impossible, but even it it is true now,
- * we shouldn't count on it in the future.)
- *
- * The bottom line is that entries will probably be flushed in close
- * to increasing address order, but there are no guarantees.
- */
+ /* Filter out the flags that are not relevant to the flush/invalidate.
+ */
+ cooked_flags = flags & H5C__FLUSH_CLEAR_ONLY_FLAG;
+ evict_flags = flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG;
+
+ /* The flush procedure here is a bit strange.
+ *
+ * In the outer while loop we make at least one pass through the
+ * cache, and then repeat until either all the pinned entries in
+ * the ring unpin themselves, or until the number of pinned entries
+ * in the ring stops declining. In this latter case, we scream and die.
+ *
+ * Since the fractal heap can dirty, resize, and/or move entries
+ * in its flush callback, it is possible that the cache will still
+ * contain dirty entries at this point. If so, we must make more
+ * passes through the skip list to allow it to empty.
+ *
+ * Further, since clean entries can be dirtied, resized, and/or moved
+ * as the result of a flush callback (either the entry's own, or that
+ * of some other cache entry), we can no longer promise to flush
+ * the cache entries in increasing address order.
+ *
+ * Instead, we just do the best we can -- making a pass through
+ * the skip list, and then a pass through the "clean" entries, and
+ * then repeating as needed. Thus it is quite possible that an
+ * entry will be evicted from the cache only to be re-loaded later
+ * in the flush process (From what Quincey tells me, the pin
+ * mechanism makes this impossible, but even if it is true now,
+ * we shouldn't count on it in the future.)
+ *
+ * The bottom line is that entries will probably be flushed in close
+ * to increasing address order, but there are no guarantees.
+ */
- /* compute the number of pinned entries in this ring */
+ /* compute the number of pinned entries in this ring */
- entry_ptr = cache_ptr->pel_head_ptr;
- cur_ring_pel_len = 0;
+ entry_ptr = cache_ptr->pel_head_ptr;
+ cur_ring_pel_len = 0;
- while (entry_ptr != NULL) {
+ while (entry_ptr != NULL) {
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->ring >= ring);
- if (entry_ptr->ring == ring)
- cur_ring_pel_len++;
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring >= ring);
+ if (entry_ptr->ring == ring)
+ cur_ring_pel_len++;
- entry_ptr = entry_ptr->next;
+ entry_ptr = entry_ptr->next;
- } /* end while */
+ } /* end while */
- old_ring_pel_len = cur_ring_pel_len;
+ old_ring_pel_len = cur_ring_pel_len;
- while (cache_ptr->index_ring_len[ring] > 0) {
+ while (cache_ptr->index_ring_len[ring] > 0) {
- /* first, try to flush-destroy any dirty entries. Do this by
- * making a scan through the slist. Note that new dirty entries
- * may be created by the flush call backs. Thus it is possible
- * that the slist will not be empty after we finish the scan.
- */
+ /* first, try to flush-destroy any dirty entries. Do this by
+ * making a scan through the slist. Note that new dirty entries
+ * may be created by the flush callbacks. Thus it is possible
+ * that the slist will not be empty after we finish the scan.
+ */
#if H5C_DO_SANITY_CHECKS
- /* Depending on circumstances, H5C__flush_single_entry() will
- * remove dirty entries from the slist as it flushes them.
- * Thus for sanity checks we must make note of the initial
- * slist length and size before we do any flushes.
- */
- initial_slist_len = cache_ptr->slist_len;
- initial_slist_size = cache_ptr->slist_size;
-
- /* There is also the possibility that entries will be
- * dirtied, resized, moved, and/or removed from the cache
- * as the result of calls to the flush callbacks. We use
- * the slist_len_increase and slist_size_increase increase
- * fields in struct H5C_t to track these changes for purpose
- * of sanity checking.
- *
- * To this end, we must zero these fields before we start
- * the pass through the slist.
- */
- cache_ptr->slist_len_increase = 0;
- cache_ptr->slist_size_increase = 0;
+ /* Depending on circumstances, H5C__flush_single_entry() will
+ * remove dirty entries from the slist as it flushes them.
+ * Thus for sanity checks we must make note of the initial
+ * slist length and size before we do any flushes.
+ */
+ initial_slist_len = cache_ptr->slist_len;
+ initial_slist_size = cache_ptr->slist_size;
+
+ /* There is also the possibility that entries will be
+ * dirtied, resized, moved, and/or removed from the cache
+ * as the result of calls to the flush callbacks. We use
+ * the slist_len_increase and slist_size_increase
+ * fields in struct H5C_t to track these changes for purposes
+ * of sanity checking.
+ *
+ * To this end, we must zero these fields before we start
+ * the pass through the slist.
+ */
+ cache_ptr->slist_len_increase = 0;
+ cache_ptr->slist_size_increase = 0;
#endif /* H5C_DO_SANITY_CHECKS */
- /* Set the cache_ptr->slist_changed to false.
- *
- * This flag is set to TRUE by H5C__flush_single_entry if the slist
- * is modified by a pre_serialize, serialize, or notify callback.
- *
- * H5C_flush_invalidate_ring() uses this flag to detect any
- * modifications to the slist that might corrupt the scan of
- * the slist -- and restart the scan in this event.
- */
- cache_ptr->slist_changed = FALSE;
+ /* Set the cache_ptr->slist_changed to false.
+ *
+ * This flag is set to TRUE by H5C__flush_single_entry if the slist
+ * is modified by a pre_serialize, serialize, or notify callback.
+ *
+ * H5C_flush_invalidate_ring() uses this flag to detect any
+ * modifications to the slist that might corrupt the scan of
+ * the slist -- and restart the scan in this event.
+ */
+ cache_ptr->slist_changed = FALSE;
- /* this done, start the scan of the slist */
- restart_slist_scan = TRUE;
+ /* this done, start the scan of the slist */
+ restart_slist_scan = TRUE;
- while (restart_slist_scan || (node_ptr != NULL)) {
+ while (restart_slist_scan || (node_ptr != NULL)) {
- if (restart_slist_scan) {
+ if (restart_slist_scan) {
- restart_slist_scan = FALSE;
+ restart_slist_scan = FALSE;
- /* Start at beginning of skip list */
- node_ptr = H5SL_first(cache_ptr->slist_ptr);
+ /* Start at beginning of skip list */
+ node_ptr = H5SL_first(cache_ptr->slist_ptr);
- if (node_ptr == NULL)
- /* the slist is empty -- break out of inner loop */
- break;
+ if (node_ptr == NULL)
+ /* the slist is empty -- break out of inner loop */
+ break;
- /* Get cache entry for this node */
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ /* Get cache entry for this node */
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- if (NULL == next_entry_ptr)
+ if (NULL == next_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
- HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(next_entry_ptr->is_dirty);
- HDassert(next_entry_ptr->in_slist);
- HDassert(next_entry_ptr->ring >= ring);
+ HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(next_entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->in_slist);
+ HDassert(next_entry_ptr->ring >= ring);
- } /* end if */
+ } /* end if */
- entry_ptr = next_entry_ptr;
-
- /* It is possible that entries will be dirtied, resized,
- * flushed, or removed from the cache via the take ownership
- * flag as the result of pre_serialize or serialized callbacks.
- *
- * This in turn can corrupt the scan through the slist.
- *
- * We test for slist modifications in the pre_serialize
- * and serialize callbacks, and restart the scan of the
- * slist if we find them. However, best we do some extra
- * sanity checking just in case.
- */
- HDassert(entry_ptr != NULL);
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->in_slist);
- HDassert(entry_ptr->is_dirty);
- HDassert(entry_ptr->ring >= ring);
+ entry_ptr = next_entry_ptr;
- /* increment node pointer now, before we delete its target
- * from the slist.
- */
- node_ptr = H5SL_next(node_ptr);
+ /* It is possible that entries will be dirtied, resized,
+ * flushed, or removed from the cache via the take ownership
+ * flag as the result of pre_serialize or serialize callbacks.
+ *
+ * This in turn can corrupt the scan through the slist.
+ *
+ * We test for slist modifications in the pre_serialize
+ * and serialize callbacks, and restart the scan of the
+ * slist if we find them. However, it is best we do some extra
+ * sanity checking just in case.
+ */
+ HDassert(entry_ptr != NULL);
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->in_slist);
+ HDassert(entry_ptr->is_dirty);
+ HDassert(entry_ptr->ring >= ring);
- if (node_ptr != NULL) {
+ /* increment node pointer now, before we delete its target
+ * from the slist.
+ */
+ node_ptr = H5SL_next(node_ptr);
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ if (node_ptr != NULL) {
- if (NULL == next_entry_ptr)
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+ if (NULL == next_entry_ptr)
- HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(next_entry_ptr->is_dirty);
- HDassert(next_entry_ptr->in_slist);
- HDassert(next_entry_ptr->ring >= ring);
- HDassert(entry_ptr != next_entry_ptr);
- } /* end if */
- else {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
- next_entry_ptr = NULL;
- }
+ HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(next_entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->in_slist);
+ HDassert(next_entry_ptr->ring >= ring);
+ HDassert(entry_ptr != next_entry_ptr);
+ } /* end if */
+ else {
- /* Note that we now remove nodes from the slist as we flush
- * the associated entries, instead of leaving them there
- * until we are done, and then destroying all nodes in
- * the slist.
- *
- * While this optimization used to be easy, with the possibility
- * of new entries being added to the slist in the midst of the
- * flush, we must keep the slist in canonical form at all
- * times.
- */
- if (((!entry_ptr->flush_me_last) ||
- ((entry_ptr->flush_me_last) && (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
- (entry_ptr->flush_dep_nchildren == 0) && (entry_ptr->ring == ring)) {
+ next_entry_ptr = NULL;
+ }
- if (entry_ptr->is_protected) {
+ /* Note that we now remove nodes from the slist as we flush
+ * the associated entries, instead of leaving them there
+ * until we are done, and then destroying all nodes in
+ * the slist.
+ *
+ * While this optimization used to be easy, with the possibility
+ * of new entries being added to the slist in the midst of the
+ * flush, we must keep the slist in canonical form at all
+ * times.
+ */
+ if (((!entry_ptr->flush_me_last) ||
+ ((entry_ptr->flush_me_last) && (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
+ (entry_ptr->flush_dep_nchildren == 0) && (entry_ptr->ring == ring)) {
- /* we have major problems -- but lets flush
- * everything we can before we flag an error.
- */
- protected_entries++;
+ if (entry_ptr->is_protected) {
- } /* end if */
- else if (entry_ptr->is_pinned) {
+ /* we have major problems -- but let's flush
+ * everything we can before we flag an error.
+ */
+ protected_entries++;
- if (H5C__flush_single_entry(f, entry_ptr, H5C__DURING_FLUSH_FLAG) < 0)
+ } /* end if */
+ else if (entry_ptr->is_pinned) {
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed")
+ if (H5C__flush_single_entry(f, entry_ptr, H5C__DURING_FLUSH_FLAG) < 0)
- if (cache_ptr->slist_changed) {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed")
- /* The slist has been modified by something
- * other than the simple removal of the
- * of the flushed entry after the flush.
- *
- * This has the potential to corrupt the
- * scan through the slist, so restart it.
- */
- restart_slist_scan = TRUE;
- cache_ptr->slist_changed = FALSE;
- H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr);
+ if (cache_ptr->slist_changed) {
- } /* end if */
- } /* end else-if */
- else {
+ /* The slist has been modified by something
+ * other than the simple removal of the
+ * flushed entry after the flush.
+ *
+ * This has the potential to corrupt the
+ * scan through the slist, so restart it.
+ */
+ restart_slist_scan = TRUE;
+ cache_ptr->slist_changed = FALSE;
+ H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr);
- if (H5C__flush_single_entry(f, entry_ptr,
- (cooked_flags | H5C__DURING_FLUSH_FLAG |
- H5C__FLUSH_INVALIDATE_FLAG |
- H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0)
+ } /* end if */
+ } /* end else-if */
+ else {
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry flush destroy failed")
+ if (H5C__flush_single_entry(f, entry_ptr,
+ (cooked_flags | H5C__DURING_FLUSH_FLAG |
+ H5C__FLUSH_INVALIDATE_FLAG |
+ H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0)
- if (cache_ptr->slist_changed) {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry flush destroy failed")
- /* The slist has been modified by something
- * other than the simple removal of the
- * of the flushed entry after the flush.
- *
- * This has the potential to corrupt the
- * scan through the slist, so restart it.
- */
- restart_slist_scan = TRUE;
- cache_ptr->slist_changed = FALSE;
- H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
- } /* end if */
- } /* end else */
- } /* end if */
- } /* end while loop scanning skip list */
+ if (cache_ptr->slist_changed) {
-#if H5C_DO_SANITY_CHECKS
- /* It is possible that entries were added to the slist during
- * the scan, either before or after scan pointer. The following
- * asserts take this into account.
- *
- * Don't bother with the sanity checks if node_ptr != NULL, as
- * in this case we broke out of the loop because it got changed
- * out from under us.
- */
+ /* The slist has been modified by something
+ * other than the simple removal of the
+ * flushed entry after the flush.
+ *
+ * This has the potential to corrupt the
+ * scan through the slist, so restart it.
+ */
+ restart_slist_scan = TRUE;
+ cache_ptr->slist_changed = FALSE;
+ H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
+ } /* end if */
+ } /* end else */
+ } /* end if */
+ } /* end while loop scanning skip list */
- if (node_ptr == NULL) {
+#if H5C_DO_SANITY_CHECKS
+ /* It is possible that entries were added to the slist during
+ * the scan, either before or after the scan pointer. The following
+ * asserts take this into account.
+ *
+ * Don't bother with the sanity checks if node_ptr != NULL, as
+ * in this case we broke out of the loop because it got changed
+ * out from under us.
+ */
- HDassert(cache_ptr->slist_len ==
- (uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase));
+ if (node_ptr == NULL) {
- HDassert(cache_ptr->slist_size ==
- (size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase));
- } /* end if */
-#endif /* H5C_DO_SANITY_CHECKS */
+ HDassert(cache_ptr->slist_len ==
+ (uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase));
- /* Since we are doing a destroy, we must make a pass through
- * the hash table and try to flush - destroy all entries that
- * remain.
- *
- * It used to be that all entries remaining in the cache at
- * this point had to be clean, but with the fractal heap mods
- * this may not be the case. If so, we will flush entries out
- * in increasing address order.
- *
- * Writes to disk are possible here.
- */
+ HDassert(cache_ptr->slist_size ==
+ (size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase));
+ } /* end if */
+#endif /* H5C_DO_SANITY_CHECKS */
- /* reset the counters so that we can detect insertions, loads,
- * and moves caused by the pre_serialize and serialize calls.
- */
- cache_ptr->entries_loaded_counter = 0;
- cache_ptr->entries_inserted_counter = 0;
- cache_ptr->entries_relocated_counter = 0;
+ /* Since we are doing a destroy, we must make a pass through
+ * the hash table and try to flush-destroy all entries that
+ * remain.
+ *
+ * It used to be that all entries remaining in the cache at
+ * this point had to be clean, but with the fractal heap mods
+ * this may not be the case. If so, we will flush entries out
+ * in increasing address order.
+ *
+ * Writes to disk are possible here.
+ */
- next_entry_ptr = cache_ptr->il_head;
+ /* reset the counters so that we can detect insertions, loads,
+ * and moves caused by the pre_serialize and serialize calls.
+ */
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
- while (next_entry_ptr != NULL) {
+ next_entry_ptr = cache_ptr->il_head;
- entry_ptr = next_entry_ptr;
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->ring >= ring);
+ while (next_entry_ptr != NULL) {
- next_entry_ptr = entry_ptr->il_next;
- HDassert((next_entry_ptr == NULL) || (next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC));
+ entry_ptr = next_entry_ptr;
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring >= ring);
- if (((!entry_ptr->flush_me_last) ||
- (entry_ptr->flush_me_last && (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
- (entry_ptr->flush_dep_nchildren == 0) && (entry_ptr->ring == ring)) {
+ next_entry_ptr = entry_ptr->il_next;
+ HDassert((next_entry_ptr == NULL) || (next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC));
- if (entry_ptr->is_protected) {
+ if (((!entry_ptr->flush_me_last) ||
+ (entry_ptr->flush_me_last && (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
+ (entry_ptr->flush_dep_nchildren == 0) && (entry_ptr->ring == ring)) {
- /* we have major problems -- but lets flush and
- * destroy everything we can before we flag an
- * error.
- */
- protected_entries++;
+ if (entry_ptr->is_protected) {
- if (!entry_ptr->in_slist) {
+ /* we have major problems -- but let's flush and
+ * destroy everything we can before we flag an
+ * error.
+ */
+ protected_entries++;
- HDassert(!(entry_ptr->is_dirty));
- }
- } /* end if */
- else if (!(entry_ptr->is_pinned)) {
+ if (!entry_ptr->in_slist) {
- /* if *entry_ptr is dirty, it is possible
- * that one or more other entries may be
- * either removed from the cache, loaded
- * into the cache, or moved to a new location
- * in the file as a side effect of the flush.
- *
- * It's also possible that removing a clean
- * entry will remove the last child of a proxy
- * entry, allowing it to be removed also and
- * invalidating the next_entry_ptr.
- *
- * If either of these happen, and one of the target
- * or proxy entries happens to be the next entry in
- * the hash bucket, we could either find ourselves
- * either scanning a non-existent entry, scanning
- * through a different bucket, or skipping an entry.
- *
- * Neither of these are good, so restart the
- * the scan at the head of the hash bucket
- * after the flush if we detect that the next_entry_ptr
- * becomes invalid.
- *
- * This is not as inefficient at it might seem,
- * as hash buckets typically have at most two
- * or three entries.
- */
- cache_ptr->entry_watched_for_removal = next_entry_ptr;
-
- if (H5C__flush_single_entry(f, entry_ptr,
- (cooked_flags | H5C__DURING_FLUSH_FLAG |
- H5C__FLUSH_INVALIDATE_FLAG |
- H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0)
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed")
-
- /* Restart the index list scan if necessary. Must
- * do this if the next entry is evicted, and also if
- * one or more entries are inserted, loaded, or moved
- * as these operations can result in part of the scan
- * being skipped -- which can cause a spurious failure
- * if this results in the size of the pinned entry
- * failing to decline during the pass.
- */
- if (((NULL != next_entry_ptr) && (NULL == cache_ptr->entry_watched_for_removal)) ||
- (cache_ptr->entries_loaded_counter > 0) ||
- (cache_ptr->entries_inserted_counter > 0) ||
- (cache_ptr->entries_relocated_counter > 0)) {
+ HDassert(!(entry_ptr->is_dirty));
+ }
+ } /* end if */
+ else if (!(entry_ptr->is_pinned)) {
+
+ /* if *entry_ptr is dirty, it is possible
+ * that one or more other entries may be
+ * either removed from the cache, loaded
+ * into the cache, or moved to a new location
+ * in the file as a side effect of the flush.
+ *
+ * It's also possible that removing a clean
+ * entry will remove the last child of a proxy
+ * entry, allowing it to be removed also and
+ * invalidating the next_entry_ptr.
+ *
+ * If either of these happen, and one of the target
+ * or proxy entries happens to be the next entry in
+ * the hash bucket, we could find ourselves
+ * either scanning a non-existent entry, scanning
+ * through a different bucket, or skipping an entry.
+ *
+ * Neither of these is good, so restart the
+ * scan at the head of the hash bucket
+ * after the flush if we detect that the next_entry_ptr
+ * becomes invalid.
+ *
+ * This is not as inefficient as it might seem,
+ * as hash buckets typically have at most two
+ * or three entries.
+ */
+ cache_ptr->entry_watched_for_removal = next_entry_ptr;
- next_entry_ptr = cache_ptr->il_head;
+ if (H5C__flush_single_entry(f, entry_ptr,
+ (cooked_flags | H5C__DURING_FLUSH_FLAG |
+ H5C__FLUSH_INVALIDATE_FLAG |
+ H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0)
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed")
+
+ /* Restart the index list scan if necessary. Must
+ * do this if the next entry is evicted, and also if
+ * one or more entries are inserted, loaded, or moved
+ * as these operations can result in part of the scan
+ * being skipped -- which can cause a spurious failure
+ * if this results in the size of the pinned entry list
+ * failing to decline during the pass.
+ */
+ if (((NULL != next_entry_ptr) && (NULL == cache_ptr->entry_watched_for_removal)) ||
+ (cache_ptr->entries_loaded_counter > 0) ||
+ (cache_ptr->entries_inserted_counter > 0) ||
+ (cache_ptr->entries_relocated_counter > 0)) {
- cache_ptr->entries_loaded_counter = 0;
- cache_ptr->entries_inserted_counter = 0;
- cache_ptr->entries_relocated_counter = 0;
+ next_entry_ptr = cache_ptr->il_head;
- H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr)
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
- } /* end if */
- else {
+ H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr)
- cache_ptr->entry_watched_for_removal = NULL;
- }
} /* end if */
- } /* end if */
- } /* end for loop scanning hash table */
+ else {
- /* We can't do anything if entries are pinned. The
- * hope is that the entries will be unpinned as the
- * result of destroys of entries that reference them.
- *
- * We detect this by noting the change in the number
- * of pinned entries from pass to pass. If it stops
- * shrinking before it hits zero, we scream and die.
- */
- old_ring_pel_len = cur_ring_pel_len;
- entry_ptr = cache_ptr->pel_head_ptr;
- cur_ring_pel_len = 0;
+ cache_ptr->entry_watched_for_removal = NULL;
+ }
+ } /* end if */
+ } /* end if */
+ } /* end while loop scanning hash table */
- while (entry_ptr != NULL) {
+ /* We can't do anything if entries are pinned. The
+ * hope is that the entries will be unpinned as the
+ * result of destroys of entries that reference them.
+ *
+ * We detect this by noting the change in the number
+ * of pinned entries from pass to pass. If it stops
+ * shrinking before it hits zero, we scream and die.
+ */
+ old_ring_pel_len = cur_ring_pel_len;
+ entry_ptr = cache_ptr->pel_head_ptr;
+ cur_ring_pel_len = 0;
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->ring >= ring);
+ while (entry_ptr != NULL) {
- if (entry_ptr->ring == ring) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring >= ring);
- cur_ring_pel_len++;
- }
+ if (entry_ptr->ring == ring) {
- entry_ptr = entry_ptr->next;
+ cur_ring_pel_len++;
+ }
- } /* end while */
+ entry_ptr = entry_ptr->next;
- /* Check if the number of pinned entries in the ring is positive, and
- * it is not declining. Scream and die if so.
- */
- if ((cur_ring_pel_len > 0) && (cur_ring_pel_len >= old_ring_pel_len)) {
+ } /* end while */
- /* Don't error if allowed to have pinned entries remaining */
- if (evict_flags) {
+ /* Check if the number of pinned entries in the ring is positive, and
+ * it is not declining. Scream and die if so.
+ */
+ if ((cur_ring_pel_len > 0) && (cur_ring_pel_len >= old_ring_pel_len)) {
- HGOTO_DONE(TRUE)
- }
+ /* Don't error if allowed to have pinned entries remaining */
+ if (evict_flags) {
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL,
- "Pinned entry count not decreasing, cur_ring_pel_len = %d, old_ring_pel_len = "
- "%d, ring = %d",
- (int)cur_ring_pel_len, (int)old_ring_pel_len, (int)ring)
- } /* end if */
+ HGOTO_DONE(TRUE)
+ }
- HDassert(protected_entries == cache_ptr->pl_len);
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL,
+ "Pinned entry count not decreasing, cur_ring_pel_len = %d, old_ring_pel_len = "
+ "%d, ring = %d",
+ (int)cur_ring_pel_len, (int)old_ring_pel_len, (int)ring)
+ } /* end if */
- if ((protected_entries > 0) && (protected_entries == cache_ptr->index_len))
+ HDassert(protected_entries == cache_ptr->pl_len);
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL,
- "Only protected entries left in cache, protected_entries = %d",
- (int)protected_entries)
+ if ((protected_entries > 0) && (protected_entries == cache_ptr->index_len))
- } /* main while loop */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL,
+ "Only protected entries left in cache, protected_entries = %d",
+ (int)protected_entries)
- /* Invariants, after destroying all entries in the ring */
- for (i = (int)H5C_RING_UNDEFINED; i <= (int)ring; i++) {
+ } /* main while loop */
- HDassert(cache_ptr->index_ring_len[i] == 0);
- HDassert(cache_ptr->index_ring_size[i] == (size_t)0);
- HDassert(cache_ptr->clean_index_ring_size[i] == (size_t)0);
- HDassert(cache_ptr->dirty_index_ring_size[i] == (size_t)0);
+ /* Invariants, after destroying all entries in the ring */
+ for (i = (int)H5C_RING_UNDEFINED; i <= (int)ring; i++) {
- HDassert(cache_ptr->slist_ring_len[i] == 0);
- HDassert(cache_ptr->slist_ring_size[i] == (size_t)0);
+ HDassert(cache_ptr->index_ring_len[i] == 0);
+ HDassert(cache_ptr->index_ring_size[i] == (size_t)0);
+ HDassert(cache_ptr->clean_index_ring_size[i] == (size_t)0);
+ HDassert(cache_ptr->dirty_index_ring_size[i] == (size_t)0);
- } /* end for */
+ HDassert(cache_ptr->slist_ring_len[i] == 0);
+ HDassert(cache_ptr->slist_ring_size[i] == (size_t)0);
- HDassert(protected_entries <= cache_ptr->pl_len);
+ } /* end for */
- if (protected_entries > 0) {
+ HDassert(protected_entries <= cache_ptr->pl_len);
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Cache has protected entries")
- }
- else if (cur_ring_pel_len > 0) {
+ if (protected_entries > 0) {
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't unpin all pinned entries in ring")
- }
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Cache has protected entries")
+ }
+ else if (cur_ring_pel_len > 0) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't unpin all pinned entries in ring")
+ }
done:
- FUNC_LEAVE_NOAPI(ret_value)
+ FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C_flush_invalidate_ring() */
+} /* H5C_flush_invalidate_ring() */
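
The pass-termination rule above (the pinned entry count for the ring must keep shrinking) may be easier to see with a hypothetical trace; the counts are invented for illustration:

    /* Hypothetical values of cur_ring_pel_len over successive passes of the
     * main while loop:
     *
     *   5 -> 3 -> 1 -> 0    ring empties; the loop exits normally
     *   5 -> 3 -> 3         count stops shrinking while still positive; fail
     *                       with "Pinned entry count not decreasing" unless
     *                       H5C__EVICT_ALLOW_LAST_PINS_FLAG was passed, in
     *                       which case return early instead.
     */
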
- /*-------------------------------------------------------------------------
- *
- * Function: H5C__flush_ring
- *
- * Purpose: Flush the entries contained in the specified cache and
- * ring. All entries in rings outside the specified ring
- * must have been flushed on entry.
- *
- * If the cache contains protected entries in the specified
- * ring, the function will fail, as protected entries cannot
- * be flushed. However all unprotected entries in the target
- * ring should be flushed before the function returns failure.
- *
- * If flush dependencies appear in the target ring, the
- * function makes repeated passes through the slist flushing
- * entries in flush dependency order.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * a request to flush all items and something was protected.
- *
- * Programmer: John Mainzer
- * 9/1/15
- *
- * Changes: A recent optimization turns off the slist unless a flush
- * is in progress. This should not effect this function, as
- * it is only called during a flush. Added an assertion to
- * verify this.
- *
- * JRM -- 5/6/20
- *
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__flush_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
- {
- H5C_t *cache_ptr = f->shared->cache;
- hbool_t flushed_entries_last_pass;
- hbool_t flush_marked_entries;
- hbool_t ignore_protected;
- hbool_t tried_to_flush_protected_entry = FALSE;
- hbool_t restart_slist_scan;
- uint32_t protected_entries = 0;
- H5SL_node_t *node_ptr = NULL;
- H5C_cache_entry_t *entry_ptr = NULL;
- H5C_cache_entry_t *next_entry_ptr = NULL;
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__flush_ring
+ *
+ * Purpose: Flush the entries contained in the specified cache and
+ * ring. All entries in rings outside the specified ring
+ * must have been flushed on entry.
+ *
+ * If the cache contains protected entries in the specified
+ * ring, the function will fail, as protected entries cannot
+ * be flushed. However all unprotected entries in the target
+ * ring should be flushed before the function returns failure.
+ *
+ * If flush dependencies appear in the target ring, the
+ * function makes repeated passes through the slist flushing
+ * entries in flush dependency order.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 9/1/15
+ *
+ * Changes: A recent optimization turns off the slist unless a flush
+ * is in progress. This should not affect this function, as
+ * it is only called during a flush. Added an assertion to
+ * verify this.
+ *
+ * JRM -- 5/6/20
+ *
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
+{
+ H5C_t *cache_ptr = f->shared->cache;
+ hbool_t flushed_entries_last_pass;
+ hbool_t flush_marked_entries;
+ hbool_t ignore_protected;
+ hbool_t tried_to_flush_protected_entry = FALSE;
+ hbool_t restart_slist_scan;
+ uint32_t protected_entries = 0;
+ H5SL_node_t *node_ptr = NULL;
+ H5C_cache_entry_t *entry_ptr = NULL;
+ H5C_cache_entry_t *next_entry_ptr = NULL;
#if H5C_DO_SANITY_CHECKS
- uint32_t initial_slist_len = 0;
- size_t initial_slist_size = 0;
+ uint32_t initial_slist_len = 0;
+ size_t initial_slist_size = 0;
#endif /* H5C_DO_SANITY_CHECKS */
- int i;
- herr_t ret_value = SUCCEED;
+ int i;
+ herr_t ret_value = SUCCEED;
- FUNC_ENTER_STATIC
+ FUNC_ENTER_STATIC
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->slist_enabled);
- HDassert(cache_ptr->slist_ptr);
- HDassert((flags & H5C__FLUSH_INVALIDATE_FLAG) == 0);
- HDassert(ring > H5C_RING_UNDEFINED);
- HDassert(ring < H5C_RING_NTYPES);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_enabled);
+ HDassert(cache_ptr->slist_ptr);
+ HDassert((flags & H5C__FLUSH_INVALIDATE_FLAG) == 0);
+ HDassert(ring > H5C_RING_UNDEFINED);
+ HDassert(ring < H5C_RING_NTYPES);
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
+ if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
- ignore_protected = ((flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0);
- flush_marked_entries = ((flags & H5C__FLUSH_MARKED_ENTRIES_FLAG) != 0);
+ ignore_protected = ((flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0);
+ flush_marked_entries = ((flags & H5C__FLUSH_MARKED_ENTRIES_FLAG) != 0);
- if (!flush_marked_entries) {
+ if (!flush_marked_entries) {
- for (i = (int)H5C_RING_UNDEFINED; i < (int)ring; i++) {
+ for (i = (int)H5C_RING_UNDEFINED; i < (int)ring; i++) {
- HDassert(cache_ptr->slist_ring_len[i] == 0);
- }
+ HDassert(cache_ptr->slist_ring_len[i] == 0);
}
+ }
- HDassert(cache_ptr->flush_in_progress);
+ HDassert(cache_ptr->flush_in_progress);
- /* When we are only flushing marked entries, the slist will usually
- * still contain entries when we have flushed everything we should.
- * Thus we track whether we have flushed any entries in the last
- * pass, and terminate if we haven't.
- */
- flushed_entries_last_pass = TRUE;
+ /* When we are only flushing marked entries, the slist will usually
+ * still contain entries when we have flushed everything we should.
+ * Thus we track whether we have flushed any entries in the last
+ * pass, and terminate if we haven't.
+ */
+ flushed_entries_last_pass = TRUE;
- /* Set the cache_ptr->slist_changed to false.
- *
- * This flag is set to TRUE by H5C__flush_single_entry if the
- * slist is modified by a pre_serialize, serialize, or notify callback.
- * H5C_flush_cache uses this flag to detect any modifications
- * to the slist that might corrupt the scan of the slist -- and
- * restart the scan in this event.
- */
- cache_ptr->slist_changed = FALSE;
+ /* Set the cache_ptr->slist_changed to false.
+ *
+ * This flag is set to TRUE by H5C__flush_single_entry if the
+ * slist is modified by a pre_serialize, serialize, or notify callback.
+ * H5C_flush_cache uses this flag to detect any modifications
+ * to the slist that might corrupt the scan of the slist -- and
+ * restart the scan in this event.
+ */
+ cache_ptr->slist_changed = FALSE;
- while ((cache_ptr->slist_ring_len[ring] > 0) && (protected_entries == 0) &&
- (flushed_entries_last_pass)) {
+ while ((cache_ptr->slist_ring_len[ring] > 0) && (protected_entries == 0) && (flushed_entries_last_pass)) {
- flushed_entries_last_pass = FALSE;
+ flushed_entries_last_pass = FALSE;
#if H5C_DO_SANITY_CHECKS
- /* For sanity checking, try to verify that the skip list has
- * the expected size and number of entries at the end of each
- * internal while loop (see below).
- *
- * Doing this get a bit tricky, as depending on flags, we may
- * or may not flush all the entries in the slist.
- *
- * To make things more entertaining, with the advent of the
- * fractal heap, the entry serialize callback can cause entries
- * to be dirtied, resized, and/or moved. Also, the
- * pre_serialize callback can result in an entry being
- * removed from the cache via the take ownership flag.
- *
- * To deal with this, we first make note of the initial
- * skip list length and size:
- */
- initial_slist_len = cache_ptr->slist_len;
- initial_slist_size = cache_ptr->slist_size;
-
- /* As mentioned above, there is the possibility that
- * entries will be dirtied, resized, flushed, or removed
- * from the cache via the take ownership flag during
- * our pass through the skip list. To capture the number
- * of entries added, and the skip list size delta,
- * zero the slist_len_increase and slist_size_increase of
- * the cache's instance of H5C_t. These fields will be
- * updated elsewhere to account for slist insertions and/or
- * dirty entry size changes.
- */
- cache_ptr->slist_len_increase = 0;
- cache_ptr->slist_size_increase = 0;
+ /* For sanity checking, try to verify that the skip list has
+ * the expected size and number of entries at the end of each
+ * internal while loop (see below).
+ *
+ * Doing this gets a bit tricky, as depending on flags, we may
+ * or may not flush all the entries in the slist.
+ *
+ * To make things more entertaining, with the advent of the
+ * fractal heap, the entry serialize callback can cause entries
+ * to be dirtied, resized, and/or moved. Also, the
+ * pre_serialize callback can result in an entry being
+ * removed from the cache via the take ownership flag.
+ *
+ * To deal with this, we first make note of the initial
+ * skip list length and size:
+ */
+ initial_slist_len = cache_ptr->slist_len;
+ initial_slist_size = cache_ptr->slist_size;
+
+ /* As mentioned above, there is the possibility that
+ * entries will be dirtied, resized, flushed, or removed
+ * from the cache via the take ownership flag during
+ * our pass through the skip list. To capture the number
+ * of entries added, and the skip list size delta,
+ * zero the slist_len_increase and slist_size_increase of
+ * the cache's instance of H5C_t. These fields will be
+ * updated elsewhere to account for slist insertions and/or
+ * dirty entry size changes.
+ */
+ cache_ptr->slist_len_increase = 0;
+ cache_ptr->slist_size_increase = 0;
- /* at the end of the loop, use these values to compute the
- * expected slist length and size and compare this with the
- * value recorded in the cache's instance of H5C_t.
- */
+ /* at the end of the loop, use these values to compute the
+ * expected slist length and size and compare this with the
+ * value recorded in the cache's instance of H5C_t.
+ */
#endif /* H5C_DO_SANITY_CHECKS */
- restart_slist_scan = TRUE;
+ restart_slist_scan = TRUE;
- while ((restart_slist_scan) || (node_ptr != NULL)) {
+ while ((restart_slist_scan) || (node_ptr != NULL)) {
- if (restart_slist_scan) {
+ if (restart_slist_scan) {
- restart_slist_scan = FALSE;
+ restart_slist_scan = FALSE;
- /* Start at beginning of skip list */
- node_ptr = H5SL_first(cache_ptr->slist_ptr);
+ /* Start at beginning of skip list */
+ node_ptr = H5SL_first(cache_ptr->slist_ptr);
- if (node_ptr == NULL) {
+ if (node_ptr == NULL) {
- /* the slist is empty -- break out of inner loop */
- break;
- }
+ /* the slist is empty -- break out of inner loop */
+ break;
+ }
- /* Get cache entry for this node */
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ /* Get cache entry for this node */
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- if (NULL == next_entry_ptr)
+ if (NULL == next_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
- HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(next_entry_ptr->is_dirty);
- HDassert(next_entry_ptr->in_slist);
+ HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(next_entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->in_slist);
- } /* end if */
+ } /* end if */
- entry_ptr = next_entry_ptr;
-
- /* With the advent of the fractal heap, the free space
- * manager, and the version 3 cache, it is possible
- * that the pre-serialize or serialize callback will
- * dirty, resize, or take ownership of other entries
- * in the cache.
- *
- * To deal with this, I have inserted code to detect any
- * change in the skip list not directly under the control
- * of this function. If such modifications are detected,
- * we must re-start the scan of the skip list to avoid
- * the possibility that the target of the next_entry_ptr
- * may have been flushed or deleted from the cache.
- *
- * To verify that all such possibilities have been dealt
- * with, we do a bit of extra sanity checking on
- * entry_ptr.
- */
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->in_slist);
- HDassert(entry_ptr->is_dirty);
+ entry_ptr = next_entry_ptr;
- if ((!flush_marked_entries) || (entry_ptr->flush_marker)) {
+ /* With the advent of the fractal heap, the free space
+ * manager, and the version 3 cache, it is possible
+ * that the pre-serialize or serialize callback will
+ * dirty, resize, or take ownership of other entries
+ * in the cache.
+ *
+ * To deal with this, I have inserted code to detect any
+ * change in the skip list not directly under the control
+ * of this function. If such modifications are detected,
+ * we must re-start the scan of the skip list to avoid
+ * the possibility that the target of the next_entry_ptr
+ * may have been flushed or deleted from the cache.
+ *
+ * To verify that all such possibilities have been dealt
+ * with, we do a bit of extra sanity checking on
+ * entry_ptr.
+ */
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->in_slist);
+ HDassert(entry_ptr->is_dirty);
- HDassert(entry_ptr->ring >= ring);
- }
+ if ((!flush_marked_entries) || (entry_ptr->flush_marker)) {
- /* Advance node pointer now, before we delete its target
- * from the slist.
- */
- node_ptr = H5SL_next(node_ptr);
+ HDassert(entry_ptr->ring >= ring);
+ }
- if (node_ptr != NULL) {
+ /* Advance node pointer now, before we delete its target
+ * from the slist.
+ */
+ node_ptr = H5SL_next(node_ptr);
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ if (node_ptr != NULL) {
- if (NULL == next_entry_ptr)
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+ if (NULL == next_entry_ptr)
- HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(next_entry_ptr->is_dirty);
- HDassert(next_entry_ptr->in_slist);
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
- if (!flush_marked_entries || next_entry_ptr->flush_marker) {
+ HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(next_entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->in_slist);
- HDassert(next_entry_ptr->ring >= ring);
- }
+ if (!flush_marked_entries || next_entry_ptr->flush_marker) {
- HDassert(entry_ptr != next_entry_ptr);
+ HDassert(next_entry_ptr->ring >= ring);
+ }
- } /* end if */
- else {
+ HDassert(entry_ptr != next_entry_ptr);
- next_entry_ptr = NULL;
- }
+ } /* end if */
+ else {
- if ((!flush_marked_entries || entry_ptr->flush_marker) &&
- ((!entry_ptr->flush_me_last) ||
- ((entry_ptr->flush_me_last) && ((cache_ptr->num_last_entries >= cache_ptr->slist_len) ||
- (flush_marked_entries && entry_ptr->flush_marker)))) &&
- ((entry_ptr->flush_dep_nchildren == 0) || (entry_ptr->flush_dep_ndirty_children == 0)) &&
- (entry_ptr->ring == ring)) {
+ next_entry_ptr = NULL;
+ }
- HDassert(entry_ptr->flush_dep_nunser_children == 0);
+ if ((!flush_marked_entries || entry_ptr->flush_marker) &&
+ ((!entry_ptr->flush_me_last) ||
+ ((entry_ptr->flush_me_last) && ((cache_ptr->num_last_entries >= cache_ptr->slist_len) ||
+ (flush_marked_entries && entry_ptr->flush_marker)))) &&
+ ((entry_ptr->flush_dep_nchildren == 0) || (entry_ptr->flush_dep_ndirty_children == 0)) &&
+ (entry_ptr->ring == ring)) {
- if (entry_ptr->is_protected) {
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
- /* we probably have major problems -- but lets
- * flush everything we can before we decide
- * whether to flag an error.
- */
- tried_to_flush_protected_entry = TRUE;
- protected_entries++;
+ if (entry_ptr->is_protected) {
- } /* end if */
- else {
+ /* we probably have major problems -- but let's
+ * flush everything we can before we decide
+ * whether to flag an error.
+ */
+ tried_to_flush_protected_entry = TRUE;
+ protected_entries++;
- if (H5C__flush_single_entry(f, entry_ptr, (flags | H5C__DURING_FLUSH_FLAG)) < 0)
+ } /* end if */
+ else {
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry")
+ if (H5C__flush_single_entry(f, entry_ptr, (flags | H5C__DURING_FLUSH_FLAG)) < 0)
- if (cache_ptr->slist_changed) {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry")
- /* The slist has been modified by something
- * other than the simple removal of the
- * of the flushed entry after the flush.
- *
- * This has the potential to corrupt the
- * scan through the slist, so restart it.
- */
- restart_slist_scan = TRUE;
- cache_ptr->slist_changed = FALSE;
- H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
+ if (cache_ptr->slist_changed) {
- } /* end if */
+ /* The slist has been modified by something
+ * other than the simple removal of the
+ * flushed entry after the flush.
+ *
+ * This has the potential to corrupt the
+ * scan through the slist, so restart it.
+ */
+ restart_slist_scan = TRUE;
+ cache_ptr->slist_changed = FALSE;
+ H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
- flushed_entries_last_pass = TRUE;
+ } /* end if */
- } /* end else */
- } /* end if */
- } /* while ( ( restart_slist_scan ) || ( node_ptr != NULL ) ) */
+ flushed_entries_last_pass = TRUE;
+
+ } /* end else */
+ } /* end if */
+ } /* while ( ( restart_slist_scan ) || ( node_ptr != NULL ) ) */
#if H5C_DO_SANITY_CHECKS
- /* Verify that the slist size and length are as expected. */
- HDassert((uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase) ==
- cache_ptr->slist_len);
- HDassert((size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase) ==
- cache_ptr->slist_size);
+ /* Verify that the slist size and length are as expected. */
+ HDassert((uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase) ==
+ cache_ptr->slist_len);
+ HDassert((size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase) ==
+ cache_ptr->slist_size);
#endif /* H5C_DO_SANITY_CHECKS */
- } /* while */
+ } /* while */
- HDassert(protected_entries <= cache_ptr->pl_len);
+ HDassert(protected_entries <= cache_ptr->pl_len);
- if (((cache_ptr->pl_len > 0) && (!ignore_protected)) || (tried_to_flush_protected_entry))
+ if (((cache_ptr->pl_len > 0) && (!ignore_protected)) || (tried_to_flush_protected_entry))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "cache has protected items")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "cache has protected items")
#if H5C_DO_SANITY_CHECKS
- if (!flush_marked_entries) {
+ if (!flush_marked_entries) {
- HDassert(cache_ptr->slist_ring_len[ring] == 0);
- HDassert(cache_ptr->slist_ring_size[ring] == 0);
+ HDassert(cache_ptr->slist_ring_len[ring] == 0);
+ HDassert(cache_ptr->slist_ring_size[ring] == 0);
- } /* end if */
-#endif /* H5C_DO_SANITY_CHECKS */
+ } /* end if */
+#endif /* H5C_DO_SANITY_CHECKS */
done:
- FUNC_LEAVE_NOAPI(ret_value)
+ FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__flush_ring() */
+} /* H5C__flush_ring() */
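/*
 * Editor's sketch (not part of the diff above): the scan-restart pattern that
 * H5C__flush_ring() uses, reduced to a self-contained toy.  The visit callback
 * stands in for H5C__flush_single_entry(); when it reports that it modified
 * the list, the scan restarts from the head so that a stale 'next' pointer is
 * never followed.  All names here (toy_node, toy_visit_t, ...) are
 * hypothetical and exist only for illustration.
 */
#include <stddef.h>

struct toy_node {
    struct toy_node *next;
};

typedef void (*toy_visit_t)(struct toy_node *node, int *changed);

static void
toy_scan_with_restart(struct toy_node **head, toy_visit_t visit)
{
    struct toy_node *node    = NULL;
    int              restart = 1;

    while (restart || node != NULL) {
        struct toy_node *curr;
        int              changed = 0;

        if (restart) {
            restart = 0;
            node    = *head;        /* start over at the head of the list */
            if (node == NULL)
                break;              /* the list is empty */
        }

        curr = node;
        node = node->next;          /* advance first: visit() may free curr */

        visit(curr, &changed);      /* may insert or remove other nodes */
        if (changed)
            restart = 1;            /* the scan may be stale -- restart it */
    }
}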
- /*-------------------------------------------------------------------------
- *
- * Function: H5C__flush_single_entry
- *
- * Purpose: Flush or clear (and evict if requested) the cache entry
- * with the specified address and type. If the type is NULL,
- * any unprotected entry at the specified address will be
- * flushed (and possibly evicted).
- *
- * Attempts to flush a protected entry will result in an
- * error.
- *
- * If the H5C__FLUSH_INVALIDATE_FLAG flag is set, the entry will
- * be cleared and not flushed, and the call can't be part of a
- * sequence of flushes.
- *
- * The function does nothing silently if there is no entry
- * at the supplied address, or if the entry found has the
- * wrong type.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * an attempt to flush a protected item.
- *
- * Programmer: John Mainzer, 5/5/04
- *
- * Modifications:
- *
- * JRM -- 7/21/04
- * Updated function for the addition of the hash table.
- *
- * QAK -- 11/26/04
- * Updated function for the switch from TBBTs to skip lists.
- *
- * JRM -- 1/6/05
- * Updated function to reset the flush_marker field.
- * Also replace references to H5F_FLUSH_INVALIDATE and
- * H5F_FLUSH_CLEAR_ONLY with references to
- * H5C__FLUSH_INVALIDATE_FLAG and H5C__FLUSH_CLEAR_ONLY_FLAG
- * respectively.
- *
- * JRM -- 6/24/05
- * Added code to remove dirty entries from the slist after
- * they have been flushed. Also added a sanity check that
- * will scream if we attempt a write when writes are
- * completely disabled.
- *
- * JRM -- 7/5/05
- * Added code to call the new log_flush callback whenever
- * a dirty entry is written to disk. Note that the callback
- * is not called if the H5C__FLUSH_CLEAR_ONLY_FLAG is set,
- * as there is no write to file in this case.
- *
- * JRM -- 8/21/06
- * Added code maintaining the flush_in_progress and
- * destroy_in_progress fields in H5C_cache_entry_t.
- *
- * Also added flush_flags parameter to the call to
- * type_ptr->flush() so that the flush routine can report
- * whether the entry has been resized or renamed. Added
- * code using the flush_flags variable to detect the case
- * in which the target entry is resized during flush, and
- * update the caches data structures accordingly.
- *
- * JRM -- 3/29/07
- * Added sanity checks on the new is_read_only and
- * ro_ref_count fields.
- *
- * QAK -- 2/07/08
- * Separated "destroy entry" concept from "remove entry from
- * cache" concept, by adding the 'take_ownership' flag and
- * the "destroy_entry" variable.
- *
- * JRM -- 11/5/08
- * Added call to H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN() to
- * maintain the new clean_index_size and clean_index_size
- * fields of H5C_t.
- *
- *
- * Missing entries??
- *
- *
- * JRM -- 5/8/20
- * Updated sanity checks for the possibility that the slist
- * is disabled.
- *
- * Also updated main comment to conform more closely with
- * the current state of the code.
- *
- *-------------------------------------------------------------------------
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__flush_single_entry
+ *
+ * Purpose: Flush or clear (and evict if requested) the cache entry
+ * with the specified address and type. If the type is NULL,
+ * any unprotected entry at the specified address will be
+ * flushed (and possibly evicted).
+ *
+ * Attempts to flush a protected entry will result in an
+ * error.
+ *
+ * If the H5C__FLUSH_INVALIDATE_FLAG flag is set, the entry will
+ * be cleared and not flushed, and the call can't be part of a
+ * sequence of flushes.
+ *
+ * The function silently does nothing if there is no entry
+ * at the supplied address, or if the entry found has the
+ * wrong type.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * an attempt to flush a protected item.
+ *
+ * Programmer: John Mainzer, 5/5/04
+ *
+ * Modifications:
+ *
+ * JRM -- 7/21/04
+ * Updated function for the addition of the hash table.
+ *
+ * QAK -- 11/26/04
+ * Updated function for the switch from TBBTs to skip lists.
+ *
+ * JRM -- 1/6/05
+ * Updated function to reset the flush_marker field.
+ * Also replace references to H5F_FLUSH_INVALIDATE and
+ * H5F_FLUSH_CLEAR_ONLY with references to
+ * H5C__FLUSH_INVALIDATE_FLAG and H5C__FLUSH_CLEAR_ONLY_FLAG
+ * respectively.
+ *
+ * JRM -- 6/24/05
+ * Added code to remove dirty entries from the slist after
+ * they have been flushed. Also added a sanity check that
+ * will scream if we attempt a write when writes are
+ * completely disabled.
+ *
+ * JRM -- 7/5/05
+ * Added code to call the new log_flush callback whenever
+ * a dirty entry is written to disk. Note that the callback
+ * is not called if the H5C__FLUSH_CLEAR_ONLY_FLAG is set,
+ * as there is no write to file in this case.
+ *
+ * JRM -- 8/21/06
+ * Added code maintaining the flush_in_progress and
+ * destroy_in_progress fields in H5C_cache_entry_t.
+ *
+ * Also added flush_flags parameter to the call to
+ * type_ptr->flush() so that the flush routine can report
+ * whether the entry has been resized or renamed. Added
+ * code using the flush_flags variable to detect the case
+ * in which the target entry is resized during flush, and
+ * update the cache's data structures accordingly.
+ *
+ * JRM -- 3/29/07
+ * Added sanity checks on the new is_read_only and
+ * ro_ref_count fields.
+ *
+ * QAK -- 2/07/08
+ * Separated "destroy entry" concept from "remove entry from
+ * cache" concept, by adding the 'take_ownership' flag and
+ * the "destroy_entry" variable.
+ *
+ * JRM -- 11/5/08
+ * Added call to H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN() to
+ * maintain the new clean_index_size and dirty_index_size
+ * fields of H5C_t.
+ *
+ *
+ * Missing entries??
+ *
+ *
+ * JRM -- 5/8/20
+ * Updated sanity checks for the possibility that the slist
+ * is disabled.
+ *
+ * Also updated main comment to conform more closely with
+ * the current state of the code.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
+{
+ H5C_t *cache_ptr; /* Cache for file */
+ hbool_t destroy; /* external flag */
+ hbool_t clear_only; /* external flag */
+ hbool_t free_file_space; /* external flag */
+ hbool_t take_ownership; /* external flag */
+ hbool_t del_from_slist_on_destroy; /* external flag */
+ hbool_t during_flush; /* external flag */
+ hbool_t write_entry; /* internal flag */
+ hbool_t destroy_entry; /* internal flag */
+ hbool_t generate_image; /* internal flag */
+ hbool_t update_page_buffer; /* internal flag */
+ hbool_t was_dirty;
+ hbool_t suppress_image_entry_writes = FALSE;
+ hbool_t suppress_image_entry_frees = FALSE;
+ haddr_t entry_addr = HADDR_UNDEF;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ HDassert(f);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(entry_ptr);
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring != H5C_RING_UNDEFINED);
+ HDassert(entry_ptr->type);
+
+ /* setup external flags from the flags parameter */
+ destroy = ((flags & H5C__FLUSH_INVALIDATE_FLAG) != 0);
+ clear_only = ((flags & H5C__FLUSH_CLEAR_ONLY_FLAG) != 0);
+ free_file_space = ((flags & H5C__FREE_FILE_SPACE_FLAG) != 0);
+ take_ownership = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0);
+ del_from_slist_on_destroy = ((flags & H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) != 0);
+ during_flush = ((flags & H5C__DURING_FLUSH_FLAG) != 0);
+ generate_image = ((flags & H5C__GENERATE_IMAGE_FLAG) != 0);
+ update_page_buffer = ((flags & H5C__UPDATE_PAGE_BUFFER_FLAG) != 0);
+
+ /* Set the flag for destroying the entry, based on the 'take ownership'
+ * and 'destroy' flags
*/
- herr_t H5C__flush_single_entry(H5F_t * f, H5C_cache_entry_t * entry_ptr, unsigned flags)
- {
- H5C_t *cache_ptr; /* Cache for file */
- hbool_t destroy; /* external flag */
- hbool_t clear_only; /* external flag */
- hbool_t free_file_space; /* external flag */
- hbool_t take_ownership; /* external flag */
- hbool_t del_from_slist_on_destroy; /* external flag */
- hbool_t during_flush; /* external flag */
- hbool_t write_entry; /* internal flag */
- hbool_t destroy_entry; /* internal flag */
- hbool_t generate_image; /* internal flag */
- hbool_t update_page_buffer; /* internal flag */
- hbool_t was_dirty;
- hbool_t suppress_image_entry_writes = FALSE;
- hbool_t suppress_image_entry_frees = FALSE;
- haddr_t entry_addr = HADDR_UNDEF;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- HDassert(f);
- cache_ptr = f->shared->cache;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(entry_ptr);
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->ring != H5C_RING_UNDEFINED);
- HDassert(entry_ptr->type);
-
- /* setup external flags from the flags parameter */
- destroy = ((flags & H5C__FLUSH_INVALIDATE_FLAG) != 0);
- clear_only = ((flags & H5C__FLUSH_CLEAR_ONLY_FLAG) != 0);
- free_file_space = ((flags & H5C__FREE_FILE_SPACE_FLAG) != 0);
- take_ownership = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0);
- del_from_slist_on_destroy = ((flags & H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) != 0);
- during_flush = ((flags & H5C__DURING_FLUSH_FLAG) != 0);
- generate_image = ((flags & H5C__GENERATE_IMAGE_FLAG) != 0);
- update_page_buffer = ((flags & H5C__UPDATE_PAGE_BUFFER_FLAG) != 0);
-
- /* Set the flag for destroying the entry, based on the 'take ownership'
- * and 'destroy' flags
- */
- if (take_ownership) {
+ if (take_ownership) {
- destroy_entry = FALSE;
- }
- else {
+ destroy_entry = FALSE;
+ }
+ else {
- destroy_entry = destroy;
- }
+ destroy_entry = destroy;
+ }
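/*
 * Editor's note (not part of the diff): a worked example of how the flags
 * above combine.  H5C__FLUSH_INVALIDATE_FLAG requests that the entry be
 * evicted; adding H5C__TAKE_OWNERSHIP_FLAG hands the in-core entry to the
 * caller instead of freeing it, so destroy_entry stays FALSE even though
 * destroy is TRUE:
 *
 *     flags passed in                              destroy  take_ownership  destroy_entry
 *     -------------------------------------------  -------  --------------  -------------
 *     0                                            FALSE    FALSE           FALSE
 *     H5C__FLUSH_INVALIDATE_FLAG                   TRUE     FALSE           TRUE
 *     H5C__FLUSH_INVALIDATE_FLAG |
 *         H5C__TAKE_OWNERSHIP_FLAG                 TRUE     TRUE            FALSE
 */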
- /* we will write the entry to disk if it exists, is dirty, and if the
- * clear only flag is not set.
- */
- if (entry_ptr->is_dirty && !clear_only) {
+ /* we will write the entry to disk if it exists, is dirty, and if the
+ * clear only flag is not set.
+ */
+ if (entry_ptr->is_dirty && !clear_only) {
- write_entry = TRUE;
- }
- else {
+ write_entry = TRUE;
+ }
+ else {
- write_entry = FALSE;
- }
+ write_entry = FALSE;
+ }
- /* if we have received close warning, and we have been instructed to
- * generate a metadata cache image, and we have actually constructed
- * the entry images, set suppress_image_entry_frees to TRUE.
- *
- * Set suppress_image_entry_writes to TRUE if indicated by the
- * image_ctl flags.
- */
- if ((cache_ptr->close_warning_received) && (cache_ptr->image_ctl.generate_image) &&
- (cache_ptr->num_entries_in_image > 0) && (cache_ptr->image_entries != NULL)) {
+ /* if we have received close warning, and we have been instructed to
+ * generate a metadata cache image, and we have actually constructed
+ * the entry images, set suppress_image_entry_frees to TRUE.
+ *
+ * Set suppress_image_entry_writes to TRUE if indicated by the
+ * image_ctl flags.
+ */
+ if ((cache_ptr->close_warning_received) && (cache_ptr->image_ctl.generate_image) &&
+ (cache_ptr->num_entries_in_image > 0) && (cache_ptr->image_entries != NULL)) {
- /* Sanity checks */
- HDassert(entry_ptr->image_up_to_date || !(entry_ptr->include_in_image));
- HDassert(entry_ptr->image_ptr || !(entry_ptr->include_in_image));
- HDassert((!clear_only) || !(entry_ptr->include_in_image));
- HDassert((!take_ownership) || !(entry_ptr->include_in_image));
- HDassert((!free_file_space) || !(entry_ptr->include_in_image));
+ /* Sanity checks */
+ HDassert(entry_ptr->image_up_to_date || !(entry_ptr->include_in_image));
+ HDassert(entry_ptr->image_ptr || !(entry_ptr->include_in_image));
+ HDassert((!clear_only) || !(entry_ptr->include_in_image));
+ HDassert((!take_ownership) || !(entry_ptr->include_in_image));
+ HDassert((!free_file_space) || !(entry_ptr->include_in_image));
- suppress_image_entry_frees = TRUE;
+ suppress_image_entry_frees = TRUE;
- if (cache_ptr->image_ctl.flags & H5C_CI__SUPRESS_ENTRY_WRITES) {
+ if (cache_ptr->image_ctl.flags & H5C_CI__SUPRESS_ENTRY_WRITES) {
- suppress_image_entry_writes = TRUE;
+ suppress_image_entry_writes = TRUE;
- } /* end if */
- } /* end if */
+ } /* end if */
+ } /* end if */
- /* run initial sanity checks */
+ /* run initial sanity checks */
#if H5C_DO_SANITY_CHECKS
- if (cache_ptr->slist_enabled) {
+ if (cache_ptr->slist_enabled) {
- if (entry_ptr->in_slist) {
+ if (entry_ptr->in_slist) {
- HDassert(entry_ptr->is_dirty);
+ HDassert(entry_ptr->is_dirty);
- if ((entry_ptr->flush_marker) && (!entry_ptr->is_dirty))
+ if ((entry_ptr->flush_marker) && (!entry_ptr->is_dirty))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry in slist failed sanity checks")
- } /* end if */
- else {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry in slist failed sanity checks")
+ } /* end if */
+ else {
- HDassert(!entry_ptr->is_dirty);
- HDassert(!entry_ptr->flush_marker);
+ HDassert(!entry_ptr->is_dirty);
+ HDassert(!entry_ptr->flush_marker);
- if ((entry_ptr->is_dirty) || (entry_ptr->flush_marker))
+ if ((entry_ptr->is_dirty) || (entry_ptr->flush_marker))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry failed sanity checks")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry failed sanity checks")
- } /* end else */
- }
- else { /* slist is disabled */
+ } /* end else */
+ }
+ else { /* slist is disabled */
- HDassert(!entry_ptr->in_slist);
+ HDassert(!entry_ptr->in_slist);
- if (!entry_ptr->is_dirty) {
+ if (!entry_ptr->is_dirty) {
- if (entry_ptr->flush_marker)
+ if (entry_ptr->flush_marker)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flush marked clean entry?")
- }
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flush marked clean entry?")
}
+ }
#endif /* H5C_DO_SANITY_CHECKS */
- if (entry_ptr->is_protected) {
+ if (entry_ptr->is_protected) {
- HDassert(!entry_ptr->is_protected);
+ HDassert(!entry_ptr->is_protected);
- /* Attempt to flush a protected entry -- scream and die. */
- HGOTO_ERROR(H5E_CACHE, H5E_PROTECT, FAIL, "Attempt to flush a protected entry")
+ /* Attempt to flush a protected entry -- scream and die. */
+ HGOTO_ERROR(H5E_CACHE, H5E_PROTECT, FAIL, "Attempt to flush a protected entry")
- } /* end if */
+ } /* end if */
- /* Set entry_ptr->flush_in_progress = TRUE and set
- * entry_ptr->flush_marker = FALSE
- *
- * We will set flush_in_progress back to FALSE at the end if the
- * entry still exists at that point.
- */
- entry_ptr->flush_in_progress = TRUE;
- entry_ptr->flush_marker = FALSE;
+ /* Set entry_ptr->flush_in_progress = TRUE and set
+ * entry_ptr->flush_marker = FALSE
+ *
+ * We will set flush_in_progress back to FALSE at the end if the
+ * entry still exists at that point.
+ */
+ entry_ptr->flush_in_progress = TRUE;
+ entry_ptr->flush_marker = FALSE;
- /* Preserve current dirty state for later */
- was_dirty = entry_ptr->is_dirty;
+ /* Preserve current dirty state for later */
+ was_dirty = entry_ptr->is_dirty;
- /* The entry is dirty, and we are doing a flush, a flush destroy or have
- * been requested to generate an image. In those cases, serialize the
- * entry.
- */
- if (write_entry || generate_image) {
+ /* The entry is dirty, and we are doing a flush, a flush destroy or have
+ * been requested to generate an image. In those cases, serialize the
+ * entry.
+ */
+ if (write_entry || generate_image) {
- HDassert(entry_ptr->is_dirty);
+ HDassert(entry_ptr->is_dirty);
- if (NULL == entry_ptr->image_ptr) {
+ if (NULL == entry_ptr->image_ptr) {
- if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)))
+ if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL,
- "memory allocation failed for on disk image buffer")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL,
+ "memory allocation failed for on disk image buffer")
#if H5C_DO_MEMORY_SANITY_CHECKS
- H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE,
- H5C_IMAGE_EXTRA_SPACE);
+ H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE,
+ H5C_IMAGE_EXTRA_SPACE);
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
- } /* end if */
+ } /* end if */
- if (!(entry_ptr->image_up_to_date)) {
+ if (!(entry_ptr->image_up_to_date)) {
- /* Sanity check */
- HDassert(!entry_ptr->prefetched);
+ /* Sanity check */
+ HDassert(!entry_ptr->prefetched);
- /* Generate the entry's image */
- if (H5C__generate_image(f, cache_ptr, entry_ptr) < 0)
+ /* Generate the entry's image */
+ if (H5C__generate_image(f, cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't generate entry's image")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't generate entry's image")
- } /* end if ( ! (entry_ptr->image_up_to_date) ) */
- } /* end if */
+ } /* end if ( ! (entry_ptr->image_up_to_date) ) */
+ } /* end if */
- /* Finally, write the image to disk.
- *
- * Note that if the H5AC__CLASS_SKIP_WRITES flag is set in the
- * in the entry's type, we silently skip the write. This
- * flag should only be used in test code.
- */
- if (write_entry) {
+ /* Finally, write the image to disk.
+ *
+ * Note that if the H5AC__CLASS_SKIP_WRITES flag is set in the
+ * entry's type, we silently skip the write. This
+ * flag should only be used in test code.
+ */
+ if (write_entry) {
- HDassert(entry_ptr->is_dirty);
+ HDassert(entry_ptr->is_dirty);
#if H5C_DO_SANITY_CHECKS
- if ((cache_ptr->check_write_permitted) && (!(cache_ptr->write_permitted)))
+ if ((cache_ptr->check_write_permitted) && (!(cache_ptr->write_permitted)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Write when writes are always forbidden!?!?!")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Write when writes are always forbidden!?!?!")
#endif /* H5C_DO_SANITY_CHECKS */
- /* Write the image to disk unless the write is suppressed.
- *
- * This happens if both suppress_image_entry_writes and
- * entry_ptr->include_in_image are TRUE, or if the
- * H5AC__CLASS_SKIP_WRITES is set in the entry's type. This
- * flag should only be used in test code
- */
- if (((!suppress_image_entry_writes) || (!entry_ptr->include_in_image)) &&
- (((entry_ptr->type->flags) & H5C__CLASS_SKIP_WRITES) == 0)) {
+ /* Write the image to disk unless the write is suppressed.
+ *
+ * This happens if both suppress_image_entry_writes and
+ * entry_ptr->include_in_image are TRUE, or if the
+ * H5AC__CLASS_SKIP_WRITES is set in the entry's type. This
+ * flag should only be used in test code
+ */
+ if (((!suppress_image_entry_writes) || (!entry_ptr->include_in_image)) &&
+ (((entry_ptr->type->flags) & H5C__CLASS_SKIP_WRITES) == 0)) {
- H5FD_mem_t mem_type = H5FD_MEM_DEFAULT;
+ H5FD_mem_t mem_type = H5FD_MEM_DEFAULT;
#ifdef H5_HAVE_PARALLEL
- if (cache_ptr->coll_write_list) {
+ if (cache_ptr->coll_write_list) {
- if (H5SL_insert(cache_ptr->coll_write_list, entry_ptr, &entry_ptr->addr) < 0)
+ if (H5SL_insert(cache_ptr->coll_write_list, entry_ptr, &entry_ptr->addr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "unable to insert skip list item")
- } /* end if */
- else {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "unable to insert skip list item")
+ } /* end if */
+ else {
#endif /* H5_HAVE_PARALLEL */
- if (entry_ptr->prefetched) {
+ if (entry_ptr->prefetched) {
- HDassert(entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID);
+ HDassert(entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID);
- mem_type = cache_ptr->class_table_ptr[entry_ptr->prefetch_type_id]->mem_type;
- } /* end if */
- else {
+ mem_type = cache_ptr->class_table_ptr[entry_ptr->prefetch_type_id]->mem_type;
+ } /* end if */
+ else {
- mem_type = entry_ptr->type->mem_type;
- }
+ mem_type = entry_ptr->type->mem_type;
+ }
- if (H5F_block_write(f, mem_type, entry_ptr->addr, entry_ptr->size, entry_ptr->image_ptr) <
- 0)
+ if (H5F_block_write(f, mem_type, entry_ptr->addr, entry_ptr->size, entry_ptr->image_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't write image to file")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't write image to file")
#ifdef H5_HAVE_PARALLEL
- }
+ }
#endif /* H5_HAVE_PARALLEL */
- } /* end if */
+ } /* end if */
- /* if the entry has a notify callback, notify it that we have
- * just flushed the entry.
- */
- if ((entry_ptr->type->notify) &&
- ((entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_FLUSH, entry_ptr) < 0))
+ /* if the entry has a notify callback, notify it that we have
+ * just flushed the entry.
+ */
+ if ((entry_ptr->type->notify) &&
+ ((entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_FLUSH, entry_ptr) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client of entry flush")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client of entry flush")
- } /* if ( write_entry ) */
+ } /* if ( write_entry ) */
- /* At this point, all pre-serialize and serialize calls have been
- * made if it was appropriate to make them. Similarly, the entry
- * has been written to disk if desired.
- *
- * Thus it is now safe to update the cache data structures for the
- * flush.
- */
+ /* At this point, all pre-serialize and serialize calls have been
+ * made if it was appropriate to make them. Similarly, the entry
+ * has been written to disk if desired.
+ *
+ * Thus it is now safe to update the cache data structures for the
+ * flush.
+ */
- /* start by updating the statistics */
- if (clear_only) {
+ /* start by updating the statistics */
+ if (clear_only) {
- /* only log a clear if the entry was dirty */
- if (was_dirty) {
+ /* only log a clear if the entry was dirty */
+ if (was_dirty) {
- H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr)
+ H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr)
- } /* end if */
- }
- else if (write_entry) {
+ } /* end if */
+ }
+ else if (write_entry) {
- HDassert(was_dirty);
+ HDassert(was_dirty);
- /* only log a flush if we actually wrote to disk */
- H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr)
+ /* only log a flush if we actually wrote to disk */
+ H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr)
- } /* end else if */
+ } /* end else if */
- /* Note that the algorithm below is (very) similar to the set of operations
- * in H5C_remove_entry() and should be kept in sync with changes
- * to that code. - QAK, 2016/11/30
- */
+ /* Note that the algorithm below is (very) similar to the set of operations
+ * in H5C_remove_entry() and should be kept in sync with changes
+ * to that code. - QAK, 2016/11/30
+ */
- /* Update the cache internal data structures. */
- if (destroy) {
+ /* Update the cache internal data structures. */
+ if (destroy) {
- /* Sanity checks */
- if (take_ownership) {
+ /* Sanity checks */
+ if (take_ownership) {
- HDassert(!destroy_entry);
- }
- else {
+ HDassert(!destroy_entry);
+ }
+ else {
- HDassert(destroy_entry);
- }
+ HDassert(destroy_entry);
+ }
- HDassert(!entry_ptr->is_pinned);
+ HDassert(!entry_ptr->is_pinned);
- /* Update stats, while entry is still in the cache */
- H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership)
+ /* Update stats, while entry is still in the cache */
+ H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership)
- /* If the entry's type has a 'notify' callback and the entry is about
- * to be removed from the cache, send a 'before eviction' notice while
- * the entry is still fully integrated in the cache.
- */
- if ((entry_ptr->type->notify) &&
- ((entry_ptr->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT, entry_ptr) < 0))
+ /* If the entry's type has a 'notify' callback and the entry is about
+ * to be removed from the cache, send a 'before eviction' notice while
+ * the entry is still fully integrated in the cache.
+ */
+ if ((entry_ptr->type->notify) &&
+ ((entry_ptr->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT, entry_ptr) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry to evict")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry to evict")
- /* Update the cache internal data structures as appropriate
- * for a destroy. Specifically:
- *
- * 1) Delete it from the index
- *
- * 2) Delete it from the skip list if requested.
- *
- * 3) Delete it from the collective read access list.
- *
- * 4) Update the replacement policy for eviction
- *
- * 5) Remove it from the tag list for this object
- *
- * Finally, if the destroy_entry flag is set, discard the
- * entry.
- */
- H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL)
+ /* Update the cache internal data structures as appropriate
+ * for a destroy. Specifically:
+ *
+ * 1) Delete it from the index
+ *
+ * 2) Delete it from the skip list if requested.
+ *
+ * 3) Delete it from the collective read access list.
+ *
+ * 4) Update the replacement policy for eviction
+ *
+ * 5) Remove it from the tag list for this object
+ *
+ * Finally, if the destroy_entry flag is set, discard the
+ * entry.
+ */
+ H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL)
- if ((entry_ptr->in_slist) && (del_from_slist_on_destroy)) {
+ if ((entry_ptr->in_slist) && (del_from_slist_on_destroy)) {
- H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush)
- }
+ H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush)
+ }
#ifdef H5_HAVE_PARALLEL
- /* Check for collective read access flag */
- if (entry_ptr->coll_access) {
+ /* Check for collective read access flag */
+ if (entry_ptr->coll_access) {
- entry_ptr->coll_access = FALSE;
+ entry_ptr->coll_access = FALSE;
- H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
+ H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
- } /* end if */
-#endif /* H5_HAVE_PARALLEL */
+ } /* end if */
+#endif /* H5_HAVE_PARALLEL */
- H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, FAIL)
+ H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, FAIL)
- /* Remove entry from tag list */
- if (H5C__untag_entry(cache_ptr, entry_ptr) < 0)
+ /* Remove entry from tag list */
+ if (H5C__untag_entry(cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list")
- /* verify that the entry is no longer part of any flush dependencies */
- HDassert(entry_ptr->flush_dep_nparents == 0);
- HDassert(entry_ptr->flush_dep_nchildren == 0);
+ /* verify that the entry is no longer part of any flush dependencies */
+ HDassert(entry_ptr->flush_dep_nparents == 0);
+ HDassert(entry_ptr->flush_dep_nchildren == 0);
- } /* end if */
- else {
+ } /* end if */
+ else {
- HDassert(clear_only || write_entry);
- HDassert(entry_ptr->is_dirty);
- HDassert((!cache_ptr->slist_enabled) || (entry_ptr->in_slist));
+ HDassert(clear_only || write_entry);
+ HDassert(entry_ptr->is_dirty);
+ HDassert((!cache_ptr->slist_enabled) || (entry_ptr->in_slist));
- /* We are either doing a flush or a clear.
- *
- * A clear and a flush are the same from the point of
- * view of the replacement policy and the slist.
- * Hence no differentiation between them.
- *
- * JRM -- 7/7/07
- */
+ /* We are either doing a flush or a clear.
+ *
+ * A clear and a flush are the same from the point of
+ * view of the replacement policy and the slist.
+ * Hence no differentiation between them.
+ *
+ * JRM -- 7/7/07
+ */
- H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, FAIL)
+ H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, FAIL)
- H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush)
+ H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush)
- /* mark the entry as clean and update the index for
- * entry clean. Also, call the clear callback
- * if defined.
- */
- entry_ptr->is_dirty = FALSE;
+ /* mark the entry as clean and update the index for
+ * entry clean. Also, call the clear callback
+ * if defined.
+ */
+ entry_ptr->is_dirty = FALSE;
- H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr);
+ H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr);
- /* Check for entry changing status and do notifications, etc. */
- if (was_dirty) {
+ /* Check for entry changing status and do notifications, etc. */
+ if (was_dirty) {
- /* If the entry's type has a 'notify' callback send a
- * 'entry cleaned' notice now that the entry is fully
- * integrated into the cache.
- */
- if ((entry_ptr->type->notify) &&
- ((entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0))
+ /* If the entry's type has a 'notify' callback send a
+ * 'entry cleaned' notice now that the entry is fully
+ * integrated into the cache.
+ */
+ if ((entry_ptr->type->notify) &&
+ ((entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify client about entry dirty flag cleared")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify client about entry dirty flag cleared")
- /* Propagate the clean flag up the flush dependency chain
- * if appropriate
- */
- if (entry_ptr->flush_dep_ndirty_children != 0) {
+ /* Propagate the clean flag up the flush dependency chain
+ * if appropriate
+ */
+ if (entry_ptr->flush_dep_ndirty_children != 0) {
- HDassert(entry_ptr->flush_dep_ndirty_children == 0);
- }
+ HDassert(entry_ptr->flush_dep_ndirty_children == 0);
+ }
- if (entry_ptr->flush_dep_nparents > 0) {
+ if (entry_ptr->flush_dep_nparents > 0) {
- if (H5C__mark_flush_dep_clean(entry_ptr) < 0)
+ if (H5C__mark_flush_dep_clean(entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL,
- "Can't propagate flush dep clean flag")
- }
- } /* end if */
- } /* end else */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Can't propagate flush dep clean flag")
+ }
+ } /* end if */
+ } /* end else */
- /* reset the flush_in progress flag */
- entry_ptr->flush_in_progress = FALSE;
+ /* reset the flush_in_progress flag */
+ entry_ptr->flush_in_progress = FALSE;
- /* capture the cache entry address for the log_flush call at the
- * end before the entry_ptr gets freed
- */
- entry_addr = entry_ptr->addr;
+ /* capture the cache entry address for the log_flush call at the
+ * end before the entry_ptr gets freed
+ */
+ entry_addr = entry_ptr->addr;
- /* Internal cache data structures should now be up to date, and
- * consistent with the status of the entry.
- *
- * Now discard the entry if appropriate.
- */
- if (destroy) {
+ /* Internal cache data structures should now be up to date, and
+ * consistent with the status of the entry.
+ *
+ * Now discard the entry if appropriate.
+ */
+ if (destroy) {
- /* Sanity check */
- HDassert(0 == entry_ptr->flush_dep_nparents);
+ /* Sanity check */
+ HDassert(0 == entry_ptr->flush_dep_nparents);
- /* if both suppress_image_entry_frees and entry_ptr->include_in_image
- * are true, simply set entry_ptr->image_ptr to NULL, as we have
- * another pointer to the buffer in an instance of H5C_image_entry_t
- * in cache_ptr->image_entries.
- *
- * Otherwise, free the buffer if it exists.
- */
- if (suppress_image_entry_frees && entry_ptr->include_in_image) {
+ /* if both suppress_image_entry_frees and entry_ptr->include_in_image
+ * are true, simply set entry_ptr->image_ptr to NULL, as we have
+ * another pointer to the buffer in an instance of H5C_image_entry_t
+ * in cache_ptr->image_entries.
+ *
+ * Otherwise, free the buffer if it exists.
+ */
+ if (suppress_image_entry_frees && entry_ptr->include_in_image) {
- entry_ptr->image_ptr = NULL;
- }
- else if (entry_ptr->image_ptr != NULL) {
+ entry_ptr->image_ptr = NULL;
+ }
+ else if (entry_ptr->image_ptr != NULL) {
- entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
- }
+ entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
+ }
- /* If the entry is not a prefetched entry, verify that the flush
- * dependency parents addresses array has been transferred.
- *
- * If the entry is prefetched, the free_isr routine will dispose of
- * the flush dependency parents addresses array if necessary.
- */
- if (!entry_ptr->prefetched) {
+ /* If the entry is not a prefetched entry, verify that the flush
+ * dependency parents addresses array has been transferred.
+ *
+ * If the entry is prefetched, the free_icr routine will dispose of
+ * the flush dependency parents addresses array if necessary.
+ */
+ if (!entry_ptr->prefetched) {
- HDassert(0 == entry_ptr->fd_parent_count);
- HDassert(NULL == entry_ptr->fd_parent_addrs);
+ HDassert(0 == entry_ptr->fd_parent_count);
+ HDassert(NULL == entry_ptr->fd_parent_addrs);
- } /* end if */
+ } /* end if */
- /* Check whether we should free the space in the file that
- * the entry occupies
- */
- if (free_file_space) {
+ /* Check whether we should free the space in the file that
+ * the entry occupies
+ */
+ if (free_file_space) {
- hsize_t fsf_size;
+ hsize_t fsf_size;
- /* Sanity checks */
- HDassert(H5F_addr_defined(entry_ptr->addr));
- HDassert(!H5F_IS_TMP_ADDR(f, entry_ptr->addr));
+ /* Sanity checks */
+ HDassert(H5F_addr_defined(entry_ptr->addr));
+ HDassert(!H5F_IS_TMP_ADDR(f, entry_ptr->addr));
#ifndef NDEBUG
- {
- size_t curr_len;
+ {
+ size_t curr_len;
- /* Get the actual image size for the thing again */
- entry_ptr->type->image_len((void *)entry_ptr, &curr_len);
- HDassert(curr_len == entry_ptr->size);
- }
+ /* Get the actual image size for the thing again */
+ entry_ptr->type->image_len((void *)entry_ptr, &curr_len);
+ HDassert(curr_len == entry_ptr->size);
+ }
#endif /* NDEBUG */
- /* If the file space free size callback is defined, use
- * it to get the size of the block of file space to free.
- * Otherwise use entry_ptr->size.
- */
- if (entry_ptr->type->fsf_size) {
+ /* If the file space free size callback is defined, use
+ * it to get the size of the block of file space to free.
+ * Otherwise use entry_ptr->size.
+ */
+ if (entry_ptr->type->fsf_size) {
- if ((entry_ptr->type->fsf_size)((void *)entry_ptr, &fsf_size) < 0)
+ if ((entry_ptr->type->fsf_size)((void *)entry_ptr, &fsf_size) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to get file space free size")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to get file space free size")
- } /* end if */
- else { /* no file space free size callback -- use entry size */
+ } /* end if */
+ else { /* no file space free size callback -- use entry size */
- fsf_size = entry_ptr->size;
- }
+ fsf_size = entry_ptr->size;
+ }
- /* Release the space on disk */
- if (H5MF_xfree(f, entry_ptr->type->mem_type, entry_ptr->addr, fsf_size) < 0)
+ /* Release the space on disk */
+ if (H5MF_xfree(f, entry_ptr->type->mem_type, entry_ptr->addr, fsf_size) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to free file space for cache entry")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to free file space for cache entry")
- } /* end if ( free_file_space ) */
+ } /* end if ( free_file_space ) */
- /* Reset the pointer to the cache the entry is within. -QAK */
- entry_ptr->cache_ptr = NULL;
+ /* Reset the pointer to the cache the entry is within. -QAK */
+ entry_ptr->cache_ptr = NULL;
- /* increment entries_removed_counter and set
- * last_entry_removed_ptr. As we are likely abuut to
- * free the entry, recall that last_entry_removed_ptr
- * must NEVER be dereferenced.
- *
- * Recall that these fields are maintained to allow functions
- * that perform scans of lists of entries to detect the
- * unexpected removal of entries (via expunge, eviction,
- * or take ownership at present), so that they can re-start
- * their scans if necessary.
- *
- * Also check if the entry we are watching for removal is being
- * removed (usually the 'next' entry for an iteration) and reset
- * it to indicate that it was removed.
- */
- cache_ptr->entries_removed_counter++;
- cache_ptr->last_entry_removed_ptr = entry_ptr;
+ /* increment entries_removed_counter and set
+ * last_entry_removed_ptr. As we are likely about to
+ * free the entry, recall that last_entry_removed_ptr
+ * must NEVER be dereferenced.
+ *
+ * Recall that these fields are maintained to allow functions
+ * that perform scans of lists of entries to detect the
+ * unexpected removal of entries (via expunge, eviction,
+ * or take ownership at present), so that they can re-start
+ * their scans if necessary.
+ *
+ * Also check if the entry we are watching for removal is being
+ * removed (usually the 'next' entry for an iteration) and reset
+ * it to indicate that it was removed.
+ */
+ cache_ptr->entries_removed_counter++;
+ cache_ptr->last_entry_removed_ptr = entry_ptr;
- if (entry_ptr == cache_ptr->entry_watched_for_removal) {
+ if (entry_ptr == cache_ptr->entry_watched_for_removal) {
- cache_ptr->entry_watched_for_removal = NULL;
- }
+ cache_ptr->entry_watched_for_removal = NULL;
+ }
- /* Check for actually destroying the entry in memory */
- /* (As opposed to taking ownership of it) */
- if (destroy_entry) {
+ /* Check for actually destroying the entry in memory */
+ /* (As opposed to taking ownership of it) */
+ if (destroy_entry) {
- if (entry_ptr->is_dirty) {
+ if (entry_ptr->is_dirty) {
- /* Reset dirty flag */
- entry_ptr->is_dirty = FALSE;
+ /* Reset dirty flag */
+ entry_ptr->is_dirty = FALSE;
- /* If the entry's type has a 'notify' callback send a
- * 'entry cleaned' notice now that the entry is fully
- * integrated into the cache.
- */
- if ((entry_ptr->type->notify) &&
- ((entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0))
+ /* If the entry's type has a 'notify' callback send a
+ * 'entry cleaned' notice now that the entry is fully
+ * integrated into the cache.
+ */
+ if ((entry_ptr->type->notify) &&
+ ((entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify client about entry dirty flag cleared")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify client about entry dirty flag cleared")
- } /* end if */
+ } /* end if */
- /* we are about to discard the in core representation --
- * set the magic field to bad magic so we can detect a
- * freed entry if we see one.
- */
- entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
+ /* we are about to discard the in core representation --
+ * set the magic field to bad magic so we can detect a
+ * freed entry if we see one.
+ */
+ entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
- /* verify that the image has been freed */
- HDassert(entry_ptr->image_ptr == NULL);
+ /* verify that the image has been freed */
+ HDassert(entry_ptr->image_ptr == NULL);
- if (entry_ptr->type->free_icr((void *)entry_ptr) < 0)
+ if (entry_ptr->type->free_icr((void *)entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "free_icr callback failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "free_icr callback failed")
- } /* end if */
- else {
+ } /* end if */
+ else {
- HDassert(take_ownership);
+ HDassert(take_ownership);
- /* client is taking ownership of the entry.
- * set bad magic here too so the cache will choke
- * unless the entry is re-inserted properly
- */
- entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
+ /* client is taking ownership of the entry.
+ * set bad magic here too so the cache will choke
+ * unless the entry is re-inserted properly
+ */
+ entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
- } /* end else */
- } /* if (destroy) */
+ } /* end else */
+ } /* if (destroy) */
- /* Check if we have to update the page buffer with cleared entries
- * so it doesn't go out of date
- */
- if (update_page_buffer) {
+ /* Check if we have to update the page buffer with cleared entries
+ * so it doesn't go out of date
+ */
+ if (update_page_buffer) {
- /* Sanity check */
- HDassert(!destroy);
- HDassert(entry_ptr->image_ptr);
+ /* Sanity check */
+ HDassert(!destroy);
+ HDassert(entry_ptr->image_ptr);
- if ((f->shared->page_buf) && (f->shared->page_buf->page_size >= entry_ptr->size)) {
+ if ((f->shared->page_buf) && (f->shared->page_buf->page_size >= entry_ptr->size)) {
- if (H5PB_update_entry(f->shared->page_buf, entry_ptr->addr, entry_ptr->size,
- entry_ptr->image_ptr) > 0)
+ if (H5PB_update_entry(f->shared->page_buf, entry_ptr->addr, entry_ptr->size,
+ entry_ptr->image_ptr) > 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Failed to update PB with metadata cache")
- } /* end if */
- } /* end if */
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Failed to update PB with metadata cache")
+ } /* end if */
+ } /* end if */
- if (cache_ptr->log_flush) {
+ if (cache_ptr->log_flush) {
- if ((cache_ptr->log_flush)(cache_ptr, entry_addr, was_dirty, flags) < 0)
+ if ((cache_ptr->log_flush)(cache_ptr, entry_addr, was_dirty, flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "log_flush callback failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "log_flush callback failed")
- } /* end if */
+ } /* end if */
done:
- HDassert((ret_value != SUCCEED) || (destroy_entry) || (!entry_ptr->flush_in_progress));
+ HDassert((ret_value != SUCCEED) || (destroy_entry) || (!entry_ptr->flush_in_progress));
- HDassert((ret_value != SUCCEED) || (destroy_entry) || (take_ownership) || (!entry_ptr->is_dirty));
+ HDassert((ret_value != SUCCEED) || (destroy_entry) || (take_ownership) || (!entry_ptr->is_dirty));
- FUNC_LEAVE_NOAPI(ret_value)
+ FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__flush_single_entry() */
+} /* H5C__flush_single_entry() */
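/*
 * Editor's sketch (not part of the diff): a typical "flush and evict" call to
 * H5C__flush_single_entry(), based only on the flags visible in this file.
 * The helper name toy_evict_entry is hypothetical; real callers may need
 * additional flags or locking.
 */
static herr_t
toy_evict_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr)
{
    /* Write the entry to disk if it is dirty, then remove it from the index,
     * the skip list and the replacement policy, and free its in-core
     * representation.
     */
    return H5C__flush_single_entry(f, entry_ptr,
                                   H5C__FLUSH_INVALIDATE_FLAG |
                                       H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG);
}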
- /*-------------------------------------------------------------------------
- *
- * Function: H5C__verify_len_eoa
- *
- * Purpose: Verify that 'len' does not exceed eoa when 'actual' is
- * false i.e. 'len" is the initial speculative length from
- * get_load_size callback with null image pointer.
- * If exceed, adjust 'len' accordingly.
- *
- * Verify that 'len' should not exceed eoa when 'actual' is
- * true i.e. 'len' is the actual length from get_load_size
- * callback with non-null image pointer.
- * If exceed, return error.
- *
- * Return: FAIL if error is detected, SUCCEED otherwise.
- *
- * Programmer: Vailin Choi
- * 9/6/15
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__verify_len_eoa(H5F_t * f, const H5C_class_t *type, haddr_t addr, size_t *len,
- hbool_t actual)
- {
- H5FD_mem_t cooked_type; /* Modified type, accounting for switching global heaps */
- haddr_t eoa; /* End-of-allocation in the file */
- herr_t ret_value = SUCCEED; /* Return value */
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__verify_len_eoa
+ *
+ * Purpose: Verify that 'len' does not exceed the EOA when 'actual' is
+ * false, i.e. 'len' is the initial speculative length from the
+ * get_load_size callback with a null image pointer.
+ * If it does, adjust 'len' accordingly.
+ *
+ * Verify that 'len' does not exceed the EOA when 'actual' is
+ * true, i.e. 'len' is the actual length from the get_load_size
+ * callback with a non-null image pointer.
+ * If it does, return an error.
+ *
+ * Return: FAIL if error is detected, SUCCEED otherwise.
+ *
+ * Programmer: Vailin Choi
+ * 9/6/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__verify_len_eoa(H5F_t *f, const H5C_class_t *type, haddr_t addr, size_t *len, hbool_t actual)
+{
+ H5FD_mem_t cooked_type; /* Modified type, accounting for switching global heaps */
+ haddr_t eoa; /* End-of-allocation in the file */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_STATIC
+ FUNC_ENTER_STATIC
- /* if type == H5FD_MEM_GHEAP, H5F_block_read() forces
- * type to H5FD_MEM_DRAW via its call to H5F__accum_read().
- * Thus we do the same for purposes of computing the EOA
- * for sanity checks.
- */
- cooked_type = (type->mem_type == H5FD_MEM_GHEAP) ? H5FD_MEM_DRAW : type->mem_type;
+ /* if type == H5FD_MEM_GHEAP, H5F_block_read() forces
+ * type to H5FD_MEM_DRAW via its call to H5F__accum_read().
+ * Thus we do the same for purposes of computing the EOA
+ * for sanity checks.
+ */
+ cooked_type = (type->mem_type == H5FD_MEM_GHEAP) ? H5FD_MEM_DRAW : type->mem_type;
- /* Get the file's end-of-allocation value */
- eoa = H5F_get_eoa(f, cooked_type);
- if (!H5F_addr_defined(eoa))
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid EOA address for file")
+ /* Get the file's end-of-allocation value */
+ eoa = H5F_get_eoa(f, cooked_type);
+ if (!H5F_addr_defined(eoa))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid EOA address for file")
- /* Check for bad address in general */
- if (H5F_addr_gt(addr, eoa))
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "address of object past end of allocation")
+ /* Check for bad address in general */
+ if (H5F_addr_gt(addr, eoa))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "address of object past end of allocation")
- /* Check if the amount of data to read will be past the EOA */
- if (H5F_addr_gt((addr + *len), eoa)) {
- if (actual)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "actual len exceeds EOA")
- else
- /* Trim down the length of the metadata */
- *len = (size_t)(eoa - addr);
- } /* end if */
+ /* Check if the amount of data to read will be past the EOA */
+ if (H5F_addr_gt((addr + *len), eoa)) {
+ if (actual)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "actual len exceeds EOA")
+ else
+ /* Trim down the length of the metadata */
+ *len = (size_t)(eoa - addr);
+ } /* end if */
- if (*len <= 0)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "len not positive after adjustment for EOA")
+ if (*len <= 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "len not positive after adjustment for EOA")
done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__verify_len_eoa() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__verify_len_eoa() */
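/*
 * Editor's note (not part of the diff): a worked example of the trimming
 * performed above.  Suppose the EOA for the cooked type is 1000, addr is 980
 * and the speculative length is 64:
 *
 *     addr + *len = 1044 > eoa            -- read would run past the EOA
 *     actual == FALSE (speculative read)  -- trim instead of failing:
 *     *len = (size_t)(eoa - addr) = 20
 *
 * With actual == TRUE the same condition is reported as an error instead.
 */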
- /*-------------------------------------------------------------------------
- *
- * Function: H5C_load_entry
- *
- * Purpose: Attempt to load the entry at the specified disk address
- * and with the specified type into memory. If successful.
- * return the in memory address of the entry. Return NULL
- * on failure.
- *
- * Note that this function simply loads the entry into
- * core. It does not insert it into the cache.
- *
- * Return: Non-NULL on success / NULL on failure.
- *
- * Programmer: John Mainzer, 5/18/04
- *
- *-------------------------------------------------------------------------
- */
- static void *H5C_load_entry(H5F_t * f,
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_load_entry
+ *
+ * Purpose: Attempt to load the entry at the specified disk address
+ * and with the specified type into memory. If successful,
+ * return the in-memory address of the entry. Return NULL
+ * on failure.
+ *
+ * Note that this function simply loads the entry into
+ * core. It does not insert it into the cache.
+ *
+ * Return: Non-NULL on success / NULL on failure.
+ *
+ * Programmer: John Mainzer, 5/18/04
+ *
+ *-------------------------------------------------------------------------
+ */
+static void *
+H5C_load_entry(H5F_t *f,
#ifdef H5_HAVE_PARALLEL
- hbool_t coll_access,
+ hbool_t coll_access,
#endif /* H5_HAVE_PARALLEL */
- const H5C_class_t *type, haddr_t addr, void *udata)
- {
- hbool_t dirty = FALSE; /* Flag indicating whether thing was dirtied during deserialize */
- uint8_t *image = NULL; /* Buffer for disk image */
- void *thing = NULL; /* Pointer to thing loaded */
- H5C_cache_entry_t *entry = NULL; /* Alias for thing loaded, as cache entry */
- size_t len; /* Size of image in file */
+ const H5C_class_t *type, haddr_t addr, void *udata)
+{
+ hbool_t dirty = FALSE; /* Flag indicating whether thing was dirtied during deserialize */
+ uint8_t *image = NULL; /* Buffer for disk image */
+ void *thing = NULL; /* Pointer to thing loaded */
+ H5C_cache_entry_t *entry = NULL; /* Alias for thing loaded, as cache entry */
+ size_t len; /* Size of image in file */
#ifdef H5_HAVE_PARALLEL
- int mpi_rank = 0; /* MPI process rank */
- MPI_Comm comm = MPI_COMM_NULL; /* File MPI Communicator */
- int mpi_code; /* MPI error code */
-#endif /* H5_HAVE_PARALLEL */
- void *ret_value = NULL; /* Return value */
+ int mpi_rank = 0; /* MPI process rank */
+ MPI_Comm comm = MPI_COMM_NULL; /* File MPI Communicator */
+ int mpi_code; /* MPI error code */
+#endif /* H5_HAVE_PARALLEL */
+ void *ret_value = NULL; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_NOAPI_NOINIT
- /* Sanity checks */
- HDassert(f);
- HDassert(f->shared);
- HDassert(f->shared->cache);
- HDassert(type);
- HDassert(H5F_addr_defined(addr));
- HDassert(type->get_initial_load_size);
- if (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)
- HDassert(type->get_final_load_size);
- else
- HDassert(NULL == type->get_final_load_size);
- HDassert(type->deserialize);
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->cache);
+ HDassert(type);
+ HDassert(H5F_addr_defined(addr));
+ HDassert(type->get_initial_load_size);
+ if (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)
+ HDassert(type->get_final_load_size);
+ else
+ HDassert(NULL == type->get_final_load_size);
+ HDassert(type->deserialize);
- /* Can't see how skip reads could be usefully combined with
- * the speculative read flag. Hence disallow.
- */
- HDassert(
- !((type->flags & H5C__CLASS_SKIP_READS) && (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)));
-
- /* Call the get_initial_load_size callback, to retrieve the initial size of image */
- if (type->get_initial_load_size(udata, &len) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "can't retrieve image size")
- HDassert(len > 0);
-
- /* Check for possible speculative read off the end of the file */
- if (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)
- if (H5C__verify_len_eoa(f, type, addr, &len, FALSE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid len with respect to EOA")
-
- /* Allocate the buffer for reading the on-disk entry image */
- if (NULL == (image = (uint8_t *)H5MM_malloc(len + H5C_IMAGE_EXTRA_SPACE)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for on disk image buffer")
+ /* Can't see how skip reads could be usefully combined with
+ * the speculative read flag. Hence disallow.
+ */
+ HDassert(!((type->flags & H5C__CLASS_SKIP_READS) && (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)));
+
+ /* Call the get_initial_load_size callback, to retrieve the initial size of image */
+ if (type->get_initial_load_size(udata, &len) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "can't retrieve image size")
+ HDassert(len > 0);
+
+ /* Check for possible speculative read off the end of the file */
+ if (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)
+ if (H5C__verify_len_eoa(f, type, addr, &len, FALSE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid len with respect to EOA")
+
+ /* Allocate the buffer for reading the on-disk entry image */
+ if (NULL == (image = (uint8_t *)H5MM_malloc(len + H5C_IMAGE_EXTRA_SPACE)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for on disk image buffer")
#if H5C_DO_MEMORY_SANITY_CHECKS
- H5MM_memcpy(image + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
+ H5MM_memcpy(image + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
#ifdef H5_HAVE_PARALLEL
- if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) {
- if ((mpi_rank = H5F_mpi_get_rank(f)) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "Can't get MPI rank")
- if ((comm = H5F_mpi_get_comm(f)) == MPI_COMM_NULL)
- HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "get_comm request failed")
- } /* end if */
-#endif /* H5_HAVE_PARALLEL */
+ if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) {
+ if ((mpi_rank = H5F_mpi_get_rank(f)) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "Can't get MPI rank")
+ if ((comm = H5F_mpi_get_comm(f)) == MPI_COMM_NULL)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "get_comm request failed")
+ } /* end if */
+#endif /* H5_HAVE_PARALLEL */
- /* Get the on-disk entry image */
- if (0 == (type->flags & H5C__CLASS_SKIP_READS)) {
- unsigned tries, max_tries; /* The # of read attempts */
- unsigned retries; /* The # of retries */
- htri_t chk_ret; /* return from verify_chksum callback */
- size_t actual_len = len; /* The actual length, after speculative reads have been resolved */
- uint64_t nanosec = 1; /* # of nanoseconds to sleep between retries */
- void *new_image; /* Pointer to image */
- hbool_t len_changed = TRUE; /* Whether to re-check speculative entries */
-
- /* Get the # of read attempts */
- max_tries = tries = H5F_GET_READ_ATTEMPTS(f);
-
- /*
- * This do/while loop performs the following till the metadata checksum
- * is correct or the file's number of allowed read attempts are reached.
- * --read the metadata
- * --determine the actual size of the metadata
- * --perform checksum verification
- */
- do {
- if (actual_len != len) {
- if (NULL == (new_image = H5MM_realloc(image, len + H5C_IMAGE_EXTRA_SPACE)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()")
- image = (uint8_t *)new_image;
+ /* Get the on-disk entry image */
+ if (0 == (type->flags & H5C__CLASS_SKIP_READS)) {
+ unsigned tries, max_tries; /* The # of read attempts */
+ unsigned retries; /* The # of retries */
+ htri_t chk_ret; /* return from verify_chksum callback */
+ size_t actual_len = len; /* The actual length, after speculative reads have been resolved */
+ uint64_t nanosec = 1; /* # of nanoseconds to sleep between retries */
+ void *new_image; /* Pointer to image */
+ hbool_t len_changed = TRUE; /* Whether to re-check speculative entries */
+
+ /* Get the # of read attempts */
+ max_tries = tries = H5F_GET_READ_ATTEMPTS(f);
+
+ /*
+ * This do/while loop performs the following until the metadata checksum
+ * is correct or the file's allowed number of read attempts is reached.
+ * --read the metadata
+ * --determine the actual size of the metadata
+ * --perform checksum verification
+ */
+ do {
+ if (actual_len != len) {
+ if (NULL == (new_image = H5MM_realloc(image, len + H5C_IMAGE_EXTRA_SPACE)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()")
+ image = (uint8_t *)new_image;
#if H5C_DO_MEMORY_SANITY_CHECKS
- H5MM_memcpy(image + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
-#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
- } /* end if */
+ H5MM_memcpy(image + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+ } /* end if */
#ifdef H5_HAVE_PARALLEL
- if (!coll_access || 0 == mpi_rank) {
+ if (!coll_access || 0 == mpi_rank) {
#endif /* H5_HAVE_PARALLEL */
- if (H5F_block_read(f, type->mem_type, addr, len, image) < 0) {
+ if (H5F_block_read(f, type->mem_type, addr, len, image) < 0) {
#ifdef H5_HAVE_PARALLEL
- if (coll_access) {
- /* Push an error, but still participate in following MPI_Bcast */
- HDmemset(image, 0, len);
- HDONE_ERROR(H5E_CACHE, H5E_READERROR, NULL, "Can't read image*")
- }
- else
-#endif
- HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL, "Can't read image*")
+ if (coll_access) {
+ /* Push an error, but still participate in following MPI_Bcast */
+ HDmemset(image, 0, len);
+ HDONE_ERROR(H5E_CACHE, H5E_READERROR, NULL, "Can't read image*")
}
+ else
+#endif
+ HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL, "Can't read image*")
+ }
#ifdef H5_HAVE_PARALLEL
- } /* end if */
- /* if the collective metadata read optimization is turned on,
- * bcast the metadata read from process 0 to all ranks in the file
- * communicator
- */
- if (coll_access) {
- int buf_size;
+ } /* end if */
+ /* if the collective metadata read optimization is turned on,
+ * bcast the metadata read from process 0 to all ranks in the file
+ * communicator
+ */
+ if (coll_access) {
+ int buf_size;
- H5_CHECKED_ASSIGN(buf_size, int, len, size_t);
- if (MPI_SUCCESS != (mpi_code = MPI_Bcast(image, buf_size, MPI_BYTE, 0, comm)))
- HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code)
- } /* end if */
-#endif /* H5_HAVE_PARALLEL */
+ H5_CHECKED_ASSIGN(buf_size, int, len, size_t);
+ if (MPI_SUCCESS != (mpi_code = MPI_Bcast(image, buf_size, MPI_BYTE, 0, comm)))
+ HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code)
+ } /* end if */
+#endif /* H5_HAVE_PARALLEL */
- /* If the entry could be read speculatively and the length is still
- * changing, check for updating the actual size
- */
- if ((type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG) && len_changed) {
- /* Retrieve the actual length */
- actual_len = len;
- if (type->get_final_load_size(image, len, udata, &actual_len) < 0)
- continue; /* Transfer control to while() and count towards retries */
-
- /* Check for the length changing */
- if (actual_len != len) {
- /* Verify that the length isn't past the EOA for the file */
- if (H5C__verify_len_eoa(f, type, addr, &actual_len, TRUE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "actual_len exceeds EOA")
-
- /* Expand buffer to new size */
- if (NULL == (new_image = H5MM_realloc(image, actual_len + H5C_IMAGE_EXTRA_SPACE)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()")
- image = (uint8_t *)new_image;
+ /* If the entry could be read speculatively and the length is still
+ * changing, check for updating the actual size
+ */
+ if ((type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG) && len_changed) {
+ /* Retrieve the actual length */
+ actual_len = len;
+ if (type->get_final_load_size(image, len, udata, &actual_len) < 0)
+ continue; /* Transfer control to while() and count towards retries */
+
+ /* Check for the length changing */
+ if (actual_len != len) {
+ /* Verify that the length isn't past the EOA for the file */
+ if (H5C__verify_len_eoa(f, type, addr, &actual_len, TRUE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "actual_len exceeds EOA")
+
+ /* Expand buffer to new size */
+ if (NULL == (new_image = H5MM_realloc(image, actual_len + H5C_IMAGE_EXTRA_SPACE)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()")
+ image = (uint8_t *)new_image;
#if H5C_DO_MEMORY_SANITY_CHECKS
- H5MM_memcpy(image + actual_len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
+ H5MM_memcpy(image + actual_len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
- if (actual_len > len) {
+ if (actual_len > len) {
#ifdef H5_HAVE_PARALLEL
- if (!coll_access || 0 == mpi_rank) {
+ if (!coll_access || 0 == mpi_rank) {
#endif /* H5_HAVE_PARALLEL */
- /* If the thing's image needs to be bigger for a speculatively
- * loaded thing, go get the on-disk image again (the extra portion).
- */
- if (H5F_block_read(f, type->mem_type, addr + len, actual_len - len,
- image + len) < 0) {
+ /* If the thing's image needs to be bigger for a speculatively
+ * loaded thing, go get the on-disk image again (the extra portion).
+ */
+ if (H5F_block_read(f, type->mem_type, addr + len, actual_len - len, image + len) <
+ 0) {
#ifdef H5_HAVE_PARALLEL
- if (coll_access) {
- /* Push an error, but still participate in following MPI_Bcast */
- HDmemset(image + len, 0, actual_len - len);
- HDONE_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't read image")
- }
- else
-#endif
- HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't read image")
+ if (coll_access) {
+ /* Push an error, but still participate in following MPI_Bcast */
+ HDmemset(image + len, 0, actual_len - len);
+ HDONE_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't read image")
}
+ else
+#endif
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't read image")
+ }
#ifdef H5_HAVE_PARALLEL
- }
- /* If the collective metadata read optimization is turned on,
- * Bcast the metadata read from process 0 to all ranks in the file
- * communicator */
- if (coll_access) {
- int buf_size;
-
- H5_CHECKED_ASSIGN(buf_size, int, actual_len - len, size_t);
- if (MPI_SUCCESS !=
- (mpi_code = MPI_Bcast(image + len, buf_size, MPI_BYTE, 0, comm)))
- HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code)
- } /* end if */
-#endif /* H5_HAVE_PARALLEL */
- } /* end if */
- } /* end if (actual_len != len) */
- else {
- /* The length has stabilized */
- len_changed = FALSE;
+ }
+ /* If the collective metadata read optimization is turned on,
+ * Bcast the metadata read from process 0 to all ranks in the file
+ * communicator */
+ if (coll_access) {
+ int buf_size;
- /* Set the final length */
- len = actual_len;
- } /* else */
- } /* end if */
+ H5_CHECKED_ASSIGN(buf_size, int, actual_len - len, size_t);
+ if (MPI_SUCCESS !=
+ (mpi_code = MPI_Bcast(image + len, buf_size, MPI_BYTE, 0, comm)))
+ HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code)
+ } /* end if */
+#endif /* H5_HAVE_PARALLEL */
+ } /* end if */
+ } /* end if (actual_len != len) */
+ else {
+ /* The length has stabilized */
+ len_changed = FALSE;
- /* If there's no way to verify the checksum for a piece of metadata
- * (usually because there's no checksum in the file), leave now
- */
- if (type->verify_chksum == NULL)
- break;
+ /* Set the final length */
+ len = actual_len;
+ } /* else */
+ } /* end if */
- /* Verify the checksum for the metadata image */
- if ((chk_ret = type->verify_chksum(image, actual_len, udata)) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "failure from verify_chksum callback")
- if (chk_ret == TRUE)
- break;
+ /* If there's no way to verify the checksum for a piece of metadata
+ * (usually because there's no checksum in the file), leave now
+ */
+ if (type->verify_chksum == NULL)
+ break;
- /* Sleep for some time */
- H5_nanosleep(nanosec);
- nanosec *= 2; /* Double the sleep time next time */
- } while (--tries);
+ /* Verify the checksum for the metadata image */
+ if ((chk_ret = type->verify_chksum(image, actual_len, udata)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "failure from verify_chksum callback")
+ if (chk_ret == TRUE)
+ break;
- /* Check for too many tries */
- if (tries == 0)
- HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL,
- "incorrect metadata checksum after all read attempts")
+ /* Sleep for some time */
+ H5_nanosleep(nanosec);
+ nanosec *= 2; /* Double the sleep time next time */
+ } while (--tries);
- /* Calculate and track the # of retries */
- retries = max_tries - tries;
- if (retries) /* Does not track 0 retry */
- if (H5F_track_metadata_read_retries(f, (unsigned)type->mem_type, retries) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "cannot track read tries = %u ", retries)
+ /* Check for too many tries */
+ if (tries == 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL, "incorrect metadata checksum after all read attempts")
- /* Set the final length (in case it wasn't set earlier) */
- len = actual_len;
- } /* end if !H5C__CLASS_SKIP_READS */
+ /* Calculate and track the # of retries */
+ retries = max_tries - tries;
+ if (retries) /* Does not track 0 retry */
+ if (H5F_track_metadata_read_retries(f, (unsigned)type->mem_type, retries) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "cannot track read tries = %u ", retries)
- /* Deserialize the on-disk image into the native memory form */
- if (NULL == (thing = type->deserialize(image, len, udata, &dirty)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "Can't deserialize image")
+ /* Set the final length (in case it wasn't set earlier) */
+ len = actual_len;
+ } /* end if !H5C__CLASS_SKIP_READS */
- entry = (H5C_cache_entry_t *)thing;
+ /* Deserialize the on-disk image into the native memory form */
+ if (NULL == (thing = type->deserialize(image, len, udata, &dirty)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "Can't deserialize image")
- /* In general, an entry should be clean just after it is loaded.
- *
- * However, when this code is used in the metadata cache, it is
- * possible that object headers will be dirty at this point, as
- * the deserialize function will alter object headers if necessary to
- * fix an old bug.
- *
- * In the following assert:
- *
- * HDassert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6 ) );
- *
- * note that type ids 5 & 6 are associated with object headers in the
- * metadata cache.
- *
- * When we get to using H5C for other purposes, we may wish to
- * tighten up the assert so that the loophole only applies to the
- * metadata cache.
- */
+ entry = (H5C_cache_entry_t *)thing;
+
+ /* In general, an entry should be clean just after it is loaded.
+ *
+ * However, when this code is used in the metadata cache, it is
+ * possible that object headers will be dirty at this point, as
+ * the deserialize function will alter object headers if necessary to
+ * fix an old bug.
+ *
+ * In the following assert:
+ *
+ * HDassert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6 ) );
+ *
+ * note that type ids 5 & 6 are associated with object headers in the
+ * metadata cache.
+ *
+ * When we get to using H5C for other purposes, we may wish to
+ * tighten up the assert so that the loophole only applies to the
+ * metadata cache.
+ */
- HDassert((dirty == FALSE) || (type->id == 5 || type->id == 6));
-
- entry->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
- entry->cache_ptr = f->shared->cache;
- entry->addr = addr;
- entry->size = len;
- HDassert(entry->size < H5C_MAX_ENTRY_SIZE);
- entry->image_ptr = image;
- entry->image_up_to_date = !dirty;
- entry->type = type;
- entry->is_dirty = dirty;
- entry->dirtied = FALSE;
- entry->is_protected = FALSE;
- entry->is_read_only = FALSE;
- entry->ro_ref_count = 0;
- entry->is_pinned = FALSE;
- entry->in_slist = FALSE;
- entry->flush_marker = FALSE;
+ HDassert((dirty == FALSE) || (type->id == 5 || type->id == 6));
+
+ entry->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
+ entry->cache_ptr = f->shared->cache;
+ entry->addr = addr;
+ entry->size = len;
+ HDassert(entry->size < H5C_MAX_ENTRY_SIZE);
+ entry->image_ptr = image;
+ entry->image_up_to_date = !dirty;
+ entry->type = type;
+ entry->is_dirty = dirty;
+ entry->dirtied = FALSE;
+ entry->is_protected = FALSE;
+ entry->is_read_only = FALSE;
+ entry->ro_ref_count = 0;
+ entry->is_pinned = FALSE;
+ entry->in_slist = FALSE;
+ entry->flush_marker = FALSE;
#ifdef H5_HAVE_PARALLEL
- entry->clear_on_unprotect = FALSE;
- entry->flush_immediately = FALSE;
- entry->coll_access = coll_access;
+ entry->clear_on_unprotect = FALSE;
+ entry->flush_immediately = FALSE;
+ entry->coll_access = coll_access;
#endif /* H5_HAVE_PARALLEL */
- entry->flush_in_progress = FALSE;
- entry->destroy_in_progress = FALSE;
-
- entry->ring = H5C_RING_UNDEFINED;
-
- /* Initialize flush dependency fields */
- entry->flush_dep_parent = NULL;
- entry->flush_dep_nparents = 0;
- entry->flush_dep_parent_nalloc = 0;
- entry->flush_dep_nchildren = 0;
- entry->flush_dep_ndirty_children = 0;
- entry->flush_dep_nunser_children = 0;
- entry->ht_next = NULL;
- entry->ht_prev = NULL;
- entry->il_next = NULL;
- entry->il_prev = NULL;
-
- entry->next = NULL;
- entry->prev = NULL;
+ entry->flush_in_progress = FALSE;
+ entry->destroy_in_progress = FALSE;
+
+ entry->ring = H5C_RING_UNDEFINED;
+
+ /* Initialize flush dependency fields */
+ entry->flush_dep_parent = NULL;
+ entry->flush_dep_nparents = 0;
+ entry->flush_dep_parent_nalloc = 0;
+ entry->flush_dep_nchildren = 0;
+ entry->flush_dep_ndirty_children = 0;
+ entry->flush_dep_nunser_children = 0;
+ entry->ht_next = NULL;
+ entry->ht_prev = NULL;
+ entry->il_next = NULL;
+ entry->il_prev = NULL;
+
+ entry->next = NULL;
+ entry->prev = NULL;
#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
- entry->aux_next = NULL;
- entry->aux_prev = NULL;
+ entry->aux_next = NULL;
+ entry->aux_prev = NULL;
#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
#ifdef H5_HAVE_PARALLEL
- entry->coll_next = NULL;
- entry->coll_prev = NULL;
+ entry->coll_next = NULL;
+ entry->coll_prev = NULL;
#endif /* H5_HAVE_PARALLEL */
- /* initialize cache image related fields */
- entry->include_in_image = FALSE;
- entry->lru_rank = 0;
- entry->image_dirty = FALSE;
- entry->fd_parent_count = 0;
- entry->fd_parent_addrs = NULL;
- entry->fd_child_count = 0;
- entry->fd_dirty_child_count = 0;
- entry->image_fd_height = 0;
- entry->prefetched = FALSE;
- entry->prefetch_type_id = 0;
- entry->age = 0;
- entry->prefetched_dirty = FALSE;
+ /* initialize cache image related fields */
+ entry->include_in_image = FALSE;
+ entry->lru_rank = 0;
+ entry->image_dirty = FALSE;
+ entry->fd_parent_count = 0;
+ entry->fd_parent_addrs = NULL;
+ entry->fd_child_count = 0;
+ entry->fd_dirty_child_count = 0;
+ entry->image_fd_height = 0;
+ entry->prefetched = FALSE;
+ entry->prefetch_type_id = 0;
+ entry->age = 0;
+ entry->prefetched_dirty = FALSE;
#ifndef NDEBUG /* debugging field */
- entry->serialization_count = 0;
+ entry->serialization_count = 0;
#endif /* NDEBUG */
- entry->tl_next = NULL;
- entry->tl_prev = NULL;
- entry->tag_info = NULL;
+ entry->tl_next = NULL;
+ entry->tl_prev = NULL;
+ entry->tag_info = NULL;
- H5C__RESET_CACHE_ENTRY_STATS(entry);
+ H5C__RESET_CACHE_ENTRY_STATS(entry);
- ret_value = thing;
+ ret_value = thing;
done:
- /* Cleanup on error */
- if (NULL == ret_value) {
- /* Release resources */
- if (thing && type->free_icr(thing) < 0)
- HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "free_icr callback failed")
- if (image)
- image = (uint8_t *)H5MM_xfree(image);
- } /* end if */
+ /* Cleanup on error */
+ if (NULL == ret_value) {
+ /* Release resources */
+ if (thing && type->free_icr(thing) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "free_icr callback failed")
+ if (image)
+ image = (uint8_t *)H5MM_xfree(image);
+ } /* end if */
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C_load_entry() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_load_entry() */
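The core of the read loop above is a retry-with-backoff pattern: read the image, verify its checksum, and on failure sleep for an exponentially growing interval before trying again, up to the file's configured number of read attempts. A stripped-down sketch of that skeleton, using hypothetical callback types and an assumed sleep helper rather than the real cache client hooks:

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

/* Hypothetical callbacks standing in for the client's read and
 * verify_chksum hooks; these are not HDF5 API types.
 */
typedef bool (*read_fn_t)(void *buf, size_t len, void *ctx);
typedef bool (*verify_fn_t)(const void *buf, size_t len, void *ctx);

extern void sleep_nanoseconds(uint64_t ns);   /* assumed helper */

/* Return true once a read passes verification, false after 'max_tries'
 * failed attempts (assumes max_tries >= 1).  The sleep interval doubles
 * after each failure.
 */
static bool
read_with_retries(read_fn_t read_cb, verify_fn_t verify_cb,
                  void *buf, size_t len, void *ctx, unsigned max_tries)
{
    uint64_t nanosec = 1;
    unsigned tries   = max_tries;

    do {
        if (read_cb(buf, len, ctx) && verify_cb(buf, len, ctx))
            return true;
        sleep_nanoseconds(nanosec);
        nanosec *= 2;
    } while (--tries);

    return false;
}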
- /*-------------------------------------------------------------------------
- *
- * Function: H5C__make_space_in_cache
- *
- * Purpose: Attempt to evict cache entries until the index_size
- * is at least needed_space below max_cache_size.
- *
- * In passing, also attempt to bring cLRU_list_size to a
- * value greater than min_clean_size.
- *
- * Depending on circumstances, both of these goals may
- * be impossible, as in parallel mode, we must avoid generating
- * a write as part of a read (to avoid deadlock in collective
- * I/O), and in all cases, it is possible (though hopefully
- * highly unlikely) that the protected list may exceed the
- * maximum size of the cache.
- *
- * Thus the function simply does its best, returning success
- * unless an error is encountered.
- *
- * Observe that this function cannot occasion a read.
- *
- * Return: Non-negative on success/Negative on failure.
- *
- * Programmer: John Mainzer, 5/14/04
- *
- *-------------------------------------------------------------------------
- */
- herr_t H5C__make_space_in_cache(H5F_t * f, size_t space_needed, hbool_t write_permitted)
- {
- H5C_t *cache_ptr = f->shared->cache;
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__make_space_in_cache
+ *
+ * Purpose: Attempt to evict cache entries until the index_size
+ * is at least space_needed below max_cache_size.
+ *
+ * In passing, also attempt to bring cLRU_list_size to a
+ * value greater than min_clean_size.
+ *
+ * Depending on circumstances, both of these goals may
+ * be impossible, as in parallel mode, we must avoid generating
+ * a write as part of a read (to avoid deadlock in collective
+ * I/O), and in all cases, it is possible (though hopefully
+ * highly unlikely) that the protected list may exceed the
+ * maximum size of the cache.
+ *
+ * Thus the function simply does its best, returning success
+ * unless an error is encountered.
+ *
+ * Observe that this function cannot occasion a read.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 5/14/04
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted)
+{
+ H5C_t *cache_ptr = f->shared->cache;
#if H5C_COLLECT_CACHE_STATS
- int32_t clean_entries_skipped = 0;
- int32_t dirty_pf_entries_skipped = 0;
- int32_t total_entries_scanned = 0;
+ int32_t clean_entries_skipped = 0;
+ int32_t dirty_pf_entries_skipped = 0;
+ int32_t total_entries_scanned = 0;
#endif /* H5C_COLLECT_CACHE_STATS */
- uint32_t entries_examined = 0;
- uint32_t initial_list_len;
- size_t empty_space;
- hbool_t reentrant_call = FALSE;
- hbool_t prev_is_dirty = FALSE;
- hbool_t didnt_flush_entry = FALSE;
- hbool_t restart_scan;
- H5C_cache_entry_t *entry_ptr;
- H5C_cache_entry_t *prev_ptr;
- H5C_cache_entry_t *next_ptr;
- uint32_t num_corked_entries = 0;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
+ uint32_t entries_examined = 0;
+ uint32_t initial_list_len;
+ size_t empty_space;
+ hbool_t reentrant_call = FALSE;
+ hbool_t prev_is_dirty = FALSE;
+ hbool_t didnt_flush_entry = FALSE;
+ hbool_t restart_scan;
+ H5C_cache_entry_t *entry_ptr;
+ H5C_cache_entry_t *prev_ptr;
+ H5C_cache_entry_t *next_ptr;
+ uint32_t num_corked_entries = 0;
+ herr_t ret_value = SUCCEED; /* Return value */
- /* Sanity checks */
- HDassert(f);
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size));
-
- /* check to see if cache_ptr->msic_in_progress is TRUE. If it, this
- * is a re-entrant call via a client callback called in the make
- * space in cache process. To avoid an infinite recursion, set
- * reentrant_call to TRUE, and goto done.
- */
- if (cache_ptr->msic_in_progress) {
- reentrant_call = TRUE;
- HGOTO_DONE(SUCCEED);
- } /* end if */
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size));
- cache_ptr->msic_in_progress = TRUE;
+ /* Check to see if cache_ptr->msic_in_progress is TRUE. If it is, this
+ * is a re-entrant call via a client callback called in the make
+ * space in cache process. To avoid an infinite recursion, set
+ * reentrant_call to TRUE, and goto done.
+ */
+ if (cache_ptr->msic_in_progress) {
+ reentrant_call = TRUE;
+ HGOTO_DONE(SUCCEED);
+ } /* end if */
- if (write_permitted) {
- restart_scan = FALSE;
- initial_list_len = cache_ptr->LRU_list_len;
- entry_ptr = cache_ptr->LRU_tail_ptr;
+ cache_ptr->msic_in_progress = TRUE;
- if (cache_ptr->index_size >= cache_ptr->max_cache_size)
- empty_space = 0;
- else
- empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
+ if (write_permitted) {
+ restart_scan = FALSE;
+ initial_list_len = cache_ptr->LRU_list_len;
+ entry_ptr = cache_ptr->LRU_tail_ptr;
- while ((((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) ||
- ((empty_space + cache_ptr->clean_index_size) < (cache_ptr->min_clean_size))) &&
- (entries_examined <= (2 * initial_list_len)) && (entry_ptr != NULL)) {
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(!(entry_ptr->is_protected));
- HDassert(!(entry_ptr->is_read_only));
- HDassert((entry_ptr->ro_ref_count) == 0);
+ if (cache_ptr->index_size >= cache_ptr->max_cache_size)
+ empty_space = 0;
+ else
+ empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
- next_ptr = entry_ptr->next;
- prev_ptr = entry_ptr->prev;
+ while ((((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) ||
+ ((empty_space + cache_ptr->clean_index_size) < (cache_ptr->min_clean_size))) &&
+ (entries_examined <= (2 * initial_list_len)) && (entry_ptr != NULL)) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(!(entry_ptr->is_protected));
+ HDassert(!(entry_ptr->is_read_only));
+ HDassert((entry_ptr->ro_ref_count) == 0);
- if (prev_ptr != NULL)
- prev_is_dirty = prev_ptr->is_dirty;
+ next_ptr = entry_ptr->next;
+ prev_ptr = entry_ptr->prev;
- if (entry_ptr->is_dirty && (entry_ptr->tag_info && entry_ptr->tag_info->corked)) {
+ if (prev_ptr != NULL)
+ prev_is_dirty = prev_ptr->is_dirty;
- /* Skip "dirty" corked entries. */
- ++num_corked_entries;
- didnt_flush_entry = TRUE;
- }
- else if (((entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID) && (!entry_ptr->flush_in_progress) &&
- (!entry_ptr->prefetched_dirty)) {
+ if (entry_ptr->is_dirty && (entry_ptr->tag_info && entry_ptr->tag_info->corked)) {
+
+ /* Skip "dirty" corked entries. */
+ ++num_corked_entries;
+ didnt_flush_entry = TRUE;
+ }
+ else if (((entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID) && (!entry_ptr->flush_in_progress) &&
+ (!entry_ptr->prefetched_dirty)) {
- didnt_flush_entry = FALSE;
+ didnt_flush_entry = FALSE;
- if (entry_ptr->is_dirty) {
+ if (entry_ptr->is_dirty) {
#if H5C_COLLECT_CACHE_STATS
- if ((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) {
+ if ((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) {
- cache_ptr->entries_scanned_to_make_space++;
- }
+ cache_ptr->entries_scanned_to_make_space++;
+ }
#endif /* H5C_COLLECT_CACHE_STATS */
- /* reset entries_removed_counter and
- * last_entry_removed_ptr prior to the call to
- * H5C__flush_single_entry() so that we can spot
- * unexpected removals of entries from the cache,
- * and set the restart_scan flag if proceeding
- * would be likely to cause us to scan an entry
- * that is no longer in the cache.
- */
- cache_ptr->entries_removed_counter = 0;
- cache_ptr->last_entry_removed_ptr = NULL;
+ /* reset entries_removed_counter and
+ * last_entry_removed_ptr prior to the call to
+ * H5C__flush_single_entry() so that we can spot
+ * unexpected removals of entries from the cache,
+ * and set the restart_scan flag if proceeding
+ * would be likely to cause us to scan an entry
+ * that is no longer in the cache.
+ */
+ cache_ptr->entries_removed_counter = 0;
+ cache_ptr->last_entry_removed_ptr = NULL;
- if (H5C__flush_single_entry(f, entry_ptr, H5C__NO_FLAGS_SET) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
+ if (H5C__flush_single_entry(f, entry_ptr, H5C__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
- if ((cache_ptr->entries_removed_counter > 1) ||
- (cache_ptr->last_entry_removed_ptr == prev_ptr))
+ if ((cache_ptr->entries_removed_counter > 1) ||
+ (cache_ptr->last_entry_removed_ptr == prev_ptr))
- restart_scan = TRUE;
- }
- else if ((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size
+ restart_scan = TRUE;
+ }
+ else if ((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size
#ifdef H5_HAVE_PARALLEL
- && !(entry_ptr->coll_access)
+ && !(entry_ptr->coll_access)
#endif /* H5_HAVE_PARALLEL */
- ) {
+ ) {
#if H5C_COLLECT_CACHE_STATS
- cache_ptr->entries_scanned_to_make_space++;
+ cache_ptr->entries_scanned_to_make_space++;
#endif /* H5C_COLLECT_CACHE_STATS */
- if (H5C__flush_single_entry(f, entry_ptr,
- H5C__FLUSH_INVALIDATE_FLAG |
- H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
- }
- else {
- /* We have enough space so don't flush clean entry. */
+ if (H5C__flush_single_entry(f, entry_ptr,
+ H5C__FLUSH_INVALIDATE_FLAG |
+ H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
+ }
+ else {
+ /* We have enough space so don't flush clean entry. */
#if H5C_COLLECT_CACHE_STATS
- clean_entries_skipped++;
+ clean_entries_skipped++;
#endif /* H5C_COLLECT_CACHE_STATS */
- didnt_flush_entry = TRUE;
- }
+ didnt_flush_entry = TRUE;
+ }
#if H5C_COLLECT_CACHE_STATS
- total_entries_scanned++;
+ total_entries_scanned++;
#endif /* H5C_COLLECT_CACHE_STATS */
- }
- else {
+ }
+ else {
- /* Skip epoch markers, entries that are in the process
- * of being flushed, and entries marked as prefetched_dirty
- * (occurs in the R/O case only).
- */
- didnt_flush_entry = TRUE;
+ /* Skip epoch markers, entries that are in the process
+ * of being flushed, and entries marked as prefetched_dirty
+ * (occurs in the R/O case only).
+ */
+ didnt_flush_entry = TRUE;
#if H5C_COLLECT_CACHE_STATS
- if (entry_ptr->prefetched_dirty)
- dirty_pf_entries_skipped++;
+ if (entry_ptr->prefetched_dirty)
+ dirty_pf_entries_skipped++;
#endif /* H5C_COLLECT_CACHE_STATS */
- }
-
- if (prev_ptr != NULL) {
+ }
- if (didnt_flush_entry) {
+ if (prev_ptr != NULL) {
- /* epoch markers don't get flushed, and we don't touch
- * entries that are in the process of being flushed.
- * Hence no need for sanity checks, as we haven't
- * flushed anything. Thus just set entry_ptr to prev_ptr
- * and go on.
- */
- entry_ptr = prev_ptr;
- }
- else if ((restart_scan) || (prev_ptr->is_dirty != prev_is_dirty) ||
- (prev_ptr->next != next_ptr) || (prev_ptr->is_protected) ||
- (prev_ptr->is_pinned)) {
+ if (didnt_flush_entry) {
- /* something has happened to the LRU -- start over
- * from the tail.
- */
- restart_scan = FALSE;
- entry_ptr = cache_ptr->LRU_tail_ptr;
- H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
- }
- else {
+ /* epoch markers don't get flushed, and we don't touch
+ * entries that are in the process of being flushed.
+ * Hence no need for sanity checks, as we haven't
+ * flushed anything. Thus just set entry_ptr to prev_ptr
+ * and go on.
+ */
+ entry_ptr = prev_ptr;
+ }
+ else if ((restart_scan) || (prev_ptr->is_dirty != prev_is_dirty) ||
+ (prev_ptr->next != next_ptr) || (prev_ptr->is_protected) || (prev_ptr->is_pinned)) {
- entry_ptr = prev_ptr;
- }
+ /* something has happened to the LRU -- start over
+ * from the tail.
+ */
+ restart_scan = FALSE;
+ entry_ptr = cache_ptr->LRU_tail_ptr;
+ H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
}
else {
- entry_ptr = NULL;
+ entry_ptr = prev_ptr;
}
+ }
+ else {
- entries_examined++;
+ entry_ptr = NULL;
+ }
- if (cache_ptr->index_size >= cache_ptr->max_cache_size) {
+ entries_examined++;
- empty_space = 0;
- }
- else {
+ if (cache_ptr->index_size >= cache_ptr->max_cache_size) {
- empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
- }
+ empty_space = 0;
+ }
+ else {
- HDassert(cache_ptr->index_size ==
- (cache_ptr->clean_index_size + cache_ptr->dirty_index_size));
+ empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
}
+ HDassert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size));
+ }
+
#if H5C_COLLECT_CACHE_STATS
- cache_ptr->calls_to_msic++;
+ cache_ptr->calls_to_msic++;
- cache_ptr->total_entries_skipped_in_msic += clean_entries_skipped;
- cache_ptr->total_dirty_pf_entries_skipped_in_msic += dirty_pf_entries_skipped;
- cache_ptr->total_entries_scanned_in_msic += total_entries_scanned;
+ cache_ptr->total_entries_skipped_in_msic += clean_entries_skipped;
+ cache_ptr->total_dirty_pf_entries_skipped_in_msic += dirty_pf_entries_skipped;
+ cache_ptr->total_entries_scanned_in_msic += total_entries_scanned;
- if (clean_entries_skipped > cache_ptr->max_entries_skipped_in_msic) {
+ if (clean_entries_skipped > cache_ptr->max_entries_skipped_in_msic) {
- cache_ptr->max_entries_skipped_in_msic = clean_entries_skipped;
- }
+ cache_ptr->max_entries_skipped_in_msic = clean_entries_skipped;
+ }
- if (dirty_pf_entries_skipped > cache_ptr->max_dirty_pf_entries_skipped_in_msic)
- cache_ptr->max_dirty_pf_entries_skipped_in_msic = dirty_pf_entries_skipped;
+ if (dirty_pf_entries_skipped > cache_ptr->max_dirty_pf_entries_skipped_in_msic)
+ cache_ptr->max_dirty_pf_entries_skipped_in_msic = dirty_pf_entries_skipped;
- if (total_entries_scanned > cache_ptr->max_entries_scanned_in_msic) {
+ if (total_entries_scanned > cache_ptr->max_entries_scanned_in_msic) {
- cache_ptr->max_entries_scanned_in_msic = total_entries_scanned;
- }
+ cache_ptr->max_entries_scanned_in_msic = total_entries_scanned;
+ }
#endif /* H5C_COLLECT_CACHE_STATS */
- /* NEED: work on a better assert for corked entries */
- HDassert((entries_examined > (2 * initial_list_len)) ||
- ((cache_ptr->pl_size + cache_ptr->pel_size + cache_ptr->min_clean_size) >
- cache_ptr->max_cache_size) ||
- ((cache_ptr->clean_index_size + empty_space) >= cache_ptr->min_clean_size) ||
- ((num_corked_entries)));
+ /* NEED: work on a better assert for corked entries */
+ HDassert((entries_examined > (2 * initial_list_len)) ||
+ ((cache_ptr->pl_size + cache_ptr->pel_size + cache_ptr->min_clean_size) >
+ cache_ptr->max_cache_size) ||
+ ((cache_ptr->clean_index_size + empty_space) >= cache_ptr->min_clean_size) ||
+ ((num_corked_entries)));
#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
- HDassert((entries_examined > (2 * initial_list_len)) ||
- (cache_ptr->cLRU_list_size <= cache_ptr->clean_index_size));
- HDassert((entries_examined > (2 * initial_list_len)) ||
- (cache_ptr->dLRU_list_size <= cache_ptr->dirty_index_size));
+ HDassert((entries_examined > (2 * initial_list_len)) ||
+ (cache_ptr->cLRU_list_size <= cache_ptr->clean_index_size));
+ HDassert((entries_examined > (2 * initial_list_len)) ||
+ (cache_ptr->dLRU_list_size <= cache_ptr->dirty_index_size));
#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
- }
- else {
+ }
+ else {
- HDassert(H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS);
+ HDassert(H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS);
#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
- initial_list_len = cache_ptr->cLRU_list_len;
- entry_ptr = cache_ptr->cLRU_tail_ptr;
+ initial_list_len = cache_ptr->cLRU_list_len;
+ entry_ptr = cache_ptr->cLRU_tail_ptr;
- while (((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) &&
- (entries_examined <= initial_list_len) && (entry_ptr != NULL)) {
- HDassert(!(entry_ptr->is_protected));
- HDassert(!(entry_ptr->is_read_only));
- HDassert((entry_ptr->ro_ref_count) == 0);
- HDassert(!(entry_ptr->is_dirty));
+ while (((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) &&
+ (entries_examined <= initial_list_len) && (entry_ptr != NULL)) {
+ HDassert(!(entry_ptr->is_protected));
+ HDassert(!(entry_ptr->is_read_only));
+ HDassert((entry_ptr->ro_ref_count) == 0);
+ HDassert(!(entry_ptr->is_dirty));
- prev_ptr = entry_ptr->aux_prev;
+ prev_ptr = entry_ptr->aux_prev;
- if ((!(entry_ptr->prefetched_dirty))
+ if ((!(entry_ptr->prefetched_dirty))
#ifdef H5_HAVE_PARALLEL
- && (!(entry_ptr->coll_access))
+ && (!(entry_ptr->coll_access))
#endif /* H5_HAVE_PARALLEL */
- ) {
- if (H5C__flush_single_entry(f, entry_ptr,
- H5C__FLUSH_INVALIDATE_FLAG |
- H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
+ ) {
+ if (H5C__flush_single_entry(
+ f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
- } /* end if */
+ } /* end if */
- /* we are scanning the clean LRU, so the serialize function
- * will not be called on any entry -- thus there is no
- * concern about the list being modified out from under
- * this function.
- */
+ /* we are scanning the clean LRU, so the serialize function
+ * will not be called on any entry -- thus there is no
+ * concern about the list being modified out from under
+ * this function.
+ */
- entry_ptr = prev_ptr;
- entries_examined++;
- }
-#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+ entry_ptr = prev_ptr;
+ entries_examined++;
}
+#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+ }
done:
- /* Sanity checks */
- HDassert(cache_ptr->msic_in_progress);
- if (!reentrant_call)
- cache_ptr->msic_in_progress = FALSE;
- HDassert((!reentrant_call) || (cache_ptr->msic_in_progress));
+ /* Sanity checks */
+ HDassert(cache_ptr->msic_in_progress);
+ if (!reentrant_call)
+ cache_ptr->msic_in_progress = FALSE;
+ HDassert((!reentrant_call) || (cache_ptr->msic_in_progress));
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__make_space_in_cache() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__make_space_in_cache() */
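The eviction pass above walks the LRU from the tail toward the head, flushing dirty entries and evicting clean ones until the requested space is available, subject to several cache-specific exceptions (corked, protected, epoch-marker, and prefetched-dirty entries). The basic tail-to-head scan, in a simplified form over hypothetical list and entry types (not the real H5C structures, and ignoring the restart and skip logic):

#include <stddef.h>
#include <stdbool.h>

/* Simplified stand-ins for the cache entry type; hypothetical, not the
 * H5C_cache_entry_t structure.
 */
struct entry {
    struct entry *prev;      /* toward the LRU head */
    size_t        size;
    bool          is_dirty;
};

extern bool flush_entry(struct entry *e);   /* assumed: write back a dirty entry */
extern void evict_entry(struct entry *e);   /* assumed: remove a clean entry */

/* Scan the LRU from the tail, trying to free 'space_needed' bytes.
 * Returns the number of bytes actually freed.
 */
static size_t
make_space(struct entry *lru_tail, size_t space_needed)
{
    size_t freed = 0;

    for (struct entry *e = lru_tail; e != NULL && freed < space_needed; ) {
        struct entry *prev = e->prev;    /* grab before e may be evicted */

        if (e->is_dirty) {
            if (flush_entry(e))          /* clean it so it can be evicted later */
                e->is_dirty = false;
        }
        else {
            freed += e->size;
            evict_entry(e);
        }
        e = prev;
    }
    return freed;
}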
/*-------------------------------------------------------------------------
*
@@ -7793,81 +7794,82 @@ done:
*/
#if H5C_DO_EXTREME_SANITY_CHECKS
- static herr_t H5C_validate_lru_list(H5C_t * cache_ptr)
- {
- herr_t ret_value = SUCCEED; /* Return value */
- int32_t len = 0;
- size_t size = 0;
- H5C_cache_entry_t *entry_ptr = NULL;
+static herr_t
+H5C_validate_lru_list(H5C_t *cache_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+ int32_t len = 0;
+ size_t size = 0;
+ H5C_cache_entry_t *entry_ptr = NULL;
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_NOAPI_NOINIT
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- if (((cache_ptr->LRU_head_ptr == NULL) || (cache_ptr->LRU_tail_ptr == NULL)) &&
- (cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr)) {
+ if (((cache_ptr->LRU_head_ptr == NULL) || (cache_ptr->LRU_tail_ptr == NULL)) &&
+ (cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr)) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 1 failed")
- }
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 1 failed")
+ }
- if (cache_ptr->LRU_list_len < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
+ if (cache_ptr->LRU_list_len < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
- if ((cache_ptr->LRU_list_len == 1) &&
- ((cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr) || (cache_ptr->LRU_head_ptr == NULL) ||
- (cache_ptr->LRU_head_ptr->size != cache_ptr->LRU_list_size))) {
+ if ((cache_ptr->LRU_list_len == 1) &&
+ ((cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr) || (cache_ptr->LRU_head_ptr == NULL) ||
+ (cache_ptr->LRU_head_ptr->size != cache_ptr->LRU_list_size))) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")
- }
-
- if ((cache_ptr->LRU_list_len >= 1) &&
- ((cache_ptr->LRU_head_ptr == NULL) || (cache_ptr->LRU_head_ptr->prev != NULL) ||
- (cache_ptr->LRU_tail_ptr == NULL) || (cache_ptr->LRU_tail_ptr->next != NULL))) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")
+ }
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")
- }
+ if ((cache_ptr->LRU_list_len >= 1) &&
+ ((cache_ptr->LRU_head_ptr == NULL) || (cache_ptr->LRU_head_ptr->prev != NULL) ||
+ (cache_ptr->LRU_tail_ptr == NULL) || (cache_ptr->LRU_tail_ptr->next != NULL))) {
- entry_ptr = cache_ptr->LRU_head_ptr;
- while (entry_ptr != NULL) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")
+ }
- if ((entry_ptr != cache_ptr->LRU_head_ptr) &&
- ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr))) {
+ entry_ptr = cache_ptr->LRU_head_ptr;
+ while (entry_ptr != NULL) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")
- }
+ if ((entry_ptr != cache_ptr->LRU_head_ptr) &&
+ ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr))) {
- if ((entry_ptr != cache_ptr->LRU_tail_ptr) &&
- ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr))) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")
+ }
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
- }
+ if ((entry_ptr != cache_ptr->LRU_tail_ptr) &&
+ ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr))) {
- if ((entry_ptr->is_pinned) || (entry_ptr->pinned_from_client) || (entry_ptr->pinned_from_cache)) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
+ }
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")
- }
+ if ((entry_ptr->is_pinned) || (entry_ptr->pinned_from_client) || (entry_ptr->pinned_from_cache)) {
- len++;
- size += entry_ptr->size;
- entry_ptr = entry_ptr->next;
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")
}
- if ((cache_ptr->LRU_list_len != len) || (cache_ptr->LRU_list_size != size)) {
+ len++;
+ size += entry_ptr->size;
+ entry_ptr = entry_ptr->next;
+ }
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 8 failed")
- }
+ if ((cache_ptr->LRU_list_len != len) || (cache_ptr->LRU_list_size != size)) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 8 failed")
+ }
done:
- if (ret_value != SUCCEED) {
+ if (ret_value != SUCCEED) {
- HDassert(0);
- }
+ HDassert(0);
+ }
- FUNC_LEAVE_NOAPI(ret_value)
+ FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C_validate_lru_list() */
+} /* H5C_validate_lru_list() */
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
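The three validation routines guarded by H5C_DO_EXTREME_SANITY_CHECKS all perform the same style of walk: confirm the head and tail pointers agree, check that every node's prev/next links are mutually consistent, and compare an accumulated length and size against the list's recorded bookkeeping. A generic sketch of that check over hypothetical node and list types (not the real cache structures):

#include <stddef.h>
#include <stdbool.h>

struct node {
    struct node *prev, *next;
    size_t       size;
};

struct list {
    struct node *head, *tail;
    int          len;        /* recorded length     */
    size_t       size;       /* recorded total size */
};

/* Returns true if the recorded bookkeeping matches an actual walk of the
 * list and the prev/next pointers are mutually consistent.
 */
static bool
list_is_consistent(const struct list *l)
{
    int    len  = 0;
    size_t size = 0;

    /* head and tail must be both NULL or both non-NULL */
    if ((l->head == NULL) != (l->tail == NULL))
        return false;

    for (const struct node *n = l->head; n != NULL; n = n->next) {
        if (n != l->head && (n->prev == NULL || n->prev->next != n))
            return false;
        if (n != l->tail && (n->next == NULL || n->next->prev != n))
            return false;
        len++;
        size += n->size;
    }
    return (len == l->len) && (size == l->size);
}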
@@ -7894,86 +7896,87 @@ done:
*/
#if H5C_DO_EXTREME_SANITY_CHECKS
- static herr_t H5C_validate_pinned_entry_list(H5C_t * cache_ptr)
- {
- herr_t ret_value = SUCCEED; /* Return value */
- int32_t len = 0;
- size_t size = 0;
- H5C_cache_entry_t *entry_ptr = NULL;
-
- FUNC_ENTER_NOAPI_NOINIT
+static herr_t
+H5C_validate_pinned_entry_list(H5C_t *cache_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+ int32_t len = 0;
+ size_t size = 0;
+ H5C_cache_entry_t *entry_ptr = NULL;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ FUNC_ENTER_NOAPI_NOINIT
- if (((cache_ptr->pel_head_ptr == NULL) || (cache_ptr->pel_tail_ptr == NULL)) &&
- (cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr)) {
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 1 failed")
- }
+ if (((cache_ptr->pel_head_ptr == NULL) || (cache_ptr->pel_tail_ptr == NULL)) &&
+ (cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr)) {
- if (cache_ptr->pel_len < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 1 failed")
+ }
- if ((cache_ptr->pel_len == 1) &&
- ((cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr) || (cache_ptr->pel_head_ptr == NULL) ||
- (cache_ptr->pel_head_ptr->size != cache_ptr->pel_size))) {
+ if (cache_ptr->pel_len < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")
- }
+ if ((cache_ptr->pel_len == 1) &&
+ ((cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr) || (cache_ptr->pel_head_ptr == NULL) ||
+ (cache_ptr->pel_head_ptr->size != cache_ptr->pel_size))) {
- if ((cache_ptr->pel_len >= 1) &&
- ((cache_ptr->pel_head_ptr == NULL) || (cache_ptr->pel_head_ptr->prev != NULL) ||
- (cache_ptr->pel_tail_ptr == NULL) || (cache_ptr->pel_tail_ptr->next != NULL))) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")
+ }
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")
- }
+ if ((cache_ptr->pel_len >= 1) &&
+ ((cache_ptr->pel_head_ptr == NULL) || (cache_ptr->pel_head_ptr->prev != NULL) ||
+ (cache_ptr->pel_tail_ptr == NULL) || (cache_ptr->pel_tail_ptr->next != NULL))) {
- entry_ptr = cache_ptr->pel_head_ptr;
- while (entry_ptr != NULL) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")
+ }
- if ((entry_ptr != cache_ptr->pel_head_ptr) &&
- ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr))) {
+ entry_ptr = cache_ptr->pel_head_ptr;
+ while (entry_ptr != NULL) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")
- }
+ if ((entry_ptr != cache_ptr->pel_head_ptr) &&
+ ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr))) {
- if ((entry_ptr != cache_ptr->pel_tail_ptr) &&
- ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr))) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")
+ }
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
- }
+ if ((entry_ptr != cache_ptr->pel_tail_ptr) &&
+ ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr))) {
- if (!entry_ptr->is_pinned) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
+ }
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")
- }
+ if (!entry_ptr->is_pinned) {
- if (!((entry_ptr->pinned_from_client) || (entry_ptr->pinned_from_cache))) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")
+ }
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 8 failed")
- }
+ if (!((entry_ptr->pinned_from_client) || (entry_ptr->pinned_from_cache))) {
- len++;
- size += entry_ptr->size;
- entry_ptr = entry_ptr->next;
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 8 failed")
}
- if ((cache_ptr->pel_len != len) || (cache_ptr->pel_size != size)) {
+ len++;
+ size += entry_ptr->size;
+ entry_ptr = entry_ptr->next;
+ }
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 9 failed")
- }
+ if ((cache_ptr->pel_len != len) || (cache_ptr->pel_size != size)) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 9 failed")
+ }
done:
- if (ret_value != SUCCEED) {
+ if (ret_value != SUCCEED) {
- HDassert(0);
- }
+ HDassert(0);
+ }
- FUNC_LEAVE_NOAPI(ret_value)
+ FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C_validate_pinned_entry_list() */
+} /* H5C_validate_pinned_entry_list() */
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -8000,84 +8003,85 @@ done:
*/
#if H5C_DO_EXTREME_SANITY_CHECKS
- static herr_t H5C_validate_protected_entry_list(H5C_t * cache_ptr)
- {
- herr_t ret_value = SUCCEED; /* Return value */
- int32_t len = 0;
- size_t size = 0;
- H5C_cache_entry_t *entry_ptr = NULL;
-
- FUNC_ENTER_NOAPI_NOINIT
+static herr_t
+H5C_validate_protected_entry_list(H5C_t *cache_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+ int32_t len = 0;
+ size_t size = 0;
+ H5C_cache_entry_t *entry_ptr = NULL;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ FUNC_ENTER_NOAPI_NOINIT
- if (((cache_ptr->pl_head_ptr == NULL) || (cache_ptr->pl_tail_ptr == NULL)) &&
- (cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 1 failed")
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- if (cache_ptr->pl_len < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
+ if (((cache_ptr->pl_head_ptr == NULL) || (cache_ptr->pl_tail_ptr == NULL)) &&
+ (cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 1 failed")
- if ((cache_ptr->pl_len == 1) &&
- ((cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr) || (cache_ptr->pl_head_ptr == NULL) ||
- (cache_ptr->pl_head_ptr->size != cache_ptr->pl_size))) {
+ if (cache_ptr->pl_len < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")
- }
+ if ((cache_ptr->pl_len == 1) &&
+ ((cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr) || (cache_ptr->pl_head_ptr == NULL) ||
+ (cache_ptr->pl_head_ptr->size != cache_ptr->pl_size))) {
- if ((cache_ptr->pl_len >= 1) &&
- ((cache_ptr->pl_head_ptr == NULL) || (cache_ptr->pl_head_ptr->prev != NULL) ||
- (cache_ptr->pl_tail_ptr == NULL) || (cache_ptr->pl_tail_ptr->next != NULL))) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")
+ }
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")
- }
+ if ((cache_ptr->pl_len >= 1) &&
+ ((cache_ptr->pl_head_ptr == NULL) || (cache_ptr->pl_head_ptr->prev != NULL) ||
+ (cache_ptr->pl_tail_ptr == NULL) || (cache_ptr->pl_tail_ptr->next != NULL))) {
- entry_ptr = cache_ptr->pl_head_ptr;
- while (entry_ptr != NULL) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")
+ }
- if ((entry_ptr != cache_ptr->pl_head_ptr) &&
- ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr))) {
+ entry_ptr = cache_ptr->pl_head_ptr;
+ while (entry_ptr != NULL) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")
- }
+ if ((entry_ptr != cache_ptr->pl_head_ptr) &&
+ ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr))) {
- if ((entry_ptr != cache_ptr->pl_tail_ptr) &&
- ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr))) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")
+ }
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
- }
+ if ((entry_ptr != cache_ptr->pl_tail_ptr) &&
+ ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr))) {
- if (!entry_ptr->is_protected) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
+ }
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")
- }
+ if (!entry_ptr->is_protected) {
- if ((entry_ptr->is_read_only) && (entry_ptr->ro_ref_count <= 0)) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")
+ }
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 8 failed")
- }
+ if ((entry_ptr->is_read_only) && (entry_ptr->ro_ref_count <= 0)) {
- len++;
- size += entry_ptr->size;
- entry_ptr = entry_ptr->next;
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 8 failed")
}
- if ((cache_ptr->pl_len != len) || (cache_ptr->pl_size != size)) {
+ len++;
+ size += entry_ptr->size;
+ entry_ptr = entry_ptr->next;
+ }
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 9 failed")
- }
+ if ((cache_ptr->pl_len != len) || (cache_ptr->pl_size != size)) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 9 failed")
+ }
done:
- if (ret_value != SUCCEED) {
+ if (ret_value != SUCCEED) {
- HDassert(0);
- }
+ HDassert(0);
+ }
- FUNC_LEAVE_NOAPI(ret_value)
+ FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C_validate_protected_entry_list() */
+} /* H5C_validate_protected_entry_list() */
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -8102,1271 +8106,1281 @@ done:
*/
#if H5C_DO_SLIST_SANITY_CHECKS
- static hbool_t H5C_entry_in_skip_list(H5C_t * cache_ptr, H5C_cache_entry_t * target_ptr)
- {
- hbool_t in_slist = FALSE;
- H5SL_node_t *node_ptr = NULL;
- H5C_cache_entry_t *entry_ptr = NULL;
+static hbool_t
+H5C_entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr)
+{
+ hbool_t in_slist = FALSE;
+ H5SL_node_t *node_ptr = NULL;
+ H5C_cache_entry_t *entry_ptr = NULL;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->slist_ptr);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_ptr);
- node_ptr = H5SL_first(cache_ptr->slist_ptr);
+ node_ptr = H5SL_first(cache_ptr->slist_ptr);
- while ((node_ptr != NULL) && (!in_slist)) {
- entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ while ((node_ptr != NULL) && (!in_slist)) {
+ entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- HDassert(entry_ptr);
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->is_dirty);
- HDassert(entry_ptr->in_slist);
+ HDassert(entry_ptr);
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->is_dirty);
+ HDassert(entry_ptr->in_slist);
- if (entry_ptr == target_ptr) {
+ if (entry_ptr == target_ptr) {
- in_slist = TRUE;
- }
- else {
+ in_slist = TRUE;
+ }
+ else {
- node_ptr = H5SL_next(node_ptr);
- }
+ node_ptr = H5SL_next(node_ptr);
}
+ }
- return (in_slist);
+ return (in_slist);
- } /* H5C_entry_in_skip_list() */
+} /* H5C_entry_in_skip_list() */
#endif /* H5C_DO_SLIST_SANITY_CHECKS */
- /*-------------------------------------------------------------------------
- *
- * Function: H5C__flush_marked_entries
- *
- * Purpose: Flushes all marked entries in the cache.
- *
- * Return: FAIL if error is detected, SUCCEED otherwise.
- *
- * Programmer: Mike McGreevy
- * November 3, 2010
- *
- * Changes: Modified function to setup the slist before calling
- * H%C_flush_cache(), and take it down afterwards. Note
- * that the slist need not be empty after the call to
- * H5C_flush_cache() since we are only flushing marked
- * entries. Thus must set the clear_slist parameter
- * of H5C_set_slist_enabled to TRUE.
- *
- * JRM -- 5/6/20
- *
- *-------------------------------------------------------------------------
- */
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__flush_marked_entries
+ *
+ * Purpose: Flushes all marked entries in the cache.
+ *
+ * Return: FAIL if error is detected, SUCCEED otherwise.
+ *
+ * Programmer: Mike McGreevy
+ * November 3, 2010
+ *
+ * Changes: Modified function to setup the slist before calling
+ *               H5C_flush_cache(), and take it down afterwards.  Note
+ * that the slist need not be empty after the call to
+ * H5C_flush_cache() since we are only flushing marked
+ *               entries.  Thus we must set the clear_slist parameter
+ * of H5C_set_slist_enabled to TRUE.
+ *
+ * JRM -- 5/6/20
+ *
+ *-------------------------------------------------------------------------
+ */
- herr_t H5C__flush_marked_entries(H5F_t * f)
- {
- herr_t ret_value = SUCCEED;
+herr_t
+H5C__flush_marked_entries(H5F_t *f)
+{
+ herr_t ret_value = SUCCEED;
- FUNC_ENTER_PACKAGE
+ FUNC_ENTER_PACKAGE
- /* Assertions */
- HDassert(f != NULL);
+ /* Assertions */
+ HDassert(f != NULL);
- /* Enable the slist, as it is needed in the flush */
- if (H5C_set_slist_enabled(f->shared->cache, TRUE, FALSE) < 0)
+ /* Enable the slist, as it is needed in the flush */
+ if (H5C_set_slist_enabled(f->shared->cache, TRUE, FALSE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist enabled failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist enabled failed")
- /* Flush all marked entries */
- if (H5C_flush_cache(f, H5C__FLUSH_MARKED_ENTRIES_FLAG | H5C__FLUSH_IGNORE_PROTECTED_FLAG) < 0)
+ /* Flush all marked entries */
+ if (H5C_flush_cache(f, H5C__FLUSH_MARKED_ENTRIES_FLAG | H5C__FLUSH_IGNORE_PROTECTED_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush cache")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush cache")
- /* Disable the slist. Set the clear_slist parameter to TRUE
- * since we called H5C_flush_cache() with the
- * H5C__FLUSH_MARKED_ENTRIES_FLAG.
- */
- if (H5C_set_slist_enabled(f->shared->cache, FALSE, TRUE) < 0)
+ /* Disable the slist. Set the clear_slist parameter to TRUE
+ * since we called H5C_flush_cache() with the
+ * H5C__FLUSH_MARKED_ENTRIES_FLAG.
+ */
+ if (H5C_set_slist_enabled(f->shared->cache, FALSE, TRUE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "disable slist failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "disable slist failed")
done:
- FUNC_LEAVE_NOAPI(ret_value)
-
- } /* H5C__flush_marked_entries */
-
- /*-------------------------------------------------------------------------
- *
- * Function: H5C_cork
- *
- * Purpose: To cork/uncork/get cork status of an object depending on "action":
- * H5C__SET_CORK:
- * To cork the object
- * Return error if the object is already corked
- * H5C__UNCORK:
- * To uncork the object
- * Return error if the object is not corked
- * H5C__GET_CORKED:
- * To retrieve the cork status of an object in
- * the parameter "corked"
- *
- * Return: Success: Non-negative
- * Failure: Negative
- *
- * Programmer: Vailin Choi
- * January 2014
- *
- *-------------------------------------------------------------------------
- */
- herr_t H5C_cork(H5C_t * cache_ptr, haddr_t obj_addr, unsigned action, hbool_t *corked)
- {
- H5C_tag_info_t *tag_info; /* Points to a tag info struct */
- herr_t ret_value = SUCCEED;
+ FUNC_LEAVE_NOAPI(ret_value)
- FUNC_ENTER_NOAPI_NOINIT
+} /* H5C__flush_marked_entries */
- /* Assertions */
- HDassert(cache_ptr != NULL);
- HDassert(H5F_addr_defined(obj_addr));
- HDassert(action == H5C__SET_CORK || action == H5C__UNCORK || action == H5C__GET_CORKED);
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_cork
+ *
+ * Purpose: To cork/uncork/get cork status of an object depending on "action":
+ * H5C__SET_CORK:
+ * To cork the object
+ * Return error if the object is already corked
+ * H5C__UNCORK:
+ * To uncork the object
+ * Return error if the object is not corked
+ * H5C__GET_CORKED:
+ * To retrieve the cork status of an object in
+ * the parameter "corked"
+ *
+ * Return: Success: Non-negative
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi
+ * January 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_cork(H5C_t *cache_ptr, haddr_t obj_addr, unsigned action, hbool_t *corked)
+{
+ H5C_tag_info_t *tag_info; /* Points to a tag info struct */
+ herr_t ret_value = SUCCEED;
- /* Search the list of corked object addresses in the cache */
- tag_info = (H5C_tag_info_t *)H5SL_search(cache_ptr->tag_list, &obj_addr);
+ FUNC_ENTER_NOAPI_NOINIT
- if (H5C__GET_CORKED == action) {
- HDassert(corked);
- if (tag_info != NULL && tag_info->corked)
- *corked = TRUE;
- else
- *corked = FALSE;
- } /* end if */
- else {
- /* Sanity check */
- HDassert(H5C__SET_CORK == action || H5C__UNCORK == action);
-
- /* Perform appropriate action */
- if (H5C__SET_CORK == action) {
- /* Check if this is the first entry for this tagged object */
- if (NULL == tag_info) {
- /* Allocate new tag info struct */
- if (NULL == (tag_info = H5FL_CALLOC(H5C_tag_info_t)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "can't allocate tag info for cache entry")
-
- /* Set the tag for all entries */
- tag_info->tag = obj_addr;
-
- /* Insert tag info into skip list */
- if (H5SL_insert(cache_ptr->tag_list, tag_info, &(tag_info->tag)) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "can't insert tag info in skip list")
- } /* end if */
- else {
- /* Check for object already corked */
- if (tag_info->corked)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTCORK, FAIL, "object already corked")
- HDassert(tag_info->entry_cnt > 0 && tag_info->head);
- } /* end else */
+ /* Assertions */
+ HDassert(cache_ptr != NULL);
+ HDassert(H5F_addr_defined(obj_addr));
+ HDassert(action == H5C__SET_CORK || action == H5C__UNCORK || action == H5C__GET_CORKED);
- /* Set the corked status for the entire object */
- tag_info->corked = TRUE;
- cache_ptr->num_objs_corked++;
+ /* Search the list of corked object addresses in the cache */
+ tag_info = (H5C_tag_info_t *)H5SL_search(cache_ptr->tag_list, &obj_addr);
+ if (H5C__GET_CORKED == action) {
+ HDassert(corked);
+ if (tag_info != NULL && tag_info->corked)
+ *corked = TRUE;
+ else
+ *corked = FALSE;
+ } /* end if */
+ else {
+ /* Sanity check */
+ HDassert(H5C__SET_CORK == action || H5C__UNCORK == action);
+
+ /* Perform appropriate action */
+ if (H5C__SET_CORK == action) {
+ /* Check if this is the first entry for this tagged object */
+ if (NULL == tag_info) {
+ /* Allocate new tag info struct */
+ if (NULL == (tag_info = H5FL_CALLOC(H5C_tag_info_t)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "can't allocate tag info for cache entry")
+
+ /* Set the tag for all entries */
+ tag_info->tag = obj_addr;
+
+ /* Insert tag info into skip list */
+ if (H5SL_insert(cache_ptr->tag_list, tag_info, &(tag_info->tag)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "can't insert tag info in skip list")
} /* end if */
else {
- /* Sanity check */
- HDassert(tag_info);
+ /* Check for object already corked */
+ if (tag_info->corked)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCORK, FAIL, "object already corked")
+ HDassert(tag_info->entry_cnt > 0 && tag_info->head);
+ } /* end else */
- /* Check for already uncorked */
- if (!tag_info->corked)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNCORK, FAIL, "object already uncorked")
+ /* Set the corked status for the entire object */
+ tag_info->corked = TRUE;
+ cache_ptr->num_objs_corked++;
- /* Set the corked status for the entire object */
- tag_info->corked = FALSE;
- cache_ptr->num_objs_corked--;
+ } /* end if */
+ else {
+ /* Sanity check */
+ HDassert(tag_info);
- /* Remove the tag info from the tag list, if there's no more entries with this tag */
- if (0 == tag_info->entry_cnt) {
- /* Sanity check */
- HDassert(NULL == tag_info->head);
+ /* Check for already uncorked */
+ if (!tag_info->corked)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNCORK, FAIL, "object already uncorked")
- if (H5SL_remove(cache_ptr->tag_list, &(tag_info->tag)) != tag_info)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove tag info from list")
+ /* Set the corked status for the entire object */
+ tag_info->corked = FALSE;
+ cache_ptr->num_objs_corked--;
- /* Release the tag info */
- tag_info = H5FL_FREE(H5C_tag_info_t, tag_info);
- } /* end if */
- else
- HDassert(NULL != tag_info->head);
- } /* end else */
- } /* end else */
+            /* Remove the tag info from the tag list, if there are no more entries with this tag */
+ if (0 == tag_info->entry_cnt) {
+ /* Sanity check */
+ HDassert(NULL == tag_info->head);
-done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C_cork() */
+ if (H5SL_remove(cache_ptr->tag_list, &(tag_info->tag)) != tag_info)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove tag info from list")
- /*-------------------------------------------------------------------------
- * Function: H5C__mark_flush_dep_dirty()
- *
- * Purpose: Recursively propagate the flush_dep_ndirty_children flag
- * up the dependency chain in response to entry either
- * becoming dirty or having its flush_dep_ndirty_children
- * increased from 0.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Neil Fortner
- * 11/13/12
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__mark_flush_dep_dirty(H5C_cache_entry_t * entry)
- {
- unsigned u; /* Local index variable */
- herr_t ret_value = SUCCEED; /* Return value */
+ /* Release the tag info */
+ tag_info = H5FL_FREE(H5C_tag_info_t, tag_info);
+ } /* end if */
+ else
+ HDassert(NULL != tag_info->head);
+ } /* end else */
+ } /* end else */
- FUNC_ENTER_STATIC
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_cork() */
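A minimal caller-side sketch of the three actions documented in the H5C_cork() header above. The helper name, the NULL 'corked' argument for H5C__SET_CORK, and the error strategy are illustrative assumptions; only the action semantics come from the comment.

static herr_t
example_cork_object(H5C_t *cache_ptr, haddr_t obj_addr)
{
    hbool_t corked = FALSE;

    /* H5C__GET_CORKED reports the object's current cork status in 'corked' */
    if (H5C_cork(cache_ptr, obj_addr, H5C__GET_CORKED, &corked) < 0)
        return FAIL;

    /* H5C__SET_CORK corks the object; it fails if the object is already
     * corked, which the query above lets us avoid.  (H5C__UNCORK is the
     * inverse and fails if the object is not corked.)
     */
    if (!corked && H5C_cork(cache_ptr, obj_addr, H5C__SET_CORK, NULL) < 0)
        return FAIL;

    return SUCCEED;
}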
- /* Sanity checks */
- HDassert(entry);
+/*-------------------------------------------------------------------------
+ * Function: H5C__mark_flush_dep_dirty()
+ *
+ * Purpose: Recursively propagate the flush_dep_ndirty_children flag
+ * up the dependency chain in response to entry either
+ * becoming dirty or having its flush_dep_ndirty_children
+ * increased from 0.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * 11/13/12
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__mark_flush_dep_dirty(H5C_cache_entry_t *entry)
+{
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
- /* Iterate over the parent entries, if any */
- for (u = 0; u < entry->flush_dep_nparents; u++) {
- /* Sanity check */
- HDassert(entry->flush_dep_parent[u]->flush_dep_ndirty_children <
- entry->flush_dep_parent[u]->flush_dep_nchildren);
+ FUNC_ENTER_STATIC
- /* Adjust the parent's number of dirty children */
- entry->flush_dep_parent[u]->flush_dep_ndirty_children++;
+ /* Sanity checks */
+ HDassert(entry);
- /* If the parent has a 'notify' callback, send a 'child entry dirtied' notice */
- if (entry->flush_dep_parent[u]->type->notify &&
- (entry->flush_dep_parent[u]->type->notify)(H5C_NOTIFY_ACTION_CHILD_DIRTIED,
- entry->flush_dep_parent[u]) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify parent about child entry dirty flag set")
- } /* end for */
+ /* Iterate over the parent entries, if any */
+ for (u = 0; u < entry->flush_dep_nparents; u++) {
+ /* Sanity check */
+ HDassert(entry->flush_dep_parent[u]->flush_dep_ndirty_children <
+ entry->flush_dep_parent[u]->flush_dep_nchildren);
+
+ /* Adjust the parent's number of dirty children */
+ entry->flush_dep_parent[u]->flush_dep_ndirty_children++;
+
+ /* If the parent has a 'notify' callback, send a 'child entry dirtied' notice */
+ if (entry->flush_dep_parent[u]->type->notify &&
+ (entry->flush_dep_parent[u]->type->notify)(H5C_NOTIFY_ACTION_CHILD_DIRTIED,
+ entry->flush_dep_parent[u]) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify parent about child entry dirty flag set")
+ } /* end for */
done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__mark_flush_dep_dirty() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__mark_flush_dep_dirty() */
- /*-------------------------------------------------------------------------
- * Function: H5C__mark_flush_dep_clean()
- *
- * Purpose: Recursively propagate the flush_dep_ndirty_children flag
- * up the dependency chain in response to entry either
- * becoming clean or having its flush_dep_ndirty_children
- * reduced to 0.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Neil Fortner
- * 11/13/12
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__mark_flush_dep_clean(H5C_cache_entry_t * entry)
- {
- int i; /* Local index variable */
- herr_t ret_value = SUCCEED; /* Return value */
+/*-------------------------------------------------------------------------
+ * Function: H5C__mark_flush_dep_clean()
+ *
+ * Purpose: Recursively propagate the flush_dep_ndirty_children flag
+ * up the dependency chain in response to entry either
+ * becoming clean or having its flush_dep_ndirty_children
+ * reduced to 0.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * 11/13/12
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__mark_flush_dep_clean(H5C_cache_entry_t *entry)
+{
+ int i; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_STATIC
+ FUNC_ENTER_STATIC
- /* Sanity checks */
- HDassert(entry);
+ /* Sanity checks */
+ HDassert(entry);
- /* Iterate over the parent entries, if any */
- /* Note reverse iteration order, in case the callback removes the flush
- * dependency - QAK, 2017/08/12
- */
- for (i = ((int)entry->flush_dep_nparents) - 1; i >= 0; i--) {
- /* Sanity check */
- HDassert(entry->flush_dep_parent[i]->flush_dep_ndirty_children > 0);
+ /* Iterate over the parent entries, if any */
+ /* Note reverse iteration order, in case the callback removes the flush
+ * dependency - QAK, 2017/08/12
+ */
+ for (i = ((int)entry->flush_dep_nparents) - 1; i >= 0; i--) {
+ /* Sanity check */
+ HDassert(entry->flush_dep_parent[i]->flush_dep_ndirty_children > 0);
- /* Adjust the parent's number of dirty children */
- entry->flush_dep_parent[i]->flush_dep_ndirty_children--;
+ /* Adjust the parent's number of dirty children */
+ entry->flush_dep_parent[i]->flush_dep_ndirty_children--;
- /* If the parent has a 'notify' callback, send a 'child entry cleaned' notice */
- if (entry->flush_dep_parent[i]->type->notify &&
- (entry->flush_dep_parent[i]->type->notify)(H5C_NOTIFY_ACTION_CHILD_CLEANED,
- entry->flush_dep_parent[i]) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify parent about child entry dirty flag reset")
- } /* end for */
+ /* If the parent has a 'notify' callback, send a 'child entry cleaned' notice */
+ if (entry->flush_dep_parent[i]->type->notify &&
+ (entry->flush_dep_parent[i]->type->notify)(H5C_NOTIFY_ACTION_CHILD_CLEANED,
+ entry->flush_dep_parent[i]) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify parent about child entry dirty flag reset")
+ } /* end for */
done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__mark_flush_dep_clean() */
-
- /*-------------------------------------------------------------------------
- * Function: H5C__mark_flush_dep_serialized()
- *
- * Purpose: Decrement the flush_dep_nunser_children fields of all the
- * target entry's flush dependency parents in response to
- * the target entry becoming serialized.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer
- * 8/30/16
- *
- *-------------------------------------------------------------------------
- */
- herr_t H5C__mark_flush_dep_serialized(H5C_cache_entry_t * entry_ptr)
- {
- int i; /* Local index variable */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_STATIC
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__mark_flush_dep_clean() */
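The two routines above are the bookkeeping half of the flush dependency machinery: each keeps every parent's flush_dep_ndirty_children in step with its children's dirty state and fires the matching notify action. A small sketch of when a caller would invoke them; the wrapper and its name are assumptions for illustration, not code from this file.

static herr_t
example_child_dirty_transition(H5C_cache_entry_t *child, hbool_t now_dirty)
{
    if (now_dirty) {
        /* Child went clean -> dirty (or its own dirty-child count rose from
         * 0): each parent's flush_dep_ndirty_children is incremented and an
         * H5C_NOTIFY_ACTION_CHILD_DIRTIED notice is sent.
         */
        if (H5C__mark_flush_dep_dirty(child) < 0)
            return FAIL;
    }
    else {
        /* Child went dirty -> clean (or its dirty-child count dropped to 0):
         * each parent's flush_dep_ndirty_children is decremented and an
         * H5C_NOTIFY_ACTION_CHILD_CLEANED notice is sent.
         */
        if (H5C__mark_flush_dep_clean(child) < 0)
            return FAIL;
    }

    return SUCCEED;
}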
- /* Sanity checks */
- HDassert(entry_ptr);
+/*-------------------------------------------------------------------------
+ * Function: H5C__mark_flush_dep_serialized()
+ *
+ * Purpose: Decrement the flush_dep_nunser_children fields of all the
+ * target entry's flush dependency parents in response to
+ * the target entry becoming serialized.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 8/30/16
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__mark_flush_dep_serialized(H5C_cache_entry_t *entry_ptr)
+{
+ int i; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
- /* Iterate over the parent entries, if any */
- /* Note reverse iteration order, in case the callback removes the flush
- * dependency - QAK, 2017/08/12
- */
- for (i = ((int)entry_ptr->flush_dep_nparents) - 1; i >= 0; i--) {
- /* Sanity checks */
- HDassert(entry_ptr->flush_dep_parent);
- HDassert(entry_ptr->flush_dep_parent[i]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->flush_dep_parent[i]->flush_dep_nunser_children > 0);
+ FUNC_ENTER_STATIC
- /* decrement the parents number of unserialized children */
- entry_ptr->flush_dep_parent[i]->flush_dep_nunser_children--;
+ /* Sanity checks */
+ HDassert(entry_ptr);
- /* If the parent has a 'notify' callback, send a 'child entry serialized' notice */
- if (entry_ptr->flush_dep_parent[i]->type->notify &&
- (entry_ptr->flush_dep_parent[i]->type->notify)(H5C_NOTIFY_ACTION_CHILD_SERIALIZED,
- entry_ptr->flush_dep_parent[i]) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify parent about child entry serialized flag set")
- } /* end for */
+ /* Iterate over the parent entries, if any */
+ /* Note reverse iteration order, in case the callback removes the flush
+ * dependency - QAK, 2017/08/12
+ */
+ for (i = ((int)entry_ptr->flush_dep_nparents) - 1; i >= 0; i--) {
+ /* Sanity checks */
+ HDassert(entry_ptr->flush_dep_parent);
+ HDassert(entry_ptr->flush_dep_parent[i]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->flush_dep_parent[i]->flush_dep_nunser_children > 0);
+
+        /* Decrement the parent's number of unserialized children */
+ entry_ptr->flush_dep_parent[i]->flush_dep_nunser_children--;
+
+ /* If the parent has a 'notify' callback, send a 'child entry serialized' notice */
+ if (entry_ptr->flush_dep_parent[i]->type->notify &&
+ (entry_ptr->flush_dep_parent[i]->type->notify)(H5C_NOTIFY_ACTION_CHILD_SERIALIZED,
+ entry_ptr->flush_dep_parent[i]) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify parent about child entry serialized flag set")
+ } /* end for */
done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__mark_flush_dep_serialized() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__mark_flush_dep_serialized() */
- /*-------------------------------------------------------------------------
- * Function: H5C__mark_flush_dep_unserialized()
- *
- * Purpose: Increment the flush_dep_nunser_children fields of all the
- * target entry's flush dependency parents in response to
- * the target entry becoming unserialized.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer
- * 8/30/16
- *
- *-------------------------------------------------------------------------
- */
- herr_t H5C__mark_flush_dep_unserialized(H5C_cache_entry_t * entry_ptr)
- {
- unsigned u; /* Local index variable */
- herr_t ret_value = SUCCEED; /* Return value */
+/*-------------------------------------------------------------------------
+ * Function: H5C__mark_flush_dep_unserialized()
+ *
+ * Purpose: Increment the flush_dep_nunser_children fields of all the
+ * target entry's flush dependency parents in response to
+ * the target entry becoming unserialized.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 8/30/16
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__mark_flush_dep_unserialized(H5C_cache_entry_t *entry_ptr)
+{
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_STATIC
+ FUNC_ENTER_STATIC
- /* Sanity checks */
- HDassert(entry_ptr);
+ /* Sanity checks */
+ HDassert(entry_ptr);
- /* Iterate over the parent entries, if any */
- for (u = 0; u < entry_ptr->flush_dep_nparents; u++) {
- /* Sanity check */
- HDassert(entry_ptr->flush_dep_parent);
- HDassert(entry_ptr->flush_dep_parent[u]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children <
- entry_ptr->flush_dep_parent[u]->flush_dep_nchildren);
-
- /* increment parents number of usserialized children */
- entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children++;
-
- /* If the parent has a 'notify' callback, send a 'child entry unserialized' notice */
- if (entry_ptr->flush_dep_parent[u]->type->notify &&
- (entry_ptr->flush_dep_parent[u]->type->notify)(H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED,
- entry_ptr->flush_dep_parent[u]) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify parent about child entry serialized flag reset")
- } /* end for */
+ /* Iterate over the parent entries, if any */
+ for (u = 0; u < entry_ptr->flush_dep_nparents; u++) {
+ /* Sanity check */
+ HDassert(entry_ptr->flush_dep_parent);
+ HDassert(entry_ptr->flush_dep_parent[u]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children <
+ entry_ptr->flush_dep_parent[u]->flush_dep_nchildren);
+
+        /* Increment the parent's number of unserialized children */
+ entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children++;
+
+ /* If the parent has a 'notify' callback, send a 'child entry unserialized' notice */
+ if (entry_ptr->flush_dep_parent[u]->type->notify &&
+ (entry_ptr->flush_dep_parent[u]->type->notify)(H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED,
+ entry_ptr->flush_dep_parent[u]) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify parent about child entry serialized flag reset")
+ } /* end for */
done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__mark_flush_dep_unserialized() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__mark_flush_dep_unserialized() */
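Analogous sketch for the serialized/unserialized pair above: flush_dep_nunser_children tracks how many of a parent's children currently lack an up-to-date image, and the two routines are invoked on the child as that state changes. The wrapper is, again, an illustrative assumption.

static herr_t
example_child_image_transition(H5C_cache_entry_t *child, hbool_t image_now_up_to_date)
{
    if (image_now_up_to_date) {
        /* Child's image was just (re)generated: decrement each parent's
         * flush_dep_nunser_children and send CHILD_SERIALIZED notices.
         */
        if (H5C__mark_flush_dep_serialized(child) < 0)
            return FAIL;
    }
    else {
        /* Child's image was just invalidated (e.g. the entry was dirtied or
         * resized): increment each parent's flush_dep_nunser_children and
         * send CHILD_UNSERIALIZED notices.
         */
        if (H5C__mark_flush_dep_unserialized(child) < 0)
            return FAIL;
    }

    return SUCCEED;
}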
#ifndef NDEBUG
- /*-------------------------------------------------------------------------
- * Function: H5C__assert_flush_dep_nocycle()
- *
- * Purpose: Assert recursively that base_entry is not the same as
- * entry, and perform the same assertion on all of entry's
- * flush dependency parents. This is used to detect cycles
- * created by flush dependencies.
- *
- * Return: void
- *
- * Programmer: Neil Fortner
- * 12/10/12
- *
- *-------------------------------------------------------------------------
- */
- static void H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t *entry,
- const H5C_cache_entry_t *base_entry)
- {
- unsigned u; /* Local index variable */
+/*-------------------------------------------------------------------------
+ * Function: H5C__assert_flush_dep_nocycle()
+ *
+ * Purpose: Assert recursively that base_entry is not the same as
+ * entry, and perform the same assertion on all of entry's
+ * flush dependency parents. This is used to detect cycles
+ * created by flush dependencies.
+ *
+ * Return: void
+ *
+ * Programmer: Neil Fortner
+ * 12/10/12
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t *entry, const H5C_cache_entry_t *base_entry)
+{
+ unsigned u; /* Local index variable */
- FUNC_ENTER_STATIC_NOERR
+ FUNC_ENTER_STATIC_NOERR
- /* Sanity checks */
- HDassert(entry);
- HDassert(base_entry);
+ /* Sanity checks */
+ HDassert(entry);
+ HDassert(base_entry);
- /* Make sure the entries are not the same */
- HDassert(base_entry != entry);
+ /* Make sure the entries are not the same */
+ HDassert(base_entry != entry);
- /* Iterate over entry's parents (if any) */
- for (u = 0; u < entry->flush_dep_nparents; u++)
- H5C__assert_flush_dep_nocycle(entry->flush_dep_parent[u], base_entry);
+ /* Iterate over entry's parents (if any) */
+ for (u = 0; u < entry->flush_dep_nparents; u++)
+ H5C__assert_flush_dep_nocycle(entry->flush_dep_parent[u], base_entry);
- FUNC_LEAVE_NOAPI_VOID
- } /* H5C__assert_flush_dep_nocycle() */
+ FUNC_LEAVE_NOAPI_VOID
+} /* H5C__assert_flush_dep_nocycle() */
#endif /* NDEBUG */
- /*-------------------------------------------------------------------------
- * Function: H5C__serialize_cache
- *
- * Purpose: Serialize (i.e. construct an on disk image) for all entries
- * in the metadata cache including clean entries.
- *
- * Note that flush dependencies and "flush me last" flags
- * must be observed in the serialization process.
- *
- * Note also that entries may be loaded, flushed, evicted,
- * expunged, relocated, resized, or removed from the cache
- * during this process, just as these actions may occur during
- * a regular flush.
- *
- * However, we are given that the cache will contain no protected
- * entries on entry to this routine (although entries may be
- * briefly protected and then unprotected during the serialize
- * process).
- *
- * The objective of this routine is serialize all entries and
- * to force all entries into their actual locations on disk.
- *
- * The initial need for this routine is to settle all entries
- * in the cache prior to construction of the metadata cache
- * image so that the size of the cache image can be calculated.
- * However, I gather that other uses for the routine are
- * under consideration.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * a request to flush all items and something was protected.
- *
- * Programmer: John Mainzer
- * 7/22/15
- *
- *-------------------------------------------------------------------------
- */
- herr_t H5C__serialize_cache(H5F_t * f)
- {
+/*-------------------------------------------------------------------------
+ * Function: H5C__serialize_cache
+ *
+ * Purpose:     Serialize (i.e. construct an on-disk image of) all entries
+ *              in the metadata cache, including clean entries.
+ *
+ * Note that flush dependencies and "flush me last" flags
+ * must be observed in the serialization process.
+ *
+ * Note also that entries may be loaded, flushed, evicted,
+ * expunged, relocated, resized, or removed from the cache
+ * during this process, just as these actions may occur during
+ * a regular flush.
+ *
+ * However, we are given that the cache will contain no protected
+ * entries on entry to this routine (although entries may be
+ * briefly protected and then unprotected during the serialize
+ * process).
+ *
+ *              The objective of this routine is to serialize all entries and
+ * to force all entries into their actual locations on disk.
+ *
+ * The initial need for this routine is to settle all entries
+ * in the cache prior to construction of the metadata cache
+ * image so that the size of the cache image can be calculated.
+ * However, I gather that other uses for the routine are
+ * under consideration.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 7/22/15
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__serialize_cache(H5F_t *f)
+{
#if H5C_DO_SANITY_CHECKS
- int i;
- uint32_t index_len = 0;
- size_t index_size = (size_t)0;
- size_t clean_index_size = (size_t)0;
- size_t dirty_index_size = (size_t)0;
- size_t slist_size = (size_t)0;
- uint32_t slist_len = 0;
+ int i;
+ uint32_t index_len = 0;
+ size_t index_size = (size_t)0;
+ size_t clean_index_size = (size_t)0;
+ size_t dirty_index_size = (size_t)0;
+ size_t slist_size = (size_t)0;
+ uint32_t slist_len = 0;
#endif /* H5C_DO_SANITY_CHECKS */
- H5C_ring_t ring;
- H5C_t *cache_ptr;
- herr_t ret_value = SUCCEED;
+ H5C_ring_t ring;
+ H5C_t *cache_ptr;
+ herr_t ret_value = SUCCEED;
- FUNC_ENTER_PACKAGE
+ FUNC_ENTER_PACKAGE
- /* Sanity checks */
- HDassert(f);
- HDassert(f->shared);
- cache_ptr = f->shared->cache;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->slist_ptr);
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_ptr);
#if H5C_DO_SANITY_CHECKS
- HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
- HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
- HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- for (i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
- index_len += cache_ptr->index_ring_len[i];
- index_size += cache_ptr->index_ring_size[i];
- clean_index_size += cache_ptr->clean_index_ring_size[i];
- dirty_index_size += cache_ptr->dirty_index_ring_size[i];
+ for (i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
+ index_len += cache_ptr->index_ring_len[i];
+ index_size += cache_ptr->index_ring_size[i];
+ clean_index_size += cache_ptr->clean_index_ring_size[i];
+ dirty_index_size += cache_ptr->dirty_index_ring_size[i];
- slist_len += cache_ptr->slist_ring_len[i];
- slist_size += cache_ptr->slist_ring_size[i];
- } /* end for */
+ slist_len += cache_ptr->slist_ring_len[i];
+ slist_size += cache_ptr->slist_ring_size[i];
+ } /* end for */
- HDassert(cache_ptr->index_len == index_len);
- HDassert(cache_ptr->index_size == index_size);
- HDassert(cache_ptr->clean_index_size == clean_index_size);
- HDassert(cache_ptr->dirty_index_size == dirty_index_size);
- HDassert(cache_ptr->slist_len == slist_len);
- HDassert(cache_ptr->slist_size == slist_size);
+ HDassert(cache_ptr->index_len == index_len);
+ HDassert(cache_ptr->index_size == index_size);
+ HDassert(cache_ptr->clean_index_size == clean_index_size);
+ HDassert(cache_ptr->dirty_index_size == dirty_index_size);
+ HDassert(cache_ptr->slist_len == slist_len);
+ HDassert(cache_ptr->slist_size == slist_size);
#endif /* H5C_DO_SANITY_CHECKS */
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+ if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
#ifndef NDEBUG
- /* if this is a debug build, set the serialization_count field of
- * each entry in the cache to zero before we start the serialization.
- * This allows us to detect the case in which any entry is serialized
- * more than once (a performance issues), and more importantly, the
- * case is which any flush dependency parent is serializes more than
- * once (a correctness issue).
- */
- {
- H5C_cache_entry_t *scan_ptr = NULL;
-
- scan_ptr = cache_ptr->il_head;
- while (scan_ptr != NULL) {
- HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- scan_ptr->serialization_count = 0;
- scan_ptr = scan_ptr->il_next;
- } /* end while */
- } /* end block */
-#endif /* NDEBUG */
-
- /* set cache_ptr->serialization_in_progress to TRUE, and back
- * to FALSE at the end of the function. Must maintain this flag
- * to support H5C_get_serialization_in_progress(), which is in
- * turn required to support sanity checking in some cache
- * clients.
- */
- HDassert(!cache_ptr->serialization_in_progress);
- cache_ptr->serialization_in_progress = TRUE;
+ /* if this is a debug build, set the serialization_count field of
+ * each entry in the cache to zero before we start the serialization.
+ * This allows us to detect the case in which any entry is serialized
+     * more than once (a performance issue), and more importantly, the
+     * case in which any flush dependency parent is serialized more than
+ * once (a correctness issue).
+ */
+ {
+ H5C_cache_entry_t *scan_ptr = NULL;
- /* Serialize each ring, starting from the outermost ring and
- * working inward.
- */
- ring = H5C_RING_USER;
- while (ring < H5C_RING_NTYPES) {
- HDassert(cache_ptr->close_warning_received);
- switch (ring) {
- case H5C_RING_USER:
- break;
+ scan_ptr = cache_ptr->il_head;
+ while (scan_ptr != NULL) {
+ HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ scan_ptr->serialization_count = 0;
+ scan_ptr = scan_ptr->il_next;
+ } /* end while */
+ } /* end block */
+#endif /* NDEBUG */
- case H5C_RING_RDFSM:
- /* Settle raw data FSM */
- if (!cache_ptr->rdfsm_settled)
- if (H5MF_settle_raw_data_fsm(f, &cache_ptr->rdfsm_settled) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "RD FSM settle failed")
- break;
+ /* set cache_ptr->serialization_in_progress to TRUE, and back
+ * to FALSE at the end of the function. Must maintain this flag
+ * to support H5C_get_serialization_in_progress(), which is in
+ * turn required to support sanity checking in some cache
+ * clients.
+ */
+ HDassert(!cache_ptr->serialization_in_progress);
+ cache_ptr->serialization_in_progress = TRUE;
- case H5C_RING_MDFSM:
- /* Settle metadata FSM */
- if (!cache_ptr->mdfsm_settled)
- if (H5MF_settle_meta_data_fsm(f, &cache_ptr->mdfsm_settled) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "MD FSM settle failed")
- break;
+ /* Serialize each ring, starting from the outermost ring and
+ * working inward.
+ */
+ ring = H5C_RING_USER;
+ while (ring < H5C_RING_NTYPES) {
+ HDassert(cache_ptr->close_warning_received);
+ switch (ring) {
+ case H5C_RING_USER:
+ break;
- case H5C_RING_SBE:
- case H5C_RING_SB:
- break;
+ case H5C_RING_RDFSM:
+ /* Settle raw data FSM */
+ if (!cache_ptr->rdfsm_settled)
+ if (H5MF_settle_raw_data_fsm(f, &cache_ptr->rdfsm_settled) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "RD FSM settle failed")
+ break;
- default:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown ring?!?!")
- break;
- } /* end switch */
+ case H5C_RING_MDFSM:
+ /* Settle metadata FSM */
+ if (!cache_ptr->mdfsm_settled)
+ if (H5MF_settle_meta_data_fsm(f, &cache_ptr->mdfsm_settled) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "MD FSM settle failed")
+ break;
- if (H5C__serialize_ring(f, ring) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "serialize ring failed")
+ case H5C_RING_SBE:
+ case H5C_RING_SB:
+ break;
- ring++;
- } /* end while */
+ default:
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown ring?!?!")
+ break;
+ } /* end switch */
+
+ if (H5C__serialize_ring(f, ring) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "serialize ring failed")
+
+ ring++;
+ } /* end while */
#ifndef NDEBUG
- /* Verify that no entry has been serialized more than once.
- * FD parents with multiple serializations should have been caught
- * elsewhere, so no specific check for them here.
- */
- {
- H5C_cache_entry_t *scan_ptr = NULL;
+ /* Verify that no entry has been serialized more than once.
+ * FD parents with multiple serializations should have been caught
+ * elsewhere, so no specific check for them here.
+ */
+ {
+ H5C_cache_entry_t *scan_ptr = NULL;
- scan_ptr = cache_ptr->il_head;
- while (scan_ptr != NULL) {
- HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(scan_ptr->serialization_count <= 1);
+ scan_ptr = cache_ptr->il_head;
+ while (scan_ptr != NULL) {
+ HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(scan_ptr->serialization_count <= 1);
- scan_ptr = scan_ptr->il_next;
- } /* end while */
- } /* end block */
-#endif /* NDEBUG */
+ scan_ptr = scan_ptr->il_next;
+ } /* end while */
+ } /* end block */
+#endif /* NDEBUG */
done:
- cache_ptr->serialization_in_progress = FALSE;
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__serialize_cache() */
+ cache_ptr->serialization_in_progress = FALSE;
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__serialize_cache() */
- /*-------------------------------------------------------------------------
- * Function: H5C__serialize_ring
- *
- * Purpose: Serialize the entries contained in the specified cache and
- * ring. All entries in rings outside the specified ring
- * must have been serialized on entry.
- *
- * If the cache contains protected entries in the specified
- * ring, the function will fail, as protected entries cannot
- * be serialized. However all unprotected entries in the
- * target ring should be serialized before the function
- * returns failure.
- *
- * If flush dependencies appear in the target ring, the
- * function makes repeated passes through the index list
- * serializing entries in flush dependency order.
- *
- * All entries outside the H5C_RING_SBE are marked for
- * inclusion in the cache image. Entries in H5C_RING_SBE
- * and below are marked for exclusion from the image.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * a request to flush all items and something was protected.
- *
- * Programmer: John Mainzer
- * 9/11/15
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__serialize_ring(H5F_t * f, H5C_ring_t ring)
- {
- hbool_t done = FALSE;
- H5C_t *cache_ptr;
- H5C_cache_entry_t *entry_ptr;
- herr_t ret_value = SUCCEED;
+/*-------------------------------------------------------------------------
+ * Function: H5C__serialize_ring
+ *
+ * Purpose: Serialize the entries contained in the specified cache and
+ * ring. All entries in rings outside the specified ring
+ * must have been serialized on entry.
+ *
+ * If the cache contains protected entries in the specified
+ * ring, the function will fail, as protected entries cannot
+ * be serialized. However all unprotected entries in the
+ * target ring should be serialized before the function
+ * returns failure.
+ *
+ * If flush dependencies appear in the target ring, the
+ * function makes repeated passes through the index list
+ * serializing entries in flush dependency order.
+ *
+ * All entries outside the H5C_RING_SBE are marked for
+ * inclusion in the cache image. Entries in H5C_RING_SBE
+ * and below are marked for exclusion from the image.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 9/11/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__serialize_ring(H5F_t *f, H5C_ring_t ring)
+{
+ hbool_t done = FALSE;
+ H5C_t *cache_ptr;
+ H5C_cache_entry_t *entry_ptr;
+ herr_t ret_value = SUCCEED;
- FUNC_ENTER_STATIC
+ FUNC_ENTER_STATIC
- /* Sanity checks */
- HDassert(f);
- HDassert(f->shared);
- cache_ptr = f->shared->cache;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(ring > H5C_RING_UNDEFINED);
- HDassert(ring < H5C_RING_NTYPES);
-
- HDassert(cache_ptr->serialization_in_progress);
-
- /* The objective here is to serialize all entries in the cache ring
- * in flush dependency order.
- *
- * The basic algorithm is to scan the cache index list looking for
- * unserialized entries that are either not in a flush dependency
- * relationship, or which have no unserialized children. Any such
- * entry is serialized and its flush dependency parents (if any) are
- * informed -- allowing them to decrement their userialized child counts.
- *
- * However, this algorithm is complicated by the ability
- * of client serialization callbacks to perform operations on
- * on the cache which can result in the insertion, deletion,
- * relocation, resize, dirty, flush, eviction, or removal (via the
- * take ownership flag) of entries. Changes in the flush dependency
- * structure are also possible.
- *
- * On the other hand, the algorithm is simplified by the fact that
- * we are serializing, not flushing. Thus, as long as all entries
- * are serialized correctly, it doesn't matter if we have to go back
- * and serialize an entry a second time.
- *
- * These possible actions result in the following modifications to
- * the basic algorithm:
- *
- * 1) In the event of an entry expunge, eviction or removal, we must
- * restart the scan as it is possible that the next entry in our
- * scan is no longer in the cache. Were we to examine this entry,
- * we would be accessing deallocated memory.
- *
- * 2) A resize, dirty, or insertion of an entry may result in the
- * the increment of a flush dependency parent's dirty and/or
- * unserialized child count. In the context of serializing the
- * the cache, this is a non-issue, as even if we have already
- * serialized the parent, it will be marked dirty and its image
- * marked out of date if appropriate when the child is serialized.
- *
- * However, this is a major issue for a flush, as were this to happen
- * in a flush, it would violate the invariant that the flush dependency
- * feature is intended to enforce. As the metadata cache has no
- * control over the behavior of cache clients, it has no way of
- * preventing this behaviour. However, it should detect it if at all
- * possible.
- *
- * Do this by maintaining a count of the number of times each entry is
- * serialized during a cache serialization. If any flush dependency
- * parent is serialized more than once, throw an assertion failure.
- *
- * 3) An entry relocation will typically change the location of the
- * entry in the index list. This shouldn't cause problems as we
- * will scan the index list until we make a complete pass without
- * finding anything to serialize -- making relocations of either
- * the current or next entries irrelevant.
- *
- * Note that since a relocation may result in our skipping part of
- * the index list, we must always do at least one more pass through
- * the index list after an entry relocation.
- *
- * 4) Changes in the flush dependency structure are possible on
- * entry insertion, load, expunge, evict, or remove. Destruction
- * of a flush dependency has no effect, as it can only relax the
- * flush dependencies. Creation of a flush dependency can create
- * an unserialized child of a flush dependency parent where all
- * flush dependency children were previously serialized. Should
- * this child dirty the flush dependency parent when it is serialized,
- * the parent will be re-serialized.
- *
- * Per the discussion of 2) above, this is a non issue for cache
- * serialization, and a major problem for cache flush. Using the
- * same detection mechanism, throw an assertion failure if this
- * condition appears.
- *
- * Observe that either eviction or removal of entries as a result of
- * a serialization is not a problem as long as the flush dependency
- * tree does not change beyond the removal of a leaf.
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(ring > H5C_RING_UNDEFINED);
+ HDassert(ring < H5C_RING_NTYPES);
+
+ HDassert(cache_ptr->serialization_in_progress);
+
+ /* The objective here is to serialize all entries in the cache ring
+ * in flush dependency order.
+ *
+ * The basic algorithm is to scan the cache index list looking for
+ * unserialized entries that are either not in a flush dependency
+ * relationship, or which have no unserialized children. Any such
+ * entry is serialized and its flush dependency parents (if any) are
+     * informed -- allowing them to decrement their unserialized child counts.
+ *
+ * However, this algorithm is complicated by the ability
+     * of client serialization callbacks to perform operations on
+     * the cache, which can result in the insertion, deletion,
+ * relocation, resize, dirty, flush, eviction, or removal (via the
+ * take ownership flag) of entries. Changes in the flush dependency
+ * structure are also possible.
+ *
+ * On the other hand, the algorithm is simplified by the fact that
+ * we are serializing, not flushing. Thus, as long as all entries
+ * are serialized correctly, it doesn't matter if we have to go back
+ * and serialize an entry a second time.
+ *
+ * These possible actions result in the following modifications to
+ * the basic algorithm:
+ *
+ * 1) In the event of an entry expunge, eviction or removal, we must
+ * restart the scan as it is possible that the next entry in our
+ * scan is no longer in the cache. Were we to examine this entry,
+ * we would be accessing deallocated memory.
+ *
+     * 2) A resize, dirty, or insertion of an entry may result in the
+     *    increment of a flush dependency parent's dirty and/or
+     *    unserialized child count.  In the context of serializing
+     *    the cache, this is a non-issue, as even if we have already
+ * serialized the parent, it will be marked dirty and its image
+ * marked out of date if appropriate when the child is serialized.
+ *
+ * However, this is a major issue for a flush, as were this to happen
+ * in a flush, it would violate the invariant that the flush dependency
+ * feature is intended to enforce. As the metadata cache has no
+ * control over the behavior of cache clients, it has no way of
+     * preventing this behavior.  However, it should detect it if at all
+ * possible.
+ *
+ * Do this by maintaining a count of the number of times each entry is
+ * serialized during a cache serialization. If any flush dependency
+ * parent is serialized more than once, throw an assertion failure.
+ *
+ * 3) An entry relocation will typically change the location of the
+ * entry in the index list. This shouldn't cause problems as we
+ * will scan the index list until we make a complete pass without
+ * finding anything to serialize -- making relocations of either
+ * the current or next entries irrelevant.
+ *
+ * Note that since a relocation may result in our skipping part of
+ * the index list, we must always do at least one more pass through
+ * the index list after an entry relocation.
+ *
+ * 4) Changes in the flush dependency structure are possible on
+ * entry insertion, load, expunge, evict, or remove. Destruction
+ * of a flush dependency has no effect, as it can only relax the
+ * flush dependencies. Creation of a flush dependency can create
+ * an unserialized child of a flush dependency parent where all
+ * flush dependency children were previously serialized. Should
+ * this child dirty the flush dependency parent when it is serialized,
+ * the parent will be re-serialized.
+ *
+     * Per the discussion of 2) above, this is a non-issue for cache
+ * serialization, and a major problem for cache flush. Using the
+ * same detection mechanism, throw an assertion failure if this
+ * condition appears.
+ *
+ * Observe that either eviction or removal of entries as a result of
+ * a serialization is not a problem as long as the flush dependency
+ * tree does not change beyond the removal of a leaf.
+ */
+ while (!done) {
+ /* Reset the counters so that we can detect insertions, loads,
+ * moves, and flush dependency height changes caused by the pre_serialize
+ * and serialize callbacks.
*/
- while (!done) {
- /* Reset the counters so that we can detect insertions, loads,
- * moves, and flush dependency height changes caused by the pre_serialize
- * and serialize callbacks.
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
+
+ done = TRUE; /* set to FALSE if any activity in inner loop */
+ entry_ptr = cache_ptr->il_head;
+ while (entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+
+ /* Verify that either the entry is already serialized, or
+ * that it is assigned to either the target or an inner
+ * ring.
*/
- cache_ptr->entries_loaded_counter = 0;
- cache_ptr->entries_inserted_counter = 0;
- cache_ptr->entries_relocated_counter = 0;
-
- done = TRUE; /* set to FALSE if any activity in inner loop */
- entry_ptr = cache_ptr->il_head;
- while (entry_ptr != NULL) {
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
-
- /* Verify that either the entry is already serialized, or
- * that it is assigned to either the target or an inner
- * ring.
- */
- HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));
+ HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));
- /* Skip flush me last entries or inner ring entries */
- if (!entry_ptr->flush_me_last && entry_ptr->ring == ring) {
+ /* Skip flush me last entries or inner ring entries */
+ if (!entry_ptr->flush_me_last && entry_ptr->ring == ring) {
- /* if we encounter an unserialized entry in the current
- * ring that is not marked flush me last, we are not done.
- */
- if (!entry_ptr->image_up_to_date)
- done = FALSE;
+ /* if we encounter an unserialized entry in the current
+ * ring that is not marked flush me last, we are not done.
+ */
+ if (!entry_ptr->image_up_to_date)
+ done = FALSE;
- /* Serialize the entry if its image is not up to date
- * and it has no unserialized flush dependency children.
- */
- if (!entry_ptr->image_up_to_date && entry_ptr->flush_dep_nunser_children == 0) {
- HDassert(entry_ptr->serialization_count == 0);
+ /* Serialize the entry if its image is not up to date
+ * and it has no unserialized flush dependency children.
+ */
+ if (!entry_ptr->image_up_to_date && entry_ptr->flush_dep_nunser_children == 0) {
+ HDassert(entry_ptr->serialization_count == 0);
- /* Serialize the entry */
- if (H5C__serialize_single_entry(f, cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")
+ /* Serialize the entry */
+ if (H5C__serialize_single_entry(f, cache_ptr, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")
- HDassert(entry_ptr->flush_dep_nunser_children == 0);
- HDassert(entry_ptr->serialization_count == 0);
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
+ HDassert(entry_ptr->serialization_count == 0);
#ifndef NDEBUG
- /* Increment serialization counter (to detect multiple serializations) */
- entry_ptr->serialization_count++;
-#endif /* NDEBUG */
- } /* end if */
- } /* end if */
+ /* Increment serialization counter (to detect multiple serializations) */
+ entry_ptr->serialization_count++;
+#endif /* NDEBUG */
+ } /* end if */
+ } /* end if */
- /* Check for the cache being perturbed during the entry serialize */
- if ((cache_ptr->entries_loaded_counter > 0) || (cache_ptr->entries_inserted_counter > 0) ||
- (cache_ptr->entries_relocated_counter > 0)) {
+ /* Check for the cache being perturbed during the entry serialize */
+ if ((cache_ptr->entries_loaded_counter > 0) || (cache_ptr->entries_inserted_counter > 0) ||
+ (cache_ptr->entries_relocated_counter > 0)) {
#if H5C_COLLECT_CACHE_STATS
- H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr);
+ H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr);
#endif /* H5C_COLLECT_CACHE_STATS */
- /* Reset the counters */
- cache_ptr->entries_loaded_counter = 0;
- cache_ptr->entries_inserted_counter = 0;
- cache_ptr->entries_relocated_counter = 0;
+ /* Reset the counters */
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
- /* Restart scan */
- entry_ptr = cache_ptr->il_head;
- } /* end if */
- else
- /* Advance to next entry */
- entry_ptr = entry_ptr->il_next;
- } /* while ( entry_ptr != NULL ) */
- } /* while ( ! done ) */
+ /* Restart scan */
+ entry_ptr = cache_ptr->il_head;
+ } /* end if */
+ else
+ /* Advance to next entry */
+ entry_ptr = entry_ptr->il_next;
+ } /* while ( entry_ptr != NULL ) */
+ } /* while ( ! done ) */
- /* Reset the counters so that we can detect insertions, loads,
- * moves, and flush dependency height changes caused by the pre_serialize
- * and serialize callbacks.
- */
- cache_ptr->entries_loaded_counter = 0;
- cache_ptr->entries_inserted_counter = 0;
- cache_ptr->entries_relocated_counter = 0;
+ /* Reset the counters so that we can detect insertions, loads,
+ * moves, and flush dependency height changes caused by the pre_serialize
+ * and serialize callbacks.
+ */
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
+
+ /* At this point, all entries not marked "flush me last" and in
+ * the current ring or outside it should be serialized and have up
+ * to date images. Scan the index list again to serialize the
+ * "flush me last" entries (if they are in the current ring) and to
+ * verify that all other entries have up to date images.
+ */
+ entry_ptr = cache_ptr->il_head;
+ while (entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring > H5C_RING_UNDEFINED);
+ HDassert(entry_ptr->ring < H5C_RING_NTYPES);
+ HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));
+
+ if (entry_ptr->ring == ring) {
+ if (entry_ptr->flush_me_last) {
+ if (!entry_ptr->image_up_to_date) {
+ HDassert(entry_ptr->serialization_count == 0);
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
- /* At this point, all entries not marked "flush me last" and in
- * the current ring or outside it should be serialized and have up
- * to date images. Scan the index list again to serialize the
- * "flush me last" entries (if they are in the current ring) and to
- * verify that all other entries have up to date images.
- */
- entry_ptr = cache_ptr->il_head;
- while (entry_ptr != NULL) {
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->ring > H5C_RING_UNDEFINED);
- HDassert(entry_ptr->ring < H5C_RING_NTYPES);
- HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));
+ /* Serialize the entry */
+ if (H5C__serialize_single_entry(f, cache_ptr, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")
+
+ /* Check for the cache changing */
+ if ((cache_ptr->entries_loaded_counter > 0) ||
+ (cache_ptr->entries_inserted_counter > 0) ||
+ (cache_ptr->entries_relocated_counter > 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
+ "flush_me_last entry serialization triggered restart")
- if (entry_ptr->ring == ring) {
- if (entry_ptr->flush_me_last) {
- if (!entry_ptr->image_up_to_date) {
- HDassert(entry_ptr->serialization_count == 0);
- HDassert(entry_ptr->flush_dep_nunser_children == 0);
-
- /* Serialize the entry */
- if (H5C__serialize_single_entry(f, cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")
-
- /* Check for the cache changing */
- if ((cache_ptr->entries_loaded_counter > 0) ||
- (cache_ptr->entries_inserted_counter > 0) ||
- (cache_ptr->entries_relocated_counter > 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
- "flush_me_last entry serialization triggered restart")
-
- HDassert(entry_ptr->flush_dep_nunser_children == 0);
- HDassert(entry_ptr->serialization_count == 0);
-#ifndef NDEBUG
- /* Increment serialization counter (to detect multiple serializations) */
- entry_ptr->serialization_count++;
-#endif /* NDEBUG */
- } /* end if */
- } /* end if */
- else {
- HDassert(entry_ptr->image_up_to_date);
- HDassert(entry_ptr->serialization_count <= 1);
HDassert(entry_ptr->flush_dep_nunser_children == 0);
- } /* end else */
- } /* if ( entry_ptr->ring == ring ) */
+ HDassert(entry_ptr->serialization_count == 0);
+#ifndef NDEBUG
+ /* Increment serialization counter (to detect multiple serializations) */
+ entry_ptr->serialization_count++;
+#endif /* NDEBUG */
+ } /* end if */
+ } /* end if */
+ else {
+ HDassert(entry_ptr->image_up_to_date);
+ HDassert(entry_ptr->serialization_count <= 1);
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
+ } /* end else */
+ } /* if ( entry_ptr->ring == ring ) */
- entry_ptr = entry_ptr->il_next;
- } /* while ( entry_ptr != NULL ) */
+ entry_ptr = entry_ptr->il_next;
+ } /* while ( entry_ptr != NULL ) */
done:
- HDassert(cache_ptr->serialization_in_progress);
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__serialize_ring() */
+ HDassert(cache_ptr->serialization_in_progress);
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__serialize_ring() */
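The scan/restart algorithm spelled out in the long comment inside H5C__serialize_ring() above, condensed into a control-flow-only sketch. It keeps only the decisions the comment motivates (serialize an entry only when it has no unserialized children, restart the index-list scan whenever the callbacks perturb the cache, repeat until a clean pass); the sanity checks and the final "flush me last" pass are omitted, so this is a reading aid rather than a drop-in replacement.

static herr_t
example_serialize_ring_outline(H5F_t *f, H5C_t *cache_ptr, H5C_ring_t ring)
{
    hbool_t            done = FALSE;
    H5C_cache_entry_t *entry_ptr;

    while (!done) {
        /* Reset the counters the cache maintains so that loads, insertions,
         * and relocations caused by the serialize callbacks are detectable.
         */
        cache_ptr->entries_loaded_counter    = 0;
        cache_ptr->entries_inserted_counter  = 0;
        cache_ptr->entries_relocated_counter = 0;

        done      = TRUE;               /* cleared if any work remains */
        entry_ptr = cache_ptr->il_head; /* scan the index list */
        while (entry_ptr != NULL) {
            if (!entry_ptr->flush_me_last && entry_ptr->ring == ring &&
                !entry_ptr->image_up_to_date) {
                done = FALSE; /* at least one more pass will be needed */

                /* Serializing only entries with no unserialized children is
                 * what yields flush dependency order.
                 */
                if (entry_ptr->flush_dep_nunser_children == 0)
                    if (H5C__serialize_single_entry(f, cache_ptr, entry_ptr) < 0)
                        return FAIL;
            } /* end if */

            /* Per notes 1) and 3) above: if the callbacks loaded, inserted,
             * or relocated entries, restart the scan rather than follow a
             * possibly stale il_next pointer.
             */
            if (cache_ptr->entries_loaded_counter > 0 ||
                cache_ptr->entries_inserted_counter > 0 ||
                cache_ptr->entries_relocated_counter > 0) {
                cache_ptr->entries_loaded_counter    = 0;
                cache_ptr->entries_inserted_counter  = 0;
                cache_ptr->entries_relocated_counter = 0;
                entry_ptr                            = cache_ptr->il_head;
            } /* end if */
            else
                entry_ptr = entry_ptr->il_next;
        } /* end while */
    } /* end while */

    return SUCCEED;
}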
- /*-------------------------------------------------------------------------
- * Function: H5C__serialize_single_entry
- *
- * Purpose: Serialize the cache entry pointed to by the entry_ptr
- * parameter.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer, 7/24/15
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__serialize_single_entry(H5F_t * f, H5C_t * cache_ptr, H5C_cache_entry_t * entry_ptr)
- {
- herr_t ret_value = SUCCEED; /* Return value */
+/*-------------------------------------------------------------------------
+ * Function: H5C__serialize_single_entry
+ *
+ * Purpose: Serialize the cache entry pointed to by the entry_ptr
+ * parameter.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer, 7/24/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__serialize_single_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_STATIC
+ FUNC_ENTER_STATIC
- /* Sanity checks */
- HDassert(f);
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(entry_ptr);
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(!entry_ptr->prefetched);
- HDassert(!entry_ptr->image_up_to_date);
- HDassert(entry_ptr->is_dirty);
- HDassert(!entry_ptr->is_protected);
- HDassert(!entry_ptr->flush_in_progress);
- HDassert(entry_ptr->type);
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(entry_ptr);
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(!entry_ptr->prefetched);
+ HDassert(!entry_ptr->image_up_to_date);
+ HDassert(entry_ptr->is_dirty);
+ HDassert(!entry_ptr->is_protected);
+ HDassert(!entry_ptr->flush_in_progress);
+ HDassert(entry_ptr->type);
- /* Set entry_ptr->flush_in_progress to TRUE so the the target entry
- * will not be evicted out from under us. Must set it back to FALSE
- * when we are done.
- */
- entry_ptr->flush_in_progress = TRUE;
+    /* Set entry_ptr->flush_in_progress to TRUE so the target entry
+ * will not be evicted out from under us. Must set it back to FALSE
+ * when we are done.
+ */
+ entry_ptr->flush_in_progress = TRUE;
- /* Allocate buffer for the entry image if required. */
- if (NULL == entry_ptr->image_ptr) {
- HDassert(entry_ptr->size > 0);
- if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL,
- "memory allocation failed for on disk image buffer")
+ /* Allocate buffer for the entry image if required. */
+ if (NULL == entry_ptr->image_ptr) {
+ HDassert(entry_ptr->size > 0);
+ if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer")
#if H5C_DO_MEMORY_SANITY_CHECKS
- H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + image_size, H5C_IMAGE_SANITY_VALUE,
- H5C_IMAGE_EXTRA_SPACE);
-#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
- } /* end if */
+        H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE,
+ H5C_IMAGE_EXTRA_SPACE);
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+ } /* end if */
- /* Generate image for entry */
- if (H5C__generate_image(f, cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "Can't generate image for cache entry")
+ /* Generate image for entry */
+ if (H5C__generate_image(f, cache_ptr, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "Can't generate image for cache entry")
- /* Reset the flush_in progress flag */
- entry_ptr->flush_in_progress = FALSE;
+ /* Reset the flush_in progress flag */
+ entry_ptr->flush_in_progress = FALSE;
done:
- HDassert((ret_value != SUCCEED) || (!entry_ptr->flush_in_progress));
- HDassert((ret_value != SUCCEED) || (entry_ptr->image_up_to_date));
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__serialize_single_entry() */
+ HDassert((ret_value != SUCCEED) || (!entry_ptr->flush_in_progress));
+ HDassert((ret_value != SUCCEED) || (entry_ptr->image_up_to_date));
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__serialize_single_entry() */
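H5C__serialize_single_entry() above over-allocates the on-disk image buffer by H5C_IMAGE_EXTRA_SPACE bytes and, when H5C_DO_MEMORY_SANITY_CHECKS is enabled, stamps a known pattern past the nominal image size, which is later verified with HDmemcmp() after serialization. A standalone sketch of that guard-byte idea follows; the TOY_* names and the 8-byte pattern are assumptions made for illustration, not the HDF5 definitions.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define TOY_IMAGE_EXTRA_SPACE 8
static const uint8_t toy_sanity_value[TOY_IMAGE_EXTRA_SPACE] = {0xDE, 0xAD, 0xBE, 0xEF,
                                                                0xDE, 0xAD, 0xBE, 0xEF};

/* Allocate an image buffer with guard bytes stamped past the usable area. */
static uint8_t *
toy_alloc_image(size_t image_size)
{
    uint8_t *buf = malloc(image_size + TOY_IMAGE_EXTRA_SPACE);

    if (buf != NULL)
        memcpy(buf + image_size, toy_sanity_value, TOY_IMAGE_EXTRA_SPACE);
    return buf;
}

/* A well-behaved serializer writes only the first image_size bytes. */
static void
toy_serialize(uint8_t *image, size_t image_size)
{
    memset(image, 0xAB, image_size);
}

int
main(void)
{
    size_t   image_size = 64;
    uint8_t *image      = toy_alloc_image(image_size);

    if (image == NULL)
        return 1;
    toy_serialize(image, image_size);

    /* The guard bytes must still be intact -- otherwise the serializer overran. */
    assert(0 == memcmp(image + image_size, toy_sanity_value, TOY_IMAGE_EXTRA_SPACE));

    free(image);
    return 0;
}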
- /*-------------------------------------------------------------------------
- * Function: H5C__generate_image
- *
- * Purpose: Serialize an entry and generate its image.
- *
- * Note: This may cause the entry to be re-sized and/or moved in
- * the cache.
- *
- * As we will not update the metadata cache's data structures
- * until we we finish the write, we must touch up these
- * data structures for size and location changes even if we
- * are about to delete the entry from the cache (i.e. on a
- * flush destroy).
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Mohamad Chaarawi
- * 2/10/16
- *
- * Changes: Updated sanity checks for the possibility that the skip
- * list is disabled.
- * JRM 5/16/20
- *
- *-------------------------------------------------------------------------
- */
- herr_t H5C__generate_image(H5F_t * f, H5C_t * cache_ptr, H5C_cache_entry_t * entry_ptr)
- {
- haddr_t new_addr = HADDR_UNDEF;
- haddr_t old_addr = HADDR_UNDEF;
- size_t new_len = 0;
- unsigned serialize_flags = H5C__SERIALIZE_NO_FLAGS_SET;
- herr_t ret_value = SUCCEED;
+/*-------------------------------------------------------------------------
+ * Function: H5C__generate_image
+ *
+ * Purpose: Serialize an entry and generate its image.
+ *
+ * Note: This may cause the entry to be re-sized and/or moved in
+ * the cache.
+ *
+ * As we will not update the metadata cache's data structures
+ *              until we finish the write, we must touch up these
+ * data structures for size and location changes even if we
+ * are about to delete the entry from the cache (i.e. on a
+ * flush destroy).
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Mohamad Chaarawi
+ * 2/10/16
+ *
+ * Changes: Updated sanity checks for the possibility that the skip
+ * list is disabled.
+ * JRM 5/16/20
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
+{
+ haddr_t new_addr = HADDR_UNDEF;
+ haddr_t old_addr = HADDR_UNDEF;
+ size_t new_len = 0;
+ unsigned serialize_flags = H5C__SERIALIZE_NO_FLAGS_SET;
+ herr_t ret_value = SUCCEED;
- FUNC_ENTER_PACKAGE
+ FUNC_ENTER_PACKAGE
- /* Sanity check */
- HDassert(f);
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(entry_ptr);
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(!entry_ptr->image_up_to_date);
- HDassert(entry_ptr->is_dirty);
- HDassert(!entry_ptr->is_protected);
- HDassert(entry_ptr->type);
+ /* Sanity check */
+ HDassert(f);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(entry_ptr);
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(!entry_ptr->image_up_to_date);
+ HDassert(entry_ptr->is_dirty);
+ HDassert(!entry_ptr->is_protected);
+ HDassert(entry_ptr->type);
- /* make note of the entry's current address */
- old_addr = entry_ptr->addr;
+ /* make note of the entry's current address */
+ old_addr = entry_ptr->addr;
- /* Call client's pre-serialize callback, if there's one */
- if ((entry_ptr->type->pre_serialize) &&
- ((entry_ptr->type->pre_serialize)(f, (void *)entry_ptr, entry_ptr->addr, entry_ptr->size,
- &new_addr, &new_len, &serialize_flags) < 0))
+ /* Call client's pre-serialize callback, if there's one */
+ if ((entry_ptr->type->pre_serialize) &&
+ ((entry_ptr->type->pre_serialize)(f, (void *)entry_ptr, entry_ptr->addr, entry_ptr->size, &new_addr,
+ &new_len, &serialize_flags) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to pre-serialize entry")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to pre-serialize entry")
- /* Check for any flags set in the pre-serialize callback */
- if (serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) {
+ /* Check for any flags set in the pre-serialize callback */
+ if (serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) {
- /* Check for unexpected flags from serialize callback */
- if (serialize_flags & ~(H5C__SERIALIZE_RESIZED_FLAG | H5C__SERIALIZE_MOVED_FLAG))
+ /* Check for unexpected flags from serialize callback */
+ if (serialize_flags & ~(H5C__SERIALIZE_RESIZED_FLAG | H5C__SERIALIZE_MOVED_FLAG))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unknown serialize flag(s)")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unknown serialize flag(s)")
#ifdef H5_HAVE_PARALLEL
- /* In the parallel case, resizes and moves in
- * the serialize operation can cause problems.
- * If they occur, scream and die.
- *
- * At present, in the parallel case, the aux_ptr
- * will only be set if there is more than one
- * process. Thus we can use this to detect
- * the parallel case.
- *
- * This works for now, but if we start using the
- * aux_ptr for other purposes, we will have to
- * change this test accordingly.
- *
- * NB: While this test detects entryies that attempt
- * to resize or move themselves during a flush
- * in the parallel case, it will not detect an
- * entry that dirties, resizes, and/or moves
- * other entries during its flush.
- *
- * From what Quincey tells me, this test is
- * sufficient for now, as any flush routine that
- * does the latter will also do the former.
- *
- * If that ceases to be the case, further
- * tests will be necessary.
- */
- if (cache_ptr->aux_ptr != NULL)
+ /* In the parallel case, resizes and moves in
+ * the serialize operation can cause problems.
+ * If they occur, scream and die.
+ *
+ * At present, in the parallel case, the aux_ptr
+ * will only be set if there is more than one
+ * process. Thus we can use this to detect
+ * the parallel case.
+ *
+ * This works for now, but if we start using the
+ * aux_ptr for other purposes, we will have to
+ * change this test accordingly.
+ *
+         * NB: While this test detects entries that attempt
+ * to resize or move themselves during a flush
+ * in the parallel case, it will not detect an
+ * entry that dirties, resizes, and/or moves
+ * other entries during its flush.
+ *
+ * From what Quincey tells me, this test is
+ * sufficient for now, as any flush routine that
+ * does the latter will also do the former.
+ *
+ * If that ceases to be the case, further
+ * tests will be necessary.
+ */
+ if (cache_ptr->aux_ptr != NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "resize/move in serialize occurred in parallel case")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "resize/move in serialize occurred in parallel case")
#endif
- /* If required, resize the buffer and update the entry and the cache
- * data structures
- */
- if (serialize_flags & H5C__SERIALIZE_RESIZED_FLAG) {
+ /* If required, resize the buffer and update the entry and the cache
+ * data structures
+ */
+ if (serialize_flags & H5C__SERIALIZE_RESIZED_FLAG) {
- /* Sanity check */
- HDassert(new_len > 0);
+ /* Sanity check */
+ HDassert(new_len > 0);
- /* Allocate a new image buffer */
- if (NULL == (entry_ptr->image_ptr =
- H5MM_realloc(entry_ptr->image_ptr, new_len + H5C_IMAGE_EXTRA_SPACE)))
+ /* Allocate a new image buffer */
+ if (NULL ==
+ (entry_ptr->image_ptr = H5MM_realloc(entry_ptr->image_ptr, new_len + H5C_IMAGE_EXTRA_SPACE)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL,
- "memory allocation failed for on disk image buffer")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL,
+ "memory allocation failed for on disk image buffer")
#if H5C_DO_MEMORY_SANITY_CHECKS
- H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + new_len, H5C_IMAGE_SANITY_VALUE,
- H5C_IMAGE_EXTRA_SPACE);
+ H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + new_len, H5C_IMAGE_SANITY_VALUE,
+ H5C_IMAGE_EXTRA_SPACE);
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
- /* Update statistics for resizing the entry */
- H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_len);
+ /* Update statistics for resizing the entry */
+ H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_len);
- /* Update the hash table for the size change */
- H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len, entry_ptr,
- !(entry_ptr->is_dirty));
+ /* Update the hash table for the size change */
+ H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len, entry_ptr,
+ !(entry_ptr->is_dirty));
- /* The entry can't be protected since we are in the process of
- * flushing it. Thus we must update the replacement policy data
- * structures for the size change. The macro deals with the pinned
- * case.
- */
- H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_len);
+ /* The entry can't be protected since we are in the process of
+ * flushing it. Thus we must update the replacement policy data
+ * structures for the size change. The macro deals with the pinned
+ * case.
+ */
+ H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_len);
- /* As we haven't updated the cache data structures for
- * for the flush or flush destroy yet, the entry should
- * be in the slist if the slist is enabled. Since
- * H5C__UPDATE_SLIST_FOR_SIZE_CHANGE() is a no-op if the
- * slist is enabled, call it un-conditionally.
- */
- HDassert(entry_ptr->is_dirty);
- HDassert((entry_ptr->in_slist) || (!cache_ptr->slist_enabled));
+            /* As we haven't updated the cache data structures for
+             * the flush or flush destroy yet, the entry should
+             * be in the slist if the slist is enabled. Since
+             * H5C__UPDATE_SLIST_FOR_SIZE_CHANGE() is a no-op if the
+             * slist is disabled, call it unconditionally.
+ */
+ HDassert(entry_ptr->is_dirty);
+ HDassert((entry_ptr->in_slist) || (!cache_ptr->slist_enabled));
- H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len);
+ H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len);
- /* Finally, update the entry for its new size */
- entry_ptr->size = new_len;
+ /* Finally, update the entry for its new size */
+ entry_ptr->size = new_len;
- } /* end if */
+ } /* end if */
- /* If required, udate the entry and the cache data structures
- * for a move
- */
- if (serialize_flags & H5C__SERIALIZE_MOVED_FLAG) {
+        /* If required, update the entry and the cache data structures
+ * for a move
+ */
+ if (serialize_flags & H5C__SERIALIZE_MOVED_FLAG) {
- /* Update stats and entries relocated counter */
- H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr)
+ /* Update stats and entries relocated counter */
+ H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr)
- /* We must update cache data structures for the change in address */
- if (entry_ptr->addr == old_addr) {
+ /* We must update cache data structures for the change in address */
+ if (entry_ptr->addr == old_addr) {
- /* Delete the entry from the hash table and the slist */
- H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL);
- H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE);
+ /* Delete the entry from the hash table and the slist */
+ H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL);
+ H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE);
- /* Update the entry for its new address */
- entry_ptr->addr = new_addr;
+ /* Update the entry for its new address */
+ entry_ptr->addr = new_addr;
- /* And then reinsert in the index and slist */
- H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL);
- H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL);
+ /* And then reinsert in the index and slist */
+ H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL);
+ H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL);
- } /* end if */
- else { /* move is already done for us -- just do sanity checks */
+ } /* end if */
+ else { /* move is already done for us -- just do sanity checks */
- HDassert(entry_ptr->addr == new_addr);
- }
- } /* end if */
- } /* end if(serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) */
+ HDassert(entry_ptr->addr == new_addr);
+ }
+ } /* end if */
+ } /* end if(serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) */
- /* Serialize object into buffer */
- if (entry_ptr->type->serialize(f, entry_ptr->image_ptr, entry_ptr->size, (void *)entry_ptr) < 0)
+ /* Serialize object into buffer */
+ if (entry_ptr->type->serialize(f, entry_ptr->image_ptr, entry_ptr->size, (void *)entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to serialize entry")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to serialize entry")
#if H5C_DO_MEMORY_SANITY_CHECKS
- HDassert(0 == HDmemcmp(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE,
- H5C_IMAGE_EXTRA_SPACE));
+ HDassert(0 == HDmemcmp(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE,
+ H5C_IMAGE_EXTRA_SPACE));
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
- entry_ptr->image_up_to_date = TRUE;
+ entry_ptr->image_up_to_date = TRUE;
- /* Propagate the fact that the entry is serialized up the
- * flush dependency chain if appropriate. Since the image must
- * have been out of date for this function to have been called
- * (see assertion on entry), no need to check that -- only check
- * for flush dependency parents.
- */
- HDassert(entry_ptr->flush_dep_nunser_children == 0);
+ /* Propagate the fact that the entry is serialized up the
+ * flush dependency chain if appropriate. Since the image must
+ * have been out of date for this function to have been called
+ * (see assertion on entry), no need to check that -- only check
+ * for flush dependency parents.
+ */
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
- if (entry_ptr->flush_dep_nparents > 0) {
+ if (entry_ptr->flush_dep_nparents > 0) {
- if (H5C__mark_flush_dep_serialized(entry_ptr) < 0)
+ if (H5C__mark_flush_dep_serialized(entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "Can't propagate serialization status to fd parents")
- }
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "Can't propagate serialization status to fd parents")
+ }
done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__generate_image */
-
- /*-------------------------------------------------------------------------
- *
- * Function: H5C_remove_entry
- *
- * Purpose: Remove an entry from the cache. Must be not protected, pinned,
- * dirty, involved in flush dependencies, etc.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * September 17, 2016
- *
- *-------------------------------------------------------------------------
- */
- herr_t H5C_remove_entry(void *_entry)
- {
- H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_entry; /* Entry to remove */
- H5C_t *cache; /* Cache for file */
- herr_t ret_value = SUCCEED; /* Return value */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__generate_image */
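H5C__generate_image() above gives the client a chance to resize and/or relocate the entry through its pre_serialize callback, which is passed the entry's current address and size and reports its decision via the H5C__SERIALIZE_RESIZED_FLAG / H5C__SERIALIZE_MOVED_FLAG bits together with a new length and/or address. The sketch below shows the shape of such a callback under toy names; the toy_* identifiers and flag values are assumptions for illustration, not the HDF5 client API.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_SERIALIZE_NO_FLAGS_SET 0x0u
#define TOY_SERIALIZE_RESIZED_FLAG 0x1u
#define TOY_SERIALIZE_MOVED_FLAG   0x2u

struct toy_entry {
    uint64_t addr;        /* current on-disk address           */
    size_t   size;        /* size of the current on-disk image */
    size_t   needed_size; /* size the next image will need     */
};

/* Pre-serialize callback: inspect the entry and report a resize if the
 * serialized form no longer fits the existing image buffer.  This toy
 * callback never asks for a move, so *new_addr_out is left untouched.
 */
static int
toy_pre_serialize(struct toy_entry *entry, uint64_t addr, size_t len, uint64_t *new_addr_out,
                  size_t *new_len_out, unsigned *flags_out)
{
    (void)addr;
    (void)new_addr_out;

    *flags_out = TOY_SERIALIZE_NO_FLAGS_SET;
    if (entry->needed_size != len) {
        *new_len_out = entry->needed_size;
        *flags_out |= TOY_SERIALIZE_RESIZED_FLAG;
    }
    return 0; /* non-negative means success, as in the code above */
}

int
main(void)
{
    struct toy_entry entry    = {4096, 64, 96};
    uint64_t         new_addr = 0;
    size_t           new_len  = 0;
    unsigned         flags    = TOY_SERIALIZE_NO_FLAGS_SET;

    if (toy_pre_serialize(&entry, entry.addr, entry.size, &new_addr, &new_len, &flags) >= 0 &&
        (flags & TOY_SERIALIZE_RESIZED_FLAG))
        printf("entry wants to grow from %zu to %zu bytes\n", entry.size, new_len);
    return 0;
}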
- FUNC_ENTER_NOAPI(FAIL)
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_remove_entry
+ *
+ * Purpose:     Remove an entry from the cache.  The entry must not be
+ *              protected, pinned, dirty, involved in flush dependencies, etc.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * September 17, 2016
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_remove_entry(void *_entry)
+{
+ H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_entry; /* Entry to remove */
+ H5C_t *cache; /* Cache for file */
+ herr_t ret_value = SUCCEED; /* Return value */
- /* Sanity checks */
- HDassert(entry);
- HDassert(entry->ring != H5C_RING_UNDEFINED);
- cache = entry->cache_ptr;
- HDassert(cache);
- HDassert(cache->magic == H5C__H5C_T_MAGIC);
-
- /* Check for error conditions */
- if (entry->is_dirty)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove dirty entry from cache")
- if (entry->is_protected)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove protected entry from cache")
- if (entry->is_pinned)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove pinned entry from cache")
- /* NOTE: If these two errors are getting tripped because the entry is
- * in a flush dependency with a freedspace entry, move the checks
- * after the "before evict" message is sent, and add the
- * "child being evicted" message to the "before evict" notify
- * section below. QAK - 2017/08/03
- */
- if (entry->flush_dep_nparents > 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL,
- "can't remove entry with flush dependency parents from cache")
- if (entry->flush_dep_nchildren > 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL,
- "can't remove entry with flush dependency children from cache")
-
- /* Additional internal cache consistency checks */
- HDassert(!entry->in_slist);
- HDassert(!entry->flush_marker);
- HDassert(!entry->flush_in_progress);
-
- /* Note that the algorithm below is (very) similar to the set of operations
- * in H5C__flush_single_entry() and should be kept in sync with changes
- * to that code. - QAK, 2016/11/30
- */
+ FUNC_ENTER_NOAPI(FAIL)
- /* Update stats, as if we are "destroying" and taking ownership of the entry */
- H5C__UPDATE_STATS_FOR_EVICTION(cache, entry, TRUE)
+ /* Sanity checks */
+ HDassert(entry);
+ HDassert(entry->ring != H5C_RING_UNDEFINED);
+ cache = entry->cache_ptr;
+ HDassert(cache);
+ HDassert(cache->magic == H5C__H5C_T_MAGIC);
+
+ /* Check for error conditions */
+ if (entry->is_dirty)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove dirty entry from cache")
+ if (entry->is_protected)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove protected entry from cache")
+ if (entry->is_pinned)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove pinned entry from cache")
+ /* NOTE: If these two errors are getting tripped because the entry is
+ * in a flush dependency with a freedspace entry, move the checks
+ * after the "before evict" message is sent, and add the
+ * "child being evicted" message to the "before evict" notify
+ * section below. QAK - 2017/08/03
+ */
+ if (entry->flush_dep_nparents > 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL,
+ "can't remove entry with flush dependency parents from cache")
+ if (entry->flush_dep_nchildren > 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL,
+ "can't remove entry with flush dependency children from cache")
+
+ /* Additional internal cache consistency checks */
+ HDassert(!entry->in_slist);
+ HDassert(!entry->flush_marker);
+ HDassert(!entry->flush_in_progress);
+
+ /* Note that the algorithm below is (very) similar to the set of operations
+ * in H5C__flush_single_entry() and should be kept in sync with changes
+ * to that code. - QAK, 2016/11/30
+ */
- /* If the entry's type has a 'notify' callback, send a 'before eviction'
- * notice while the entry is still fully integrated in the cache.
- */
- if (entry->type->notify && (entry->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT, entry) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry to evict")
+ /* Update stats, as if we are "destroying" and taking ownership of the entry */
+ H5C__UPDATE_STATS_FOR_EVICTION(cache, entry, TRUE)
- /* Update the cache internal data structures as appropriate for a destroy.
- * Specifically:
- * 1) Delete it from the index
- * 2) Delete it from the collective read access list
- * 3) Update the replacement policy for eviction
- * 4) Remove it from the tag list for this object
- */
+ /* If the entry's type has a 'notify' callback, send a 'before eviction'
+ * notice while the entry is still fully integrated in the cache.
+ */
+ if (entry->type->notify && (entry->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT, entry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry to evict")
+
+ /* Update the cache internal data structures as appropriate for a destroy.
+ * Specifically:
+ * 1) Delete it from the index
+ * 2) Delete it from the collective read access list
+ * 3) Update the replacement policy for eviction
+ * 4) Remove it from the tag list for this object
+ */
- H5C__DELETE_FROM_INDEX(cache, entry, FAIL)
+ H5C__DELETE_FROM_INDEX(cache, entry, FAIL)
#ifdef H5_HAVE_PARALLEL
- /* Check for collective read access flag */
- if (entry->coll_access) {
- entry->coll_access = FALSE;
- H5C__REMOVE_FROM_COLL_LIST(cache, entry, FAIL)
- } /* end if */
-#endif /* H5_HAVE_PARALLEL */
+ /* Check for collective read access flag */
+ if (entry->coll_access) {
+ entry->coll_access = FALSE;
+ H5C__REMOVE_FROM_COLL_LIST(cache, entry, FAIL)
+ } /* end if */
+#endif /* H5_HAVE_PARALLEL */
- H5C__UPDATE_RP_FOR_EVICTION(cache, entry, FAIL)
+ H5C__UPDATE_RP_FOR_EVICTION(cache, entry, FAIL)
- /* Remove entry from tag list */
- if (H5C__untag_entry(cache, entry) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list")
+ /* Remove entry from tag list */
+ if (H5C__untag_entry(cache, entry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list")
- /* Increment entries_removed_counter and set last_entry_removed_ptr.
- * As we me be about to free the entry, recall that last_entry_removed_ptr
- * must NEVER be dereferenced.
- *
- * Recall that these fields are maintained to allow functions that perform
- * scans of lists of entries to detect the unexpected removal of entries
- * (via expunge, eviction, or take ownership at present), so that they can
- * re-start their scans if necessary.
- *
- * Also check if the entry we are watching for removal is being
- * removed (usually the 'next' entry for an iteration) and reset
- * it to indicate that it was removed.
- */
- cache->entries_removed_counter++;
- cache->last_entry_removed_ptr = entry;
- if (entry == cache->entry_watched_for_removal)
- cache->entry_watched_for_removal = NULL;
+ /* Increment entries_removed_counter and set last_entry_removed_ptr.
+     * As we may be about to free the entry, recall that last_entry_removed_ptr
+ * must NEVER be dereferenced.
+ *
+ * Recall that these fields are maintained to allow functions that perform
+ * scans of lists of entries to detect the unexpected removal of entries
+ * (via expunge, eviction, or take ownership at present), so that they can
+ * re-start their scans if necessary.
+ *
+ * Also check if the entry we are watching for removal is being
+ * removed (usually the 'next' entry for an iteration) and reset
+ * it to indicate that it was removed.
+ */
+ cache->entries_removed_counter++;
+ cache->last_entry_removed_ptr = entry;
+ if (entry == cache->entry_watched_for_removal)
+ cache->entry_watched_for_removal = NULL;
- /* Internal cache data structures should now be up to date, and
- * consistent with the status of the entry.
- *
- * Now clean up internal cache fields if appropriate.
- */
+ /* Internal cache data structures should now be up to date, and
+ * consistent with the status of the entry.
+ *
+ * Now clean up internal cache fields if appropriate.
+ */
- /* Free the buffer for the on disk image */
- if (entry->image_ptr != NULL)
- entry->image_ptr = H5MM_xfree(entry->image_ptr);
+ /* Free the buffer for the on disk image */
+ if (entry->image_ptr != NULL)
+ entry->image_ptr = H5MM_xfree(entry->image_ptr);
- /* Reset the pointer to the cache the entry is within */
- entry->cache_ptr = NULL;
+ /* Reset the pointer to the cache the entry is within */
+ entry->cache_ptr = NULL;
- /* Client is taking ownership of the entry. Set bad magic here so the
- * cache will choke unless the entry is re-inserted properly
- */
- entry->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
+ /* Client is taking ownership of the entry. Set bad magic here so the
+ * cache will choke unless the entry is re-inserted properly
+ */
+ entry->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__remove_entry() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_remove_entry() */
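H5C_remove_entry() above bumps entries_removed_counter, records last_entry_removed_ptr (which must never be dereferenced), and clears entry_watched_for_removal so that any in-progress scan knows its saved position is gone. Below is a self-contained sketch of how an iterator can use that last field; the toy_* structures and the restart-from-head policy are illustrative assumptions, not the HDF5 implementation.

#include <stdio.h>

struct toy_entry {
    int               id;
    struct toy_entry *next;
};

struct toy_cache {
    struct toy_entry *head;
    unsigned          entries_removed_counter;
    struct toy_entry *entry_watched_for_removal;
};

/* Unlink an entry and update the removal bookkeeping, as the code above does. */
static void
toy_remove(struct toy_cache *cache, struct toy_entry *entry)
{
    struct toy_entry **pp = &cache->head;

    while (*pp != NULL && *pp != entry)
        pp = &(*pp)->next;
    if (*pp == entry)
        *pp = entry->next;

    cache->entries_removed_counter++;
    if (entry == cache->entry_watched_for_removal)
        cache->entry_watched_for_removal = NULL; /* the iterator's saved "next" is gone */
}

int
main(void)
{
    struct toy_entry  c = {3, NULL}, b = {2, &c}, a = {1, &b};
    struct toy_cache  cache     = {&a, 0, NULL};
    int               removed_b = 0;
    struct toy_entry *cur       = cache.head;

    while (cur != NULL) {
        /* Remember the intended next entry before doing work that may remove entries. */
        cache.entry_watched_for_removal = cur->next;

        if (cur->id == 1 && !removed_b) {
            toy_remove(&cache, &b); /* visiting entry 1 happens to remove entry 2 */
            removed_b = 1;
        }
        printf("visited %d\n", cur->id);

        if (cache.entry_watched_for_removal == NULL && cur->next != NULL)
            cur = cache.head; /* saved position was removed: restart instead of dereferencing it */
        else
            cur = cache.entry_watched_for_removal;
        cache.entry_watched_for_removal = NULL;
    }
    return 0;
}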