author     Dana Robinson <derobins@hdfgroup.org>    2020-08-17 17:05:05 (GMT)
committer  Dana Robinson <derobins@hdfgroup.org>    2020-08-17 17:05:05 (GMT)
commit     90fa429c015649fee878530817e503393e9f269c (patch)
tree       781be81d7b4f943807b41741eddf957810edd855 /src
parent     9457d3d4b41e356121ca2b4d72a199b91b8254af (diff)
Brings the MDC skiplist optimizations from develop
Diffstat (limited to 'src')
-rw-r--r--  src/H5AC.c          263
-rw-r--r--  src/H5ACmpio.c       42
-rw-r--r--  src/H5ACprivate.h     2
-rw-r--r--  src/H5C.c          1862
-rw-r--r--  src/H5Cdbg.c         45
-rw-r--r--  src/H5Cimage.c      171
-rw-r--r--  src/H5Cmpio.c       185
-rw-r--r--  src/H5Cpkg.h        507
-rw-r--r--  src/H5Cprivate.h      5
-rw-r--r--  src/H5Fint.c         10
10 files changed, 2305 insertions, 787 deletions
diff --git a/src/H5AC.c b/src/H5AC.c
index 2b9a410..a8de8ea 100644
--- a/src/H5AC.c
+++ b/src/H5AC.c
@@ -140,8 +140,8 @@ static const H5AC_class_t *const H5AC_class_s[] = {
*
* Purpose: Initialize the interface from some other layer.
*
- * Return: Success: non-negative
- * Failure: negative
+ * Return: Success: non-negative
+ * Failure: negative
*
* Programmer: Quincey Koziol
* Saturday, January 18, 2003
@@ -162,7 +162,7 @@ done:
/*-------------------------------------------------------------------------
- * Function H5AC__init_package
+ * Function: H5AC__init_package
*
* Purpose: Initialize interface-specific information
*
@@ -202,9 +202,9 @@ H5AC__init_package(void)
*
* Purpose: Terminate this interface.
*
- * Return: Success: Positive if anything was done that might
- * affect other interfaces; zero otherwise.
- * Failure: Negative.
+ * Return: Success: Positive if anything was done that might
+ * affect other interfaces; zero otherwise.
+ * Failure: Negative.
*
* Programmer: Quincey Koziol
* Thursday, July 18, 2002
@@ -281,7 +281,7 @@ herr_t
H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr, H5AC_cache_image_config_t * image_config_ptr)
{
#ifdef H5_HAVE_PARALLEL
- char prefix[H5C__PREFIX_LEN] = "";
+ char prefix[H5C__PREFIX_LEN] = "";
H5AC_aux_t * aux_ptr = NULL;
#endif /* H5_HAVE_PARALLEL */
struct H5C_cache_image_ctl_t int_ci_config = H5C__DEFAULT_CACHE_IMAGE_CTL;
@@ -468,6 +468,14 @@ done:
* Programmer: Robb Matzke
* Jul 9 1997
*
+ * Changes:
+ *
+ * In the parallel case, added code to setup the MDC slist
+ * before the call to H5AC__flush_entries() and take it down
+ * afterwards.
+ *
+ * JRM -- 7/29/20
+ *
*-------------------------------------------------------------------------
*/
herr_t
@@ -493,58 +501,115 @@ H5AC_dest(H5F_t *f)
#endif /* H5AC_DUMP_STATS_ON_CLOSE */
/* Check if log messages are being emitted */
- if(H5C_get_logging_status(f->shared->cache, &log_enabled, &curr_logging) < 0)
+ if(H5C_get_logging_status(f->shared->cache,
+ &log_enabled, &curr_logging) < 0)
+
HGOTO_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to get logging status")
- if(log_enabled && curr_logging)
+ if(log_enabled && curr_logging) {
+
if(H5C_log_write_destroy_cache_msg(f->shared->cache) < 0)
- HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "unable to emit log message")
+
+ HDONE_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, \
+ "unable to emit log message")
+ }
+
/* Tear down logging */
- if(log_enabled)
+ if(log_enabled) {
+
if(H5C_log_tear_down(f->shared->cache) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, "mdc logging tear-down failed")
+
+ HGOTO_ERROR(H5E_CACHE, H5E_LOGGING, FAIL, \
+ "mdc logging tear-down failed")
+ }
#ifdef H5_HAVE_PARALLEL
+
/* destroying the cache, so clear all collective entries */
if(H5C_clear_coll_entries(f->shared->cache, FALSE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5C_clear_coll_entries() failed")
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, \
+ "H5C_clear_coll_entries() failed")
aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(f->shared->cache);
- if(aux_ptr)
+
+ if(aux_ptr) {
+
/* Sanity check */
HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+
+
+ /* If the file was opened R/W, attempt to flush all entries
+ * from rank 0 & Bcast clean list to other ranks.
+ *
+ * Must not flush in the R/O case, as this will trigger the
+ * free space manager settle routines.
+ *
+ * Must also enable the skip list before the call to
+ * H5AC__flush_entries() and disable it afterwards, as the
+ * skip list will be disabled after the previous flush.
+ *
+ * Note that H5C_dest() does slist setup and take down as well.
+ * Unfortunately, we can't do the setup and take down just once,
+ * as H5C_dest() is called directly in the test code.
+ *
+ * Fortunately, the cache should be clean or close to it at this
+ * point, so the overhead should be minimal.
+ */
+ if(H5F_ACC_RDWR & H5F_INTENT(f)) {
- /* If the file was opened R/W, attempt to flush all entries
- * from rank 0 & Bcast clean list to other ranks.
- *
- * Must not flush in the R/O case, as this will trigger the
- * free space manager settle routines.
- */
- if(H5F_ACC_RDWR & H5F_INTENT(f))
- if(H5AC__flush_entries(f) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush")
+ /* enable and load the slist */
+ if ( H5C_set_slist_enabled(f->shared->cache, TRUE, FALSE) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "set slist enabled failed")
+
+ if(H5AC__flush_entries(f) < 0)
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush")
+
+ /* disable the slist -- should be empty */
+ if ( H5C_set_slist_enabled(f->shared->cache, FALSE, FALSE) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "disable slist failed")
+ }
+ }
#endif /* H5_HAVE_PARALLEL */
/* Destroy the cache */
if(H5C_dest(f) < 0)
+
HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "can't destroy cache")
+
f->shared->cache = NULL;
#ifdef H5_HAVE_PARALLEL
+
if(aux_ptr != NULL) {
+
if(aux_ptr->d_slist_ptr != NULL) {
+
HDassert(H5SL_count(aux_ptr->d_slist_ptr) == 0);
H5SL_close(aux_ptr->d_slist_ptr);
+
} /* end if */
+
if(aux_ptr->c_slist_ptr != NULL) {
+
HDassert(H5SL_count(aux_ptr->c_slist_ptr) == 0);
H5SL_close(aux_ptr->c_slist_ptr);
+
} /* end if */
+
if(aux_ptr->candidate_slist_ptr != NULL) {
+
HDassert(H5SL_count(aux_ptr->candidate_slist_ptr) == 0);
H5SL_close(aux_ptr->candidate_slist_ptr);
+
} /* end if */
+
aux_ptr->magic = 0;
aux_ptr = H5FL_FREE(H5AC_aux_t, aux_ptr);
+
} /* end if */
#endif /* H5_HAVE_PARALLEL */
@@ -776,20 +841,20 @@ H5AC_get_entry_status(const H5F_t *f, haddr_t addr, unsigned *status)
if(in_cache) {
*status |= H5AC_ES__IN_CACHE;
- if(is_dirty)
- *status |= H5AC_ES__IS_DIRTY;
- if(is_protected)
- *status |= H5AC_ES__IS_PROTECTED;
- if(is_pinned)
- *status |= H5AC_ES__IS_PINNED;
- if(is_corked)
- *status |= H5AC_ES__IS_CORKED;
- if(is_flush_dep_parent)
- *status |= H5AC_ES__IS_FLUSH_DEP_PARENT;
- if(is_flush_dep_child)
- *status |= H5AC_ES__IS_FLUSH_DEP_CHILD;
- if(image_is_up_to_date)
- *status |= H5AC_ES__IMAGE_IS_UP_TO_DATE;
+ if(is_dirty)
+ *status |= H5AC_ES__IS_DIRTY;
+ if(is_protected)
+ *status |= H5AC_ES__IS_PROTECTED;
+ if(is_pinned)
+ *status |= H5AC_ES__IS_PINNED;
+ if(is_corked)
+ *status |= H5AC_ES__IS_CORKED;
+ if(is_flush_dep_parent)
+ *status |= H5AC_ES__IS_FLUSH_DEP_PARENT;
+ if(is_flush_dep_child)
+ *status |= H5AC_ES__IS_FLUSH_DEP_CHILD;
+ if(image_is_up_to_date)
+ *status |= H5AC_ES__IMAGE_IS_UP_TO_DATE;
} /* end if */
else
*status = 0;
@@ -1240,6 +1305,109 @@ done:
/*-------------------------------------------------------------------------
+ *
+ * Function: H5AC_prep_for_file_flush
+ *
+ * Purpose: This function should be called just prior to the first
+ * call to H5AC_flush() during a file flush.
+ *
+ * Its purpose is to handle any setup required prior to a
+ * metadata cache flush.
+ *
+ * Initially, this means setting up the slist prior to the
+ * flush. We do this in a separate call because
+ * H5F__flush_phase2() makes repeated calls to H5AC_flush().
+ * Handling this detail in separate calls allows us to avoid
+ * the overhead of setting up and taking down the skip list
+ * repeatedly.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 5/5/20
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC_prep_for_file_flush(H5F_t *f)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->cache);
+
+ if ( H5C_set_slist_enabled(f->shared->cache, TRUE, FALSE) < 0)
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist enabled failed")
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC_prep_for_file_flush() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5AC_secure_from_file_flush
+ *
+ * Purpose: This function should be called just after the last
+ * call to H5AC_flush() during a file flush.
+ *
+ * Its purpose is to allow the metadata cache to perform
+ * any necessary cleanup after the flush.
+ *
+ * Initially, this means taking down the slist after the
+ * flush. We do this in a separate call because
+ * H5F__flush_phase2() makes repeated calls to H5AC_flush().
+ * Handling this detail in separate calls allows us to avoid
+ * the overhead of setting up and taking down the skip list
+ * repeatedly.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 5/5/20
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC_secure_from_file_flush(H5F_t *f)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->cache);
+
+ if ( H5C_set_slist_enabled(f->shared->cache, FALSE, FALSE) < 0)
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist enabled failed")
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC_secure_from_file_flush() */
+
+
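Taken together, H5AC_prep_for_file_flush() and H5AC_secure_from_file_flush() are meant to bracket the repeated H5AC_flush() calls made during a file flush. A minimal sketch of the intended calling pattern -- the wrapper below is hypothetical; the real caller is H5F__flush_phase2() in the src/H5Fint.c hunk of this commit, which is not shown in this excerpt:

    herr_t
    example_file_flush(H5F_t *f)        /* hypothetical caller */
    {
        herr_t ret_value = SUCCEED;

        /* set up the skip list once, before the first H5AC_flush() */
        if ( H5AC_prep_for_file_flush(f) < 0 )
            return FAIL;

        /* ... one or more flush passes over the metadata cache ... */
        if ( H5AC_flush(f) < 0 )
            ret_value = FAIL;

        /* take the skip list down once, after the last H5AC_flush() */
        if ( H5AC_secure_from_file_flush(f) < 0 )
            ret_value = FAIL;

        return ret_value;
    }
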
+/*-------------------------------------------------------------------------
+ *
* Function: H5AC_create_flush_dependency()
*
* Purpose: Create a flush dependency between two entries in the metadata
@@ -1548,8 +1716,8 @@ herr_t
H5AC_unprotect(H5F_t *f, const H5AC_class_t *type, haddr_t addr, void *thing,
unsigned flags)
{
- hbool_t dirtied;
- hbool_t deleted;
+ hbool_t dirtied;
+ hbool_t deleted;
#ifdef H5_HAVE_PARALLEL
H5AC_aux_t * aux_ptr = NULL;
#endif /* H5_HAVE_PARALLEL */
@@ -1570,14 +1738,14 @@ H5AC_unprotect(H5F_t *f, const H5AC_class_t *type, haddr_t addr, void *thing,
HDassert(((H5AC_info_t *)thing)->type == type);
dirtied = (hbool_t)(((flags & H5AC__DIRTIED_FLAG) == H5AC__DIRTIED_FLAG) ||
- (((H5AC_info_t *)thing)->dirtied));
+ (((H5AC_info_t *)thing)->dirtied));
deleted = (hbool_t)((flags & H5C__DELETED_FLAG) == H5C__DELETED_FLAG);
/* Check if the size changed out from underneath us, if we're not deleting
* the entry.
*/
if(dirtied && !deleted) {
- size_t curr_size = 0;
+ size_t curr_size = 0;
if((type->image_len)(thing, &curr_size) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTGETSIZE, FAIL, "Can't get size of thing")
@@ -1664,7 +1832,7 @@ H5AC_get_cache_auto_resize_config(const H5AC_t *cache_ptr,
if(internal_config.rpt_fcn == NULL)
config_ptr->rpt_fcn_enabled = FALSE;
else
- config_ptr->rpt_fcn_enabled = TRUE;
+ config_ptr->rpt_fcn_enabled = TRUE;
config_ptr->open_trace_file = FALSE;
config_ptr->close_trace_file = FALSE;
config_ptr->trace_file_name[0] = '\0';
@@ -1926,7 +2094,7 @@ H5AC_validate_config(H5AC_cache_config_t *config_ptr)
/* don't bother to test trace_file_name unless open_trace_file is TRUE */
if(config_ptr->open_trace_file) {
- size_t name_len;
+ size_t name_len;
/* Can't really test the trace_file_name field without trying to
* open the file, so we will content ourselves with a couple of
@@ -2047,9 +2215,9 @@ H5_ATTR_UNUSED
*f, hbool_t *write_permitted_ptr)
{
#ifdef H5_HAVE_PARALLEL
- H5AC_aux_t * aux_ptr = NULL;
+ H5AC_aux_t * aux_ptr = NULL;
#endif /* H5_HAVE_PARALLEL */
- hbool_t write_permitted = TRUE;
+ hbool_t write_permitted = TRUE;
FUNC_ENTER_STATIC_NOERR
@@ -2546,8 +2714,8 @@ H5AC_set_ring(H5AC_ring_t ring, H5AC_ring_t *orig_ring)
* Note that this function simply passes the call on to
* the metadata cache proper, and returns the result.
*
- * Return: Success: Non-negative
- * Failure: Negative
+ * Return: Success: Non-negative
+ * Failure: Negative
*
* Programmer: Quincey Koziol
* September 17, 2016
@@ -2682,4 +2850,3 @@ H5AC_get_mdc_image_info(H5AC_t *cache_ptr, haddr_t *image_addr, hsize_t *image_l
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5AC_get_mdc_image_info() */
-
diff --git a/src/H5ACmpio.c b/src/H5ACmpio.c
index c6ffc80..7474460 100644
--- a/src/H5ACmpio.c
+++ b/src/H5ACmpio.c
@@ -791,7 +791,7 @@ H5AC__log_dirtied_entry(const H5AC_info_t *entry_ptr)
else {
aux_ptr->dirty_bytes += entry_ptr->size;
#if H5AC_DEBUG_DIRTY_BYTES_CREATION
- aux_ptr->unprotect_dirty_bytes += entry_size;
+ aux_ptr->unprotect_dirty_bytes += entry_ptr->size;
aux_ptr->unprotect_dirty_bytes_updates += 1;
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
} /* end else */
@@ -989,7 +989,7 @@ H5AC__log_inserted_entry(const H5AC_info_t *entry_ptr)
aux_ptr->dirty_bytes += entry_ptr->size;
#if H5AC_DEBUG_DIRTY_BYTES_CREATION
- aux_ptr->insert_dirty_bytes += size;
+ aux_ptr->insert_dirty_bytes += entry_ptr->size;
aux_ptr->insert_dirty_bytes_updates += 1;
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
@@ -1875,6 +1875,8 @@ done:
* Programmer: John Mainzer
* April 28, 2010
*
+ * Changes: None.
+ *
*-------------------------------------------------------------------------
*/
static herr_t
@@ -1894,7 +1896,8 @@ H5AC__rsp__p0_only__flush(H5F_t *f)
aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
HDassert(aux_ptr != NULL);
HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
- HDassert(aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY);
+ HDassert(aux_ptr->metadata_write_strategy == \
+ H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY);
/* To prevent "messages from the future" we must
* synchronize all processes before we start the flush.
@@ -1903,9 +1906,12 @@ H5AC__rsp__p0_only__flush(H5F_t *f)
* However, when flushing from within the close operation from a file,
* it's possible to skip this barrier (on the second flush of the cache).
*/
- if(!H5CX_get_mpi_file_flushing())
- if(MPI_SUCCESS != (mpi_result = MPI_Barrier(aux_ptr->mpi_comm)))
+ if ( ! H5CX_get_mpi_file_flushing() ) {
+
+ if ( MPI_SUCCESS != (mpi_result = MPI_Barrier(aux_ptr->mpi_comm)) )
+
HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_result)
+ }
/* Flush data to disk, from rank 0 process */
if(aux_ptr->mpi_rank == 0) {
@@ -1921,23 +1927,30 @@ H5AC__rsp__p0_only__flush(H5F_t *f)
aux_ptr->write_permitted = FALSE;
/* Check for error on the write operation */
- if(result < 0)
+ if ( result < 0 )
+
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush.")
/* this code exists primarily for the test bed -- it allows us to
* enforce POSIX semantics on the server that pretends to be a
* file system in our parallel tests.
*/
- if(aux_ptr->write_done)
+ if ( aux_ptr->write_done ) {
+
(aux_ptr->write_done)();
+ }
} /* end if */
/* Propagate cleaned entries to other ranks. */
- if(H5AC__propagate_flushed_and_still_clean_entries_list(f) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't propagate clean entries list.")
+ if ( H5AC__propagate_flushed_and_still_clean_entries_list(f) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "Can't propagate clean entries list.")
done:
+
FUNC_LEAVE_NOAPI(ret_value)
+
} /* H5AC__rsp__p0_only__flush() */
@@ -2104,16 +2117,22 @@ H5AC__run_sync_point(H5F_t *f, int sync_point_op)
/* Sanity checks */
HDassert(f != NULL);
+
cache_ptr = f->shared->cache;
+
HDassert(cache_ptr != NULL);
+
aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
+
HDassert(aux_ptr != NULL);
HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
HDassert((sync_point_op == H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) ||
- (sync_point_op == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED));
+ (sync_point_op == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED));
+
#if H5AC_DEBUG_DIRTY_BYTES_CREATION
-HDfprintf(stdout, "%d:H5AC_propagate...:%u: (u/uu/i/iu/r/ru) = %zu/%u/%zu/%u/%zu/%u\n",
+HDfprintf(stdout,
+ "%d:H5AC_propagate...:%u: (u/uu/i/iu/r/ru) = %zu/%u/%zu/%u/%zu/%u\n",
aux_ptr->mpi_rank,
aux_ptr->dirty_bytes_propagations,
aux_ptr->unprotect_dirty_bytes,
@@ -2188,6 +2207,7 @@ HDfprintf(stdout, "%d:H5AC_propagate...:%u: (u/uu/i/iu/r/ru) = %zu/%u/%zu/%u/%zu
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
done:
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5AC__run_sync_point() */
diff --git a/src/H5ACprivate.h b/src/H5ACprivate.h
index 9f2c215..575c791 100644
--- a/src/H5ACprivate.h
+++ b/src/H5ACprivate.h
@@ -387,6 +387,8 @@ H5_DLL herr_t H5AC_insert_entry(H5F_t *f, const H5AC_class_t *type,
haddr_t addr, void *thing, unsigned int flags);
H5_DLL herr_t H5AC_pin_protected_entry(void *thing);
H5_DLL herr_t H5AC_prep_for_file_close(H5F_t *f);
+H5_DLL herr_t H5AC_prep_for_file_flush(H5F_t *f);
+H5_DLL herr_t H5AC_secure_from_file_flush(H5F_t *f);
H5_DLL herr_t H5AC_create_flush_dependency(void *parent_thing, void *child_thing);
H5_DLL void * H5AC_protect(H5F_t *f, const H5AC_class_t *type, haddr_t addr,
void *udata, unsigned flags);
diff --git a/src/H5C.c b/src/H5C.c
index f706a35..20182da 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -234,6 +234,82 @@ H5FL_BLK_DEFINE_STATIC(parent);
* Programmer: John Mainzer
* 6/2/04
*
+ * Modifications:
+ *
+ * JRM -- 7/20/04
+ * Updated for the addition of the hash table.
+ *
+ * JRM -- 10/5/04
+ * Added call to H5C_reset_cache_hit_rate_stats(). Also
+ * added initialization for cache_is_full flag and for
+ * resize_ctl.
+ *
+ * JRM -- 11/12/04
+ * Added initialization for the new size_decreased field.
+ *
+ * JRM -- 11/17/04
+ * Added/updated initialization for the automatic cache
+ * size control data structures.
+ *
+ * JRM -- 6/24/05
+ * Added support for the new write_permitted field of
+ * the H5C_t structure.
+ *
+ * JRM -- 7/5/05
+ * Added the new log_flush parameter and supporting code.
+ *
+ * JRM -- 9/21/05
+ * Added the new aux_ptr parameter and supporting code.
+ *
+ * JRM -- 1/20/06
+ * Added initialization of the new prefix field in H5C_t.
+ *
+ * JRM -- 3/16/06
+ * Added initialization for the pinned entry related fields.
+ *
+ * JRM -- 5/31/06
+ * Added initialization for the trace_file_ptr field.
+ *
+ * JRM -- 8/19/06
+ * Added initialization for the flush_in_progress field.
+ *
+ * JRM -- 8/25/06
+ * Added initialization for the slist_len_increase and
+ * slist_size_increase fields. These fields are used
+ * for sanity checking in the flush process, and are not
+ * compiled in unless H5C_DO_SANITY_CHECKS is TRUE.
+ *
+ * JRM -- 3/28/07
+ * Added initialization for the new is_read_only and
+ * ro_ref_count fields.
+ *
+ * JRM -- 7/27/07
+ * Added initialization for the new evictions_enabled
+ * field of H5C_t.
+ *
+ * JRM -- 12/31/07
+ * Added initialization for the new flash cache size increase
+ * related fields of H5C_t.
+ *
+ * JRM -- 11/5/08
+ * Added initialization for the new clean_index_size and
+ * dirty_index_size fields of H5C_t.
+ *
+ *
+ * Missing entries?
+ *
+ *
+ * JRM -- 4/20/20
+ * Added initialization for the slist_enabled field. Recall
+ * that the slist is used to flush metadata cache entries
+ * in (roughly) increasing address order. While this is
+ * needed at flush and close, it is not used elsewhere.
+ * The slist_enabled field exists to allow us to construct
+ * the slist when needed, and leave it empty otherwise -- thus
+ * avoiding the overhead of maintaining it.
+ *
+ * JRM -- 4/29/20
+ *
*-------------------------------------------------------------------------
*/
H5C_t *
@@ -308,30 +384,36 @@ H5C_create(size_t max_cache_size,
cache_ptr->dirty_index_size = (size_t)0;
for(i = 0; i < H5C_RING_NTYPES; i++) {
- cache_ptr->index_ring_len[i] = 0;
- cache_ptr->index_ring_size[i] = (size_t)0;
- cache_ptr->clean_index_ring_size[i] = (size_t)0;
- cache_ptr->dirty_index_ring_size[i] = (size_t)0;
+ cache_ptr->index_ring_len[i] = 0;
+ cache_ptr->index_ring_size[i] = (size_t)0;
+ cache_ptr->clean_index_ring_size[i] = (size_t)0;
+ cache_ptr->dirty_index_ring_size[i] = (size_t)0;
- cache_ptr->slist_ring_len[i] = 0;
- cache_ptr->slist_ring_size[i] = (size_t)0;
+ cache_ptr->slist_ring_len[i] = 0;
+ cache_ptr->slist_ring_size[i] = (size_t)0;
} /* end for */
for(i = 0; i < H5C__HASH_TABLE_LEN; i++)
(cache_ptr->index)[i] = NULL;
- cache_ptr->il_len = 0;
- cache_ptr->il_size = (size_t)0;
- cache_ptr->il_head = NULL;
- cache_ptr->il_tail = NULL;
+ cache_ptr->il_len = 0;
+ cache_ptr->il_size = (size_t)0;
+ cache_ptr->il_head = NULL;
+ cache_ptr->il_tail = NULL;
/* Tagging Field Initializations */
cache_ptr->ignore_tags = FALSE;
cache_ptr->num_objs_corked = 0;
- cache_ptr->slist_changed = FALSE;
- cache_ptr->slist_len = 0;
- cache_ptr->slist_size = (size_t)0;
+ /* slist field initializations */
+ cache_ptr->slist_enabled = ! H5C__SLIST_OPT_ENABLED;
+ cache_ptr->slist_changed = FALSE;
+ cache_ptr->slist_len = 0;
+ cache_ptr->slist_size = (size_t)0;
+
+ /* slist_ring_len, slist_ring_size, and
+ * slist_ptr initialized above.
+ */
#if H5C_DO_SANITY_CHECKS
cache_ptr->slist_len_increase = 0;
@@ -825,6 +907,20 @@ done:
* Programmer: John Mainzer
* 6/2/04
*
+ * Modifications:
+ *
+ * JRM -- 5/15/20
+ *
+ * Updated the function to enable the slist prior to the
+ * call to H5C__flush_invalidate_cache().
+ *
+ * Arguably, it shouldn't be necessary to re-enable the
+ * slist after the call to H5C__flush_invalidate_cache(), as
+ * the metadata cache should be discarded. However, in the
+ * test code, we make multiple calls to H5C_dest(). Thus
+ * we disable the slist again on failure if it and the cache
+ * still exist.
+ *
*-------------------------------------------------------------------------
*/
herr_t
@@ -845,33 +941,60 @@ H5C_dest(H5F_t * f)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't display cache image stats")
#endif /* H5AC_DUMP_IMAGE_STATS_ON_CLOSE */
+ /* Enable the slist, as it is needed in the flush */
+ if ( H5C_set_slist_enabled(f->shared->cache, TRUE, FALSE) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist enabled failed")
+
/* Flush and invalidate all cache entries */
- if(H5C__flush_invalidate_cache(f, H5C__NO_FLAGS_SET) < 0 )
+ if ( H5C__flush_invalidate_cache(f, H5C__NO_FLAGS_SET) < 0 )
+
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache")
/* Generate & write cache image if requested */
- if(cache_ptr->image_ctl.generate_image)
- if(H5C__generate_cache_image(f, cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "Can't generate metadata cache image")
+ if ( cache_ptr->image_ctl.generate_image ) {
+
+ if ( H5C__generate_cache_image(f, cache_ptr) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, \
+ "Can't generate metadata cache image")
+ }
+
+ /* Question: Is it possible for cache_ptr->slist_ptr to be
+ * non-NULL at this point? If not, shouldn't this if statement
+ * be an assert?
+ */
+ if ( cache_ptr->slist_ptr != NULL ) {
+
+ HDassert(cache_ptr->slist_len == 0);
+ HDassert(cache_ptr->slist_size == 0);
- if(cache_ptr->slist_ptr != NULL) {
H5SL_close(cache_ptr->slist_ptr);
+
cache_ptr->slist_ptr = NULL;
+
} /* end if */
if(cache_ptr->tag_list != NULL) {
+
H5SL_destroy(cache_ptr->tag_list, H5C_free_tag_list_cb, NULL);
cache_ptr->tag_list = NULL;
+
} /* end if */
- if(cache_ptr->log_info != NULL)
+ if(cache_ptr->log_info != NULL) {
+
H5MM_xfree(cache_ptr->log_info);
+ }
#ifndef NDEBUG
#if H5C_DO_SANITY_CHECKS
- if(cache_ptr->get_entry_ptr_from_addr_counter > 0)
- HDfprintf(stdout, "*** %ld calls to H5C_get_entry_ptr_from_add(). ***\n",
- cache_ptr->get_entry_ptr_from_addr_counter);
+
+ if ( cache_ptr->get_entry_ptr_from_addr_counter > 0 ) {
+
+ HDfprintf(stdout,
+ "*** %ld calls to H5C_get_entry_ptr_from_add(). ***\n",
+ cache_ptr->get_entry_ptr_from_addr_counter);
+ }
#endif /* H5C_DO_SANITY_CHECKS */
cache_ptr->magic = 0;
@@ -880,7 +1003,19 @@ H5C_dest(H5F_t * f)
cache_ptr = H5FL_FREE(H5C_t, cache_ptr);
done:
+
+ if ( ( ret_value < 0 ) && ( cache_ptr ) && ( cache_ptr->slist_ptr ) ) {
+
+ /* need this for test code -- see change note for details */
+
+ if ( H5C_set_slist_enabled(f->shared->cache, FALSE, FALSE) < 0 )
+
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "disable slist on flush dest failure failed")
+ }
+
FUNC_LEAVE_NOAPI(ret_value)
+
} /* H5C_dest() */
@@ -894,6 +1029,14 @@ done:
* Programmer: Vailin Choi
* Dec 2013
*
+ * Modifications:
+ *
+ * JRM -- 5/5/20
+ *
+ * Added code to enable the skip list prior to the call
+ * to H5C__flush_invalidate_cache(), and disable it
+ * afterwards.
+ *
*-------------------------------------------------------------------------
*/
herr_t
@@ -906,9 +1049,22 @@ H5C_evict(H5F_t * f)
/* Sanity check */
HDassert(f);
+ /* Enable the slist, as it is needed in the flush */
+ if ( H5C_set_slist_enabled(f->shared->cache, TRUE, FALSE) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist enabled failed")
+
/* Flush and invalidate all cache entries except the pinned entries */
- if(H5C__flush_invalidate_cache(f, H5C__EVICT_ALLOW_LAST_PINS_FLAG) < 0 )
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to evict entries in the cache")
+ if ( H5C__flush_invalidate_cache(f, H5C__EVICT_ALLOW_LAST_PINS_FLAG) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL,
+ "unable to evict entries in the cache")
+
+ /* Disable the slist, clearing it if necessary */
+ if ( H5C_set_slist_enabled(f->shared->cache, FALSE, TRUE) < 0)
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist disabled failed")
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -2878,6 +3034,179 @@ done:
/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_set_slist_enabled()
+ *
+ * Purpose: Enable or disable the slist as directed.
+ *
+ * The slist (skip list) is an address ordered list of
+ * dirty entries in the metadata cache. However, this
+ * list is only needed during flush and close, where we
+ * use it to write entries in more or less increasing
+ * address order.
+ *
+ * This function sets up and enables further operations
+ * on the slist, or disables the slist. This in turn
+ * allows us to avoid the overhead of maintaining the
+ * slist when it is not needed.
+ *
+ *
+ * If the slist_enabled parameter is TRUE, the function
+ *
+ * 1) Verifies that the slist is empty.
+ *
+ * 2) Scans the index list, and inserts all dirty entries
+ * into the slist.
+ *
+ * 3) Sets cache_ptr->slist_enabled = TRUE.
+ *
+ * Note that the clear_slist parameter is ignored if
+ * the slist_enabled parameter is TRUE.
+ *
+ *
+ * If the slist_enabled parameter is FALSE, the function
+ * shuts down the slist.
+ *
+ * Normally the slist will be empty at this point; however,
+ * that need not be the case if H5C_flush_cache() has been
+ * called with the H5C__FLUSH_MARKED_ENTRIES_FLAG.
+ *
+ * Thus shutdown proceeds as follows:
+ *
+ * 1) Test to see if the slist is empty. If it is, proceed
+ * to step 3.
+ *
+ * 2) Test to see if the clear_slist parameter is TRUE.
+ *
+ * If it is, remove all entries from the slist.
+ *
+ * If it isn't, throw an error.
+ *
+ * 3) set cache_ptr->slist_enabled = FALSE.
+ *
+ * Return: SUCCEED on success, and FAIL on failure.
+ *
+ * Programmer: John Mainzer
+ * 5/1/20
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_set_slist_enabled(H5C_t *cache_ptr, hbool_t slist_enabled,
+ hbool_t clear_slist)
+{
+ H5C_cache_entry_t * entry_ptr;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ if ( ( cache_ptr == NULL ) || ( cache_ptr->magic != H5C__H5C_T_MAGIC ) )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry")
+
+#if H5C__SLIST_OPT_ENABLED
+
+ if ( slist_enabled ) {
+
+ if ( cache_ptr->slist_enabled ) {
+
+ HDassert(FALSE);
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist already enabled?")
+ }
+
+ if ( ( cache_ptr->slist_len != 0 ) ||
+ ( cache_ptr->slist_size != 0 ) ) {
+
+ HDassert(FALSE);
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist not empty (1)?")
+ }
+
+
+ /* set cache_ptr->slist_enabled to TRUE so that the slist
+ * maintenance macros will be enabled.
+ */
+ cache_ptr->slist_enabled = TRUE;
+
+
+ /* scan the index list and insert all dirty entries in the slist */
+ entry_ptr = cache_ptr->il_head;
+
+ while ( entry_ptr != NULL ) {
+
+ HDassert( entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC );
+
+ if ( entry_ptr->is_dirty ) {
+
+ H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
+ }
+
+ entry_ptr = entry_ptr->il_next;
+ }
+
+ /* we don't maintain a dirty index len, so we can't do a cross
+ * check against it. Note that there is no point in cross checking
+ * against the dirty LRU size, as the dirty LRU may not be maintained,
+ * and in any case, there is no requirement that all dirty entries
+ * will reside on the dirty LRU.
+ */
+ HDassert( cache_ptr->dirty_index_size == cache_ptr->slist_size );
+
+ } else { /* take down the skip list */
+
+ if ( ! cache_ptr->slist_enabled ) {
+
+ HDassert(FALSE);
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist already disabled?")
+ }
+
+ if ( ( cache_ptr->slist_len != 0 ) ||
+ ( cache_ptr->slist_size != 0 ) ) {
+
+ if ( clear_slist ) {
+
+ H5SL_node_t *node_ptr;
+
+ node_ptr = H5SL_first(cache_ptr->slist_ptr);
+
+ while ( node_ptr != NULL ) {
+
+ entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+
+ H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE);
+
+ node_ptr = H5SL_first(cache_ptr->slist_ptr);
+ }
+ } else {
+
+ HDassert(FALSE);
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist not empty (2)?")
+ }
+ }
+
+ cache_ptr->slist_enabled = FALSE;
+
+ HDassert( 0 == cache_ptr->slist_len );
+ HDassert( 0 == cache_ptr->slist_size );
+ }
+
+#else /* H5C__SLIST_OPT_ENABLED is FALSE */
+
+ HDassert(cache_ptr->slist_enabled);
+
+#endif /* H5C__SLIST_OPT_ENABLED is FALSE */
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C_set_slist_enabled() */
+
+
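In short: enabling rebuilds the slist from the dirty entries in the index list, while disabling verifies that the slist is empty (or, with clear_slist, empties it). A sketch of the call patterns this patch uses, modeled on the calls in H5AC_dest() and H5C_evict() above:

    /* before a flush: construct the slist from all dirty entries */
    if ( H5C_set_slist_enabled(cache_ptr, TRUE, FALSE) < 0 )
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist enabled failed")

    /* ... flush ... */

    /* after a complete flush, the slist must already be empty */
    if ( H5C_set_slist_enabled(cache_ptr, FALSE, FALSE) < 0 )
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "disable slist failed")

    /* after a partial flush (e.g. marked entries only), discard
     * whatever remains in the slist while disabling it
     */
    if ( H5C_set_slist_enabled(cache_ptr, FALSE, TRUE) < 0 )
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "disable slist failed")
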
+/*-------------------------------------------------------------------------
* Function: H5C_unpin_entry()
*
* Purpose: Unpin a cache entry. The entry can be either protected or
@@ -2888,8 +3217,8 @@ done:
* Programmer: John Mainzer
* 3/22/06
*
- * Changes: Added extreme sanity checks on entry and exit.
- JRM -- 4/26/14
+ * Changes: Added extreme sanity checks on entry and exit.
+ * JRM -- 4/26/14
*
*-------------------------------------------------------------------------
*/
@@ -2955,6 +3284,81 @@ done:
* Programmer: John Mainzer
* 6/2/04
*
+ * Modifications:
+ *
+ * JRM -- 7/21/04
+ * Updated for the addition of the hash table.
+ *
+ * JRM -- 10/28/04
+ * Added code to set cache_full to TRUE whenever we try to
+ * make space in the cache.
+ *
+ * JRM -- 11/12/04
+ * Added code to call to H5C_make_space_in_cache() after the
+ * call to H5C__auto_adjust_cache_size() if that function
+ * sets the size_decreased flag is TRUE.
+ *
+ * JRM -- 4/25/05
+ * The size_decreased flag can also be set to TRUE in
+ * H5C_set_cache_auto_resize_config() if a new configuration
+ * forces an immediate reduction in cache size. Modified
+ * the code to deal with this eventuality.
+ *
+ * JRM -- 6/24/05
+ * Added support for the new write_permitted field of H5C_t.
+ *
+ * JRM -- 10/22/05
+ * Hand optimizations.
+ *
+ * JRM -- 5/3/06
+ * Added code to set the new dirtied field in
+ * H5C_cache_entry_t to FALSE prior to return.
+ *
+ * JRM -- 6/23/06
+ * Modified code to allow dirty entries to be loaded from
+ * disk. This is necessary as a bug fix in the object
+ * header code requires us to modify a header as it is read.
+ *
+ * JRM -- 3/28/07
+ * Added the flags parameter and supporting code. At least
+ * for now, this parameter is used to allow the entry to
+ * be protected read only, thus allowing multiple protects.
+ *
+ * Also added code to allow multiple read only protects
+ * of cache entries.
+ *
+ * JRM -- 7/27/07
+ * Added code supporting the new evictions_enabled field
+ * in H5C_t.
+ *
+ * JRM -- 1/3/08
+ * Added code to do a flash cache size increase if appropriate
+ * when a large entry is loaded.
+ *
+ * JRM -- 11/13/08
+ * Modified function to call H5C_make_space_in_cache() when
+ * the min_clean_size is violated, not just when there isn't
+ * enough space for an entry that has just been loaded.
+ *
+ * The purpose of this modification is to avoid "metadata
+ * blizzards" in the write only case. In such instances,
+ * the cache was allowed to fill with dirty metadata. When
+ * we finally needed to evict an entry to make space, we had
+ * to flush out a whole cache full of metadata -- which has
+ * interesting performance effects. We hope to avoid (or
+ * perhaps more accurately hide) this effect by maintaining
+ * the min_clean_size, which should force us to start flushing
+ * entries long before we actually have to evict something
+ * to make space.
+ *
+ *
+ * Missing entries?
+ *
+ *
+ * JRM -- 5/8/20
+ * Updated for the possibility that the slist will be
+ * disabled.
+ *
*-------------------------------------------------------------------------
*/
herr_t
@@ -2996,9 +3400,15 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags)
HDassert( H5F_addr_defined(addr) );
HDassert( thing );
HDassert( ! ( pin_entry && unpin_entry ) );
- HDassert( ( ! free_file_space ) || ( deleted ) ); /* deleted flag must accompany free_file_space */
- HDassert( ( ! take_ownership ) || ( deleted ) ); /* deleted flag must accompany take_ownership */
- HDassert( ! ( free_file_space && take_ownership ) ); /* can't have both free_file_space & take_ownership */
+
+ /* deleted flag must accompany free_file_space */
+ HDassert( ( ! free_file_space ) || ( deleted ) );
+
+ /* deleted flag must accompany take_ownership */
+ HDassert( ( ! take_ownership ) || ( deleted ) );
+
+ /* can't have both free_file_space & take_ownership */
+ HDassert( ! ( free_file_space && take_ownership ) );
entry_ptr = (H5C_cache_entry_t *)thing;
@@ -3011,48 +3421,66 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags)
was_clean = ! ( entry_ptr->is_dirty );
#if H5C_DO_EXTREME_SANITY_CHECKS
- if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
- (H5C_validate_lru_list(cache_ptr) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+ if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_lru_list(cache_ptr) < 0 ) )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
/* if the entry has multiple read only protects, just decrement
* the ro_ref_counter. Don't actually unprotect until the ref count
* drops to zero.
*/
- if(entry_ptr->ro_ref_count > 1) {
+ if ( entry_ptr->ro_ref_count > 1 ) {
+
/* Sanity check */
- HDassert(entry_ptr->is_protected);
+ HDassert(entry_ptr->is_protected);
HDassert(entry_ptr->is_read_only);
- if(dirtied)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Read only entry modified??")
+ if ( dirtied )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
+ "Read only entry modified??")
/* Reduce the RO ref count */
- (entry_ptr->ro_ref_count)--;
+ (entry_ptr->ro_ref_count)--;
/* Pin or unpin the entry as requested. */
- if(pin_entry) {
+ if ( pin_entry ) {
+
/* Pin the entry from a client */
- if(H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client")
- } else if(unpin_entry) {
+ if ( H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, \
+ "Can't pin entry by client")
+
+ } else if ( unpin_entry ) {
+
/* Unpin the entry from a client */
- if(H5C__unpin_entry_from_client(cache_ptr, entry_ptr, FALSE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry by client")
+ if ( H5C__unpin_entry_from_client(cache_ptr, entry_ptr, FALSE) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, \
+ "Can't unpin entry by client")
+
} /* end if */
} else {
- if(entry_ptr->is_read_only) {
+
+ if ( entry_ptr->is_read_only ) {
+
/* Sanity check */
- HDassert(entry_ptr->ro_ref_count == 1);
+ HDassert(entry_ptr->ro_ref_count == 1);
- if(dirtied)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Read only entry modified??")
+ if ( dirtied )
- entry_ptr->is_read_only = FALSE;
- entry_ptr->ro_ref_count = 0;
- } /* end if */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
+ "Read only entry modified??")
+
+ entry_ptr->is_read_only = FALSE;
+ entry_ptr->ro_ref_count = 0;
+
+ } /* end if */
#ifdef H5_HAVE_PARALLEL
/* When the H5C code is used to implement the metadata cache in the
@@ -3080,63 +3508,102 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags)
} /* end if */
#endif /* H5_HAVE_PARALLEL */
- if(!entry_ptr->is_protected)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Entry already unprotected??")
+ if ( ! entry_ptr->is_protected )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
+ "Entry already unprotected??")
/* Mark the entry as dirty if appropriate */
entry_ptr->is_dirty = (entry_ptr->is_dirty || dirtied);
- if(dirtied)
- if(entry_ptr->image_up_to_date) {
- entry_ptr->image_up_to_date = FALSE;
- if(entry_ptr->flush_dep_nparents > 0)
- if(H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "Can't propagate serialization status to fd parents")
+ if ( dirtied ) {
+
+ if ( entry_ptr->image_up_to_date ) {
+
+ entry_ptr->image_up_to_date = FALSE;
+
+ if ( entry_ptr->flush_dep_nparents > 0 ) {
+
+ if ( H5C__mark_flush_dep_unserialized(entry_ptr) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, \
+ "Can't propagate serialization status to fd parents")
+
+ } /* end if */
} /* end if */
+ } /* end if */
/* Check for newly dirtied entry */
- if(was_clean && entry_ptr->is_dirty) {
+ if ( was_clean && entry_ptr->is_dirty ) {
+
/* Update index for newly dirtied entry */
H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr)
- /* If the entry's type has a 'notify' callback send a 'entry dirtied'
- * notice now that the entry is fully integrated into the cache.
+ /* If the entry's type has a 'notify' callback send a
+ * 'entry dirtied' notice now that the entry is fully
+ * integrated into the cache.
*/
- if(entry_ptr->type->notify &&
- (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag set")
+ if ( ( entry_ptr->type->notify ) &&
+ ( (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED,
+ entry_ptr) < 0 ) )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, \
+ "can't notify client about entry dirty flag set")
/* Propagate the flush dep dirty flag up the flush dependency chain
- * if appropriate */
- if(entry_ptr->flush_dep_nparents > 0)
- if(H5C__mark_flush_dep_dirty(entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
+ * if appropriate
+ */
+ if ( entry_ptr->flush_dep_nparents > 0 ) {
+
+ if ( H5C__mark_flush_dep_dirty(entry_ptr) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, \
+ "Can't propagate flush dep dirty flag")
+ }
} /* end if */
/* Check for newly clean entry */
- else if(!was_clean && !entry_ptr->is_dirty) {
- /* If the entry's type has a 'notify' callback send a 'entry cleaned'
- * notice now that the entry is fully integrated into the cache.
+ else if ( ! was_clean && ! entry_ptr->is_dirty ) {
+
+ /* If the entry's type has a 'notify' callback send a
+ * 'entry cleaned' notice now that the entry is fully
+ * integrated into the cache.
*/
- if(entry_ptr->type->notify &&
- (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag cleared")
+ if ( ( entry_ptr->type->notify ) &&
+ ( (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED,
+ entry_ptr) < 0 ) )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, \
+ "can't notify client about entry dirty flag cleared")
/* Propagate the flush dep clean flag up the flush dependency chain
- * if appropriate */
- if(entry_ptr->flush_dep_nparents > 0)
- if(H5C__mark_flush_dep_clean(entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
+ * if appropriate
+ */
+ if ( entry_ptr->flush_dep_nparents > 0 ) {
+
+ if ( H5C__mark_flush_dep_clean(entry_ptr) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, \
+ "Can't propagate flush dep dirty flag")
+
+ }
} /* end else-if */
/* Pin or unpin the entry as requested. */
- if(pin_entry) {
+ if ( pin_entry ) {
+
/* Pin the entry from a client */
- if(H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client")
- } else if(unpin_entry) {
+ if ( H5C__pin_entry_from_client(cache_ptr, entry_ptr) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, \
+ "Can't pin entry by client")
+
+ } else if ( unpin_entry ) {
+
/* Unpin the entry from a client */
- if(H5C__unpin_entry_from_client(cache_ptr, entry_ptr, FALSE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry by client")
+ if ( H5C__unpin_entry_from_client(cache_ptr, entry_ptr, FALSE) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, \
+ "Can't unpin entry by client")
} /* end if */
/* H5C__UPDATE_RP_FOR_UNPROTECT will place the unprotected entry on
@@ -3149,10 +3616,15 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags)
/* if the entry is dirty, 'or' its flush_marker with the set flush flag,
* and then add it to the skip list if it isn't there already.
*/
- if(entry_ptr->is_dirty) {
+ if ( entry_ptr->is_dirty ) {
+
entry_ptr->flush_marker |= set_flush_marker;
- if(!entry_ptr->in_slist)
+
+ if ( !entry_ptr->in_slist ) {
+
+ /* this is a no-op if cache_ptr->slist_enabled is FALSE */
H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
+ }
} /* end if */
/* this implementation of the "deleted" option is a bit inefficient, as
@@ -3164,44 +3636,71 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags)
* makes good use of existing code.
* JRM - 5/19/04
*/
- if(deleted) {
- unsigned flush_flags = (H5C__FLUSH_CLEAR_ONLY_FLAG |
- H5C__FLUSH_INVALIDATE_FLAG);
+ if ( deleted ) {
+
+ unsigned flush_flags = (H5C__FLUSH_CLEAR_ONLY_FLAG |
+ H5C__FLUSH_INVALIDATE_FLAG);
/* verify that the target entry is in the cache. */
H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL)
- if(test_entry_ptr == NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?")
- else if(test_entry_ptr != entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "hash table contains multiple entries for addr?!?")
+
+ if ( test_entry_ptr == NULL )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
+ "entry not in hash table?!?")
+
+ else if ( test_entry_ptr != entry_ptr )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
+ "hash table contains multiple entries for addr?!?")
/* Set the 'free file space' flag for the flush, if needed */
- if(free_file_space)
+ if ( free_file_space ) {
+
flush_flags |= H5C__FREE_FILE_SPACE_FLAG;
+ }
/* Set the "take ownership" flag for the flush, if needed */
- if(take_ownership)
+ if ( take_ownership ) {
+
flush_flags |= H5C__TAKE_OWNERSHIP_FLAG;
+ }
/* Delete the entry from the skip list on destroy */
flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG;
- HDassert(((!was_clean) || dirtied) == entry_ptr->in_slist);
- if(H5C__flush_single_entry(f, entry_ptr, flush_flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't flush entry")
+ HDassert( ( ! cache_ptr->slist_enabled ) || \
+ ( ( ( ! was_clean ) || dirtied ) == \
+ ( entry_ptr->in_slist ) ) );
+
+ if ( H5C__flush_single_entry(f, entry_ptr, flush_flags) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
+ "Can't flush entry")
+
} /* end if */
#ifdef H5_HAVE_PARALLEL
- else if(clear_entry) {
+ else if ( clear_entry ) {
/* verify that the target entry is in the cache. */
H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL)
- if(test_entry_ptr == NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?")
- else if(test_entry_ptr != entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "hash table contains multiple entries for addr?!?")
- if(H5C__flush_single_entry(f, entry_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't clear entry")
+ if ( test_entry_ptr == NULL )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
+ "entry not in hash table?!?")
+
+ else if ( test_entry_ptr != entry_ptr )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
+ "hash table contains multiple entries for addr?!?")
+
+ if ( H5C__flush_single_entry(f, entry_ptr,
+ H5C__FLUSH_CLEAR_ONLY_FLAG |
+ H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0 )
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
+ "Can't clear entry")
+
} /* end else if */
#endif /* H5_HAVE_PARALLEL */
}
@@ -3209,14 +3708,18 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags)
H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr)
done:
+
#if H5C_DO_EXTREME_SANITY_CHECKS
- if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
- (H5C_validate_lru_list(cache_ptr) < 0)) {
- HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
+ if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_lru_list(cache_ptr) < 0 ) ) {
+
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
FUNC_LEAVE_NOAPI(ret_value)
+
} /* H5C_unprotect() */
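The "no-op if cache_ptr->slist_enabled is FALSE" comment above relies on the guard this commit adds to the slist maintenance macros in src/H5Cpkg.h (hunk not shown in this excerpt). A simplified sketch of that guard, written as a function for readability -- the real logic is the H5C__INSERT_ENTRY_IN_SLIST macro, and the body below is an assumption rather than the macro's exact code:

    /* hypothetical illustration of the slist_enabled guard */
    static herr_t
    insert_entry_in_slist(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
    {
        if ( ! cache_ptr->slist_enabled )
            return SUCCEED;    /* slist is down -- maintenance is a no-op */

        HDassert( ! entry_ptr->in_slist );

        /* the slist is keyed on the entry's base address */
        if ( H5SL_insert(cache_ptr->slist_ptr, entry_ptr,
                         &entry_ptr->addr) < 0 )
            return FAIL;

        entry_ptr->in_slist = TRUE;
        cache_ptr->slist_len++;
        cache_ptr->slist_size += entry_ptr->size;

        return SUCCEED;
    }
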
@@ -5028,6 +5531,7 @@ done:
/*-------------------------------------------------------------------------
+ *
* Function: H5C__flush_invalidate_cache
*
* Purpose: Flush and destroy the entries contained in the target
@@ -5057,6 +5561,51 @@ done:
* Programmer: John Mainzer
* 3/24/05
*
+ * Modifications:
+ *
+ * To support the fractal heap, the cache must now deal with
+ * entries being dirtied, resized, and/or renamed inside
+ * flush callbacks. Updated function to support this.
+ *
+ * -- JRM 8/27/06
+ *
+ * Added code to detect and manage the case in which a
+ * flush callback changes the s-list out from under
+ * the function. The only way I can think of in which this
+ * can happen is if a flush function loads an entry
+ * into the cache that isn't there already. Quincey tells
+ * me that this will never happen, but I'm not sure I
+ * believe him.
+ *
+ * Note that this is a pretty bad scenario if it ever
+ * happens. The code I have added should allow us to
+ * handle the situation under all but the worst conditions,
+ * but one can argue that we should just scream and die if
+ * we ever detect the condition.
+ *
+ * -- JRM 10/13/07
+ *
+ * Missing entries?
+ *
+ *
+ * Added support for the H5C__EVICT_ALLOW_LAST_PINS_FLAG.
+ * This flag is used to flush and evict all entries in
+ * the metadata cache that are not pinned -- typically,
+ * everything other than the superblock.
+ *
+ * ??? -- ??/??/??
+ *
+ * Added sanity checks to verify that the skip list is
+ * enabled on entry. On the face of it, it would make
+ * sense to enable the slist on entry, and disable it
+ * on exit, as this function is not called repeatedly.
+ * However, since this function can be called from
+ * H5C_flush_cache(), this would create cases in the test
+ * code where we would have to check the flags to determine
+ * whether we must setup and take down the slist.
+ *
+ * JRM -- 5/5/20
+ *
*-------------------------------------------------------------------------
*/
static herr_t
@@ -5074,6 +5623,7 @@ H5C__flush_invalidate_cache(H5F_t *f, unsigned flags)
HDassert(cache_ptr);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
HDassert(cache_ptr->slist_ptr);
+ HDassert(cache_ptr->slist_enabled);
#if H5C_DO_SANITY_CHECKS
{
@@ -5092,7 +5642,8 @@ H5C__flush_invalidate_cache(H5F_t *f, unsigned flags)
HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- for(i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
+ for ( i = H5C_RING_USER; i < H5C_RING_NTYPES; i++ ) {
+
index_len += cache_ptr->index_ring_len[i];
index_size += cache_ptr->index_ring_size[i];
clean_index_size += cache_ptr->clean_index_ring_size[i];
@@ -5100,6 +5651,7 @@ H5C__flush_invalidate_cache(H5F_t *f, unsigned flags)
slist_len += cache_ptr->slist_ring_len[i];
slist_size += cache_ptr->slist_ring_size[i];
+
} /* end for */
HDassert(cache_ptr->index_len == index_len);
@@ -5112,49 +5664,68 @@ H5C__flush_invalidate_cache(H5F_t *f, unsigned flags)
#endif /* H5C_DO_SANITY_CHECKS */
/* remove ageout markers if present */
- if(cache_ptr->epoch_markers_active > 0)
- if(H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers")
+ if ( cache_ptr->epoch_markers_active > 0 ) {
+
+ if ( H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "error removing all epoch markers")
+ }
/* flush invalidate each ring, starting from the outermost ring and
* working inward.
*/
ring = H5C_RING_USER;
- while(ring < H5C_RING_NTYPES) {
+
+ while ( ring < H5C_RING_NTYPES) {
+
if(H5C_flush_invalidate_ring(f, ring, flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate ring failed")
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL,
+ "flush invalidate ring failed")
ring++;
+
} /* end while */
/* Invariants, after destroying all entries in the hash table */
- if(!(flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG)) {
+ if( ! ( flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG ) ) {
+
HDassert(cache_ptr->index_size == 0);
HDassert(cache_ptr->clean_index_size == 0);
HDassert(cache_ptr->pel_len == 0);
HDassert(cache_ptr->pel_size == 0);
+
} /* end if */
else {
+
H5C_cache_entry_t *entry_ptr; /* Cache entry */
unsigned u; /* Local index variable */
/* All rings except ring 4 should be empty now */
/* (Ring 4 has the superblock) */
- for(u = H5C_RING_USER; u < H5C_RING_SB; u++) {
+ for ( u = H5C_RING_USER; u < H5C_RING_SB; u++ ) {
+
HDassert(cache_ptr->index_ring_len[u] == 0);
HDassert(cache_ptr->index_ring_size[u] == 0);
HDassert(cache_ptr->clean_index_ring_size[u] == 0);
+
} /* end for */
/* Check that any remaining pinned entries are in the superblock ring */
+
entry_ptr = cache_ptr->pel_head_ptr;
+
while(entry_ptr) {
+
/* Check ring */
HDassert(entry_ptr->ring == H5C_RING_SB);
/* Advance to next entry in pinned entry list */
entry_ptr = entry_ptr->next;
+
} /* end while */
} /* end else */
+
HDassert(cache_ptr->dirty_index_size == 0);
HDassert(cache_ptr->slist_len == 0);
HDassert(cache_ptr->slist_size == 0);
@@ -5164,43 +5735,59 @@ H5C__flush_invalidate_cache(H5F_t *f, unsigned flags)
HDassert(cache_ptr->LRU_list_size == 0);
done:
+
FUNC_LEAVE_NOAPI(ret_value)
+
} /* H5C__flush_invalidate_cache() */
/*-------------------------------------------------------------------------
* Function: H5C_flush_invalidate_ring
*
- * Purpose: Flush and destroy the entries contained in the target
- * cache and ring.
+ * Purpose: Flush and destroy the entries contained in the target
+ * cache and ring.
*
- * If the ring contains protected entries, the function will
- * fail, as protected entries cannot be either flushed or
- * destroyed. However all unprotected entries should be
- * flushed and destroyed before the function returns failure.
+ * If the ring contains protected entries, the function will
+ * fail, as protected entries cannot be either flushed or
+ * destroyed. However all unprotected entries should be
+ * flushed and destroyed before the function returns failure.
*
- * While pinned entries can usually be flushed, they cannot
- * be destroyed. However, they should be unpinned when all
- * the entries that reference them have been destroyed (thus
- * reduding the pinned entry's reference count to 0, allowing
- * it to be unpinned).
+ * While pinned entries can usually be flushed, they cannot
+ * be destroyed. However, they should be unpinned when all
+ * the entries that reference them have been destroyed (thus
+ * reducing the pinned entry's reference count to 0, allowing
+ * it to be unpinned).
*
- * If pinned entries are present, the function makes repeated
- * passes through the cache, flushing all dirty entries
- * (including the pinned dirty entries where permitted) and
- * destroying all unpinned entries. This process is repeated
- * until either the cache is empty, or the number of pinned
- * entries stops decreasing on each pass.
+ * If pinned entries are present, the function makes repeated
+ * passes through the cache, flushing all dirty entries
+ * (including the pinned dirty entries where permitted) and
+ * destroying all unpinned entries. This process is repeated
+ * until either the cache is empty, or the number of pinned
+ * entries stops decreasing on each pass.
*
- * If flush dependencies appear in the target ring, the
- * function makes repeated passes through the cache flushing
- * entries in flush dependency order.
+ * If flush dependencies appear in the target ring, the
+ * function makes repeated passes through the cache flushing
+ * entries in flush dependency order.
*
* Return: Non-negative on success/Negative on failure or if there was
- * a request to flush all items and something was protected.
+ * a request to flush all items and something was protected.
*
* Programmer: John Mainzer
- * 9/1/15
+ * 9/1/15
+ *
+ * Changes: Added support for the H5C__EVICT_ALLOW_LAST_PINS_FLAG.
+ * This flag is used to flush and evict all entries in
+ * the metadata cache that are not pinned -- typically,
+ * everything other than the superblock.
+ *
+ * ??? -- ??/??/??
+ *
+ * A recent optimization turns off the slist unless a flush
+ * is in progress. This should not affect this function, as
+ * it is only called during a flush. Added an assertion to
+ * verify this.
+ *
+ * JRM -- 5/6/20
*
*-------------------------------------------------------------------------
*/
@@ -5228,9 +5815,12 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
HDassert(f);
HDassert(f->shared);
+
cache_ptr = f->shared->cache;
+
HDassert(cache_ptr);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_enabled);
HDassert(cache_ptr->slist_ptr);
HDassert(ring > H5C_RING_UNDEFINED);
HDassert(ring < H5C_RING_NTYPES);
@@ -5272,19 +5862,25 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
*/
/* compute the number of pinned entries in this ring */
+
entry_ptr = cache_ptr->pel_head_ptr;
cur_ring_pel_len = 0;
- while(entry_ptr != NULL) {
+
+ while ( entry_ptr != NULL ) {
+
HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(entry_ptr->ring >= ring);
if(entry_ptr->ring == ring)
cur_ring_pel_len++;
entry_ptr = entry_ptr->next;
+
} /* end while */
old_ring_pel_len = cur_ring_pel_len;
+
while(cache_ptr->index_ring_len[ring] > 0) {
+
/* first, try to flush-destroy any dirty entries. Do this by
* making a scan through the slist. Note that new dirty entries
* may be created by the flush call backs. Thus it is possible
@@ -5327,25 +5923,33 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
/* this done, start the scan of the slist */
restart_slist_scan = TRUE;
- while(restart_slist_scan || (node_ptr != NULL)) {
- if(restart_slist_scan) {
+
+ while ( restart_slist_scan || ( node_ptr != NULL ) ) {
+
+ if ( restart_slist_scan ) {
+
restart_slist_scan = FALSE;
/* Start at beginning of skip list */
node_ptr = H5SL_first(cache_ptr->slist_ptr);
- if(node_ptr == NULL)
+
+ if ( node_ptr == NULL )
/* the slist is empty -- break out of inner loop */
break;
/* Get cache entry for this node */
next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- if(NULL == next_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+
+ if ( NULL == next_entry_ptr )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "next_entry_ptr == NULL ?!?!")
HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(next_entry_ptr->is_dirty);
HDassert(next_entry_ptr->in_slist);
HDassert(next_entry_ptr->ring >= ring);
+
} /* end if */
entry_ptr = next_entry_ptr;
@@ -5371,18 +5975,26 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
* from the slist.
*/
node_ptr = H5SL_next(node_ptr);
+
if(node_ptr != NULL) {
+
next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+
if(NULL == next_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "next_entry_ptr == NULL ?!?!")
+
HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(next_entry_ptr->is_dirty);
HDassert(next_entry_ptr->in_slist);
HDassert(next_entry_ptr->ring >= ring);
HDassert(entry_ptr != next_entry_ptr);
} /* end if */
- else
+ else {
+
next_entry_ptr = NULL;
+ }
/* Note that we now remove nodes from the slist as we flush
* the associated entries, instead of leaving them there
@@ -5394,22 +6006,31 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
* flush, we must keep the slist in canonical form at all
* times.
*/
- if(((!entry_ptr->flush_me_last) ||
- ((entry_ptr->flush_me_last) &&
- (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
- (entry_ptr->flush_dep_nchildren == 0) &&
- (entry_ptr->ring == ring)) {
- if(entry_ptr->is_protected) {
+ if ( ( ( !entry_ptr->flush_me_last ) ||
+ ( ( entry_ptr->flush_me_last ) &&
+ ( cache_ptr->num_last_entries >= cache_ptr->slist_len ) )
+ ) &&
+ ( entry_ptr->flush_dep_nchildren == 0 ) &&
+ ( entry_ptr->ring == ring ) ) {
+
+ if ( entry_ptr->is_protected ) {
+
                    /* we have major problems -- but let's flush
* everything we can before we flag an error.
*/
protected_entries++;
+
} /* end if */
-        else if(entry_ptr->is_pinned) {
-            if(H5C__flush_single_entry(f, entry_ptr, H5C__DURING_FLUSH_FLAG) < 0)
-                HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed")
-                    if(cache_ptr->slist_changed) {
+                else if ( entry_ptr->is_pinned ) {
+
+                    if ( H5C__flush_single_entry(f, entry_ptr,
+                                           H5C__DURING_FLUSH_FLAG) < 0 )
+
+                        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+                                    "dirty pinned entry flush failed")
+
+                    if ( cache_ptr->slist_changed ) {
/* The slist has been modified by something
* other than the simple removal of the
                         * flushed entry after the flush.
@@ -5420,13 +6041,22 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
restart_slist_scan = TRUE;
cache_ptr->slist_changed = FALSE;
H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr);
+
} /* end if */
} /* end else-if */
else {
- if(H5C__flush_single_entry(f, entry_ptr, (cooked_flags | H5C__DURING_FLUSH_FLAG | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry flush destroy failed")
- if(cache_ptr->slist_changed) {
+ if ( H5C__flush_single_entry(f, entry_ptr,
+ (cooked_flags |
+ H5C__DURING_FLUSH_FLAG |
+ H5C__FLUSH_INVALIDATE_FLAG |
+ H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) ) < 0)
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL,
+ "dirty entry flush destroy failed")
+
+ if ( cache_ptr->slist_changed ) {
+
/* The slist has been modified by something
* other than the simple removal of the
                         * flushed entry after the flush.
@@ -5452,9 +6082,15 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
* out from under us.
*/
- if(node_ptr == NULL) {
- HDassert(cache_ptr->slist_len == (uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase));
- HDassert(cache_ptr->slist_size == (size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase));
+ if ( node_ptr == NULL ) {
+
+ HDassert(cache_ptr->slist_len ==
+ (uint32_t)((int32_t)initial_slist_len +
+ cache_ptr->slist_len_increase));
+
+ HDassert(cache_ptr->slist_size ==
+ (size_t)((ssize_t)initial_slist_size +
+ cache_ptr->slist_size_increase));
} /* end if */
#endif /* H5C_DO_SANITY_CHECKS */
@@ -5478,7 +6114,9 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
cache_ptr->entries_relocated_counter = 0;
next_entry_ptr = cache_ptr->il_head;
- while(next_entry_ptr != NULL) {
+
+ while ( next_entry_ptr != NULL ) {
+
entry_ptr = next_entry_ptr;
HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(entry_ptr->ring >= ring);
@@ -5487,18 +6125,28 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
HDassert((next_entry_ptr == NULL) ||
(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC));
- if((!entry_ptr->flush_me_last || (entry_ptr->flush_me_last && cache_ptr->num_last_entries >= cache_ptr->slist_len))
- && entry_ptr->flush_dep_nchildren == 0 && entry_ptr->ring == ring) {
- if(entry_ptr->is_protected) {
+ if ( ( ( ! entry_ptr->flush_me_last ) ||
+ ( entry_ptr->flush_me_last &&
+ ( cache_ptr->num_last_entries >= cache_ptr->slist_len ) )
+ ) &&
+ ( entry_ptr->flush_dep_nchildren == 0 ) &&
+ ( entry_ptr->ring == ring ) ) {
+
+ if ( entry_ptr->is_protected ) {
+
                /* we have major problems -- but let's flush and
* destroy everything we can before we flag an
* error.
*/
protected_entries++;
- if(!entry_ptr->in_slist)
+
+ if ( ! entry_ptr->in_slist ) {
+
HDassert(!(entry_ptr->is_dirty));
+ }
} /* end if */
- else if(!(entry_ptr->is_pinned)) {
+ else if ( ! ( entry_ptr->is_pinned ) ) {
+
/* if *entry_ptr is dirty, it is possible
* that one or more other entries may be
* either removed from the cache, loaded
@@ -5527,8 +6175,14 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
*/
cache_ptr->entry_watched_for_removal = next_entry_ptr;
- if(H5C__flush_single_entry(f, entry_ptr, (cooked_flags | H5C__DURING_FLUSH_FLAG | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed")
+ if ( H5C__flush_single_entry(f, entry_ptr,
+ (cooked_flags |
+ H5C__DURING_FLUSH_FLAG |
+ H5C__FLUSH_INVALIDATE_FLAG |
+ H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "Entry flush destroy failed")
/* Restart the index list scan if necessary. Must
* do this if the next entry is evicted, and also if
@@ -5538,10 +6192,12 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
* if this results in the size of the pinned entry
             * list failing to decline during the pass.
*/
- if((NULL != next_entry_ptr && NULL == cache_ptr->entry_watched_for_removal)
- || (cache_ptr->entries_loaded_counter > 0)
- || (cache_ptr->entries_inserted_counter > 0)
- || (cache_ptr->entries_relocated_counter > 0)) {
+ if ( ( ( NULL != next_entry_ptr ) &&
+ ( NULL == cache_ptr->entry_watched_for_removal )
+ ) ||
+ ( cache_ptr->entries_loaded_counter > 0 ) ||
+ ( cache_ptr->entries_inserted_counter > 0 ) ||
+ ( cache_ptr->entries_relocated_counter > 0 ) ) {
next_entry_ptr = cache_ptr->il_head;
@@ -5550,9 +6206,12 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
cache_ptr->entries_relocated_counter = 0;
H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr)
+
} /* end if */
- else
+ else {
+
cache_ptr->entry_watched_for_removal = NULL;
+ }
} /* end if */
} /* end if */
} /* end for loop scanning hash table */
@@ -5568,35 +6227,53 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
old_ring_pel_len = cur_ring_pel_len;
entry_ptr = cache_ptr->pel_head_ptr;
cur_ring_pel_len = 0;
- while(entry_ptr != NULL) {
+
+ while ( entry_ptr != NULL ) {
+
HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(entry_ptr->ring >= ring);
- if(entry_ptr->ring == ring)
+ if ( entry_ptr->ring == ring ) {
+
cur_ring_pel_len++;
+ }
entry_ptr = entry_ptr->next;
+
} /* end while */
/* Check if the number of pinned entries in the ring is positive, and
* it is not declining. Scream and die if so.
*/
- if(cur_ring_pel_len > 0 && cur_ring_pel_len >= old_ring_pel_len) {
+ if ( ( cur_ring_pel_len > 0 ) &&
+ ( cur_ring_pel_len >= old_ring_pel_len ) ) {
+
/* Don't error if allowed to have pinned entries remaining */
- if(evict_flags)
+ if ( evict_flags ) {
+
HGOTO_DONE(TRUE)
+ }
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Pinned entry count not decreasing, cur_ring_pel_len = %d, old_ring_pel_len = %d, ring = %d", (int)cur_ring_pel_len, (int)old_ring_pel_len, (int)ring)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "Pinned entry count not decreasing, cur_ring_pel_len = %d, old_ring_pel_len = %d, ring = %d", \
+ (int)cur_ring_pel_len, \
+ (int)old_ring_pel_len, (int)ring)
} /* end if */
HDassert(protected_entries == cache_ptr->pl_len);
- if(protected_entries > 0 && protected_entries == cache_ptr->index_len)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Only protected entries left in cache, protected_entries = %d", (int)protected_entries)
+ if ( ( protected_entries > 0 ) &&
+ ( protected_entries == cache_ptr->index_len ) )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "Only protected entries left in cache, protected_entries = %d",\
+ (int)protected_entries)
+
} /* main while loop */
/* Invariants, after destroying all entries in the ring */
- for(i = (int)H5C_RING_UNDEFINED; i <= (int)ring; i++) {
+ for ( i = (int)H5C_RING_UNDEFINED; i <= (int)ring; i++ ) {
+
HDassert(cache_ptr->index_ring_len[i] == 0);
HDassert(cache_ptr->index_ring_size[i] == (size_t)0);
HDassert(cache_ptr->clean_index_ring_size[i] == (size_t)0);
@@ -5604,41 +6281,59 @@ H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
HDassert(cache_ptr->slist_ring_len[i] == 0);
HDassert(cache_ptr->slist_ring_size[i] == (size_t)0);
+
} /* end for */
HDassert(protected_entries <= cache_ptr->pl_len);
- if(protected_entries > 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Cache has protected entries")
- else if(cur_ring_pel_len > 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't unpin all pinned entries in ring")
+ if ( protected_entries > 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "Cache has protected entries")
+
+ } else if ( cur_ring_pel_len > 0 ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "Can't unpin all pinned entries in ring")
+ }
done:
+
FUNC_LEAVE_NOAPI(ret_value)
+
} /* H5C_flush_invalidate_ring() */
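One detail worth pulling out of the slist scans above is the cursor discipline: node_ptr is advanced before the current entry is flushed, because a flush-destroy removes that entry's node from the skip list. A fragment showing just the idiom -- it assumes the HDF5-internal skip list API visible in the diff (H5SL_first/H5SL_next/H5SL_item), and flush_one() is a hypothetical stand-in for H5C__flush_single_entry():

    H5SL_node_t       *node_ptr = H5SL_first(cache_ptr->slist_ptr);
    H5C_cache_entry_t *entry_ptr;

    while (node_ptr != NULL) {

        entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);

        /* advance first: flushing entry_ptr may delete its slist node */
        node_ptr = H5SL_next(node_ptr);

        if (flush_one(f, entry_ptr) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush failed")

    } /* end while */

The restart_slist_scan logic above layers on top of this: if a flush callback modifies the slist in any way beyond removing the flushed entry, the cursor is abandoned and the scan restarts from H5SL_first().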
/*-------------------------------------------------------------------------
+ *
* Function: H5C__flush_ring
*
- * Purpose: Flush the entries contained in the specified cache and
- * ring. All entries in rings outside the specified ring
- * must have been flushed on entry.
+ * Purpose: Flush the entries contained in the specified cache and
+ * ring. All entries in rings outside the specified ring
+ * must have been flushed on entry.
*
- * If the cache contains protected entries in the specified
- * ring, the function will fail, as protected entries cannot
- * be flushed. However all unprotected entries in the target
- * ring should be flushed before the function returns failure.
+ * If the cache contains protected entries in the specified
+ * ring, the function will fail, as protected entries cannot
+ *              be flushed.  However, all unprotected entries in the target
+ * ring should be flushed before the function returns failure.
*
- * If flush dependencies appear in the target ring, the
- * function makes repeated passes through the slist flushing
- * entries in flush dependency order.
+ * If flush dependencies appear in the target ring, the
+ * function makes repeated passes through the slist flushing
+ * entries in flush dependency order.
*
* Return: Non-negative on success/Negative on failure or if there was
- * a request to flush all items and something was protected.
+ * a request to flush all items and something was protected.
*
* Programmer: John Mainzer
- * 9/1/15
+ * 9/1/15
+ *
+ * Changes: A recent optimization turns off the slist unless a flush
+ * is in progress. This should not effect this function, as
+ *              is in progress.  This should not affect this function, as
+ * verify this.
+ *
+ * JRM -- 5/6/20
+ *
*
*-------------------------------------------------------------------------
*/
@@ -5666,24 +6361,31 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
HDassert(cache_ptr);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_enabled);
HDassert(cache_ptr->slist_ptr);
HDassert((flags & H5C__FLUSH_INVALIDATE_FLAG) == 0);
HDassert(ring > H5C_RING_UNDEFINED);
HDassert(ring < H5C_RING_NTYPES);
#if H5C_DO_EXTREME_SANITY_CHECKS
- if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
- (H5C_validate_lru_list(cache_ptr) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+ if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
+ ( H5C_validate_pinned_entry_list(cache_ptr ) < 0 ) ||
+ ( H5C_validate_lru_list(cache_ptr) < 0 ) )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
+ "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
ignore_protected = ( (flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0 );
flush_marked_entries = ( (flags & H5C__FLUSH_MARKED_ENTRIES_FLAG) != 0 );
- if(!flush_marked_entries)
- for(i = (int)H5C_RING_UNDEFINED; i < (int)ring; i++)
- HDassert(cache_ptr->slist_ring_len[i] == 0);
+ if ( ! flush_marked_entries ) {
+
+ for ( i = (int)H5C_RING_UNDEFINED; i < (int)ring; i++ ) {
+
+ HDassert(cache_ptr->slist_ring_len[i] == 0);
+ }
+ }
HDassert(cache_ptr->flush_in_progress);
@@ -5704,9 +6406,10 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
*/
cache_ptr->slist_changed = FALSE;
- while((cache_ptr->slist_ring_len[ring] > 0) &&
- (protected_entries == 0) &&
- (flushed_entries_last_pass)) {
+ while ( ( cache_ptr->slist_ring_len[ring] > 0 ) &&
+ ( protected_entries == 0 ) &&
+ ( flushed_entries_last_pass ) ) {
+
flushed_entries_last_pass = FALSE;
#if H5C_DO_SANITY_CHECKS
@@ -5750,26 +6453,33 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
restart_slist_scan = TRUE;
- while((restart_slist_scan ) || (node_ptr != NULL)) {
- if(restart_slist_scan) {
+ while ( ( restart_slist_scan ) || ( node_ptr != NULL ) ) {
+
+ if ( restart_slist_scan ) {
+
restart_slist_scan = FALSE;
/* Start at beginning of skip list */
node_ptr = H5SL_first(cache_ptr->slist_ptr);
- if(node_ptr == NULL)
+ if ( node_ptr == NULL ) {
+
/* the slist is empty -- break out of inner loop */
break;
+ }
/* Get cache entry for this node */
next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- if(NULL == next_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+ if ( NULL == next_entry_ptr )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "next_entry_ptr == NULL ?!?!")
HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(next_entry_ptr->is_dirty);
HDassert(next_entry_ptr->in_slist);
+
} /* end if */
entry_ptr = next_entry_ptr;
@@ -5794,54 +6504,76 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(entry_ptr->in_slist);
HDassert(entry_ptr->is_dirty);
- if(!flush_marked_entries || entry_ptr->flush_marker)
+
+ if ( ( ! flush_marked_entries ) || ( entry_ptr->flush_marker ) ) {
+
HDassert(entry_ptr->ring >= ring);
+ }
/* Advance node pointer now, before we delete its target
* from the slist.
*/
node_ptr = H5SL_next(node_ptr);
- if(node_ptr != NULL) {
+
+ if ( node_ptr != NULL ) {
+
next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- if(NULL == next_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+
+ if ( NULL == next_entry_ptr )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "next_entry_ptr == NULL ?!?!")
HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(next_entry_ptr->is_dirty);
HDassert(next_entry_ptr->in_slist);
- if(!flush_marked_entries || next_entry_ptr->flush_marker)
+ if ( ! flush_marked_entries || next_entry_ptr->flush_marker ) {
+
HDassert(next_entry_ptr->ring >= ring);
+ }
HDassert(entry_ptr != next_entry_ptr);
+
} /* end if */
- else
+ else {
+
next_entry_ptr = NULL;
+ }
- if((!flush_marked_entries || entry_ptr->flush_marker)
- && (!entry_ptr->flush_me_last ||
- (entry_ptr->flush_me_last
- && (cache_ptr->num_last_entries >= cache_ptr->slist_len
- || (flush_marked_entries && entry_ptr->flush_marker))))
- && (entry_ptr->flush_dep_nchildren == 0
- || entry_ptr->flush_dep_ndirty_children == 0)
- && entry_ptr->ring == ring) {
+ if ( ( ! flush_marked_entries || entry_ptr->flush_marker ) &&
+ ( ( ! entry_ptr->flush_me_last ) ||
+ ( ( entry_ptr->flush_me_last ) &&
+ ( ( cache_ptr->num_last_entries >= cache_ptr->slist_len )||
+ ( flush_marked_entries && entry_ptr->flush_marker ) )
+ )
+ ) &&
+ ( ( entry_ptr->flush_dep_nchildren == 0 ) ||
+ ( entry_ptr->flush_dep_ndirty_children == 0 ) ) &&
+ ( entry_ptr->ring == ring ) ) {
HDassert(entry_ptr->flush_dep_nunser_children == 0);
- if(entry_ptr->is_protected) {
+ if ( entry_ptr->is_protected ) {
+
                /* we probably have major problems -- but let's
* flush everything we can before we decide
* whether to flag an error.
*/
tried_to_flush_protected_entry = TRUE;
protected_entries++;
+
} /* end if */
else {
- if(H5C__flush_single_entry(f, entry_ptr, (flags | H5C__DURING_FLUSH_FLAG)) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry")
- if(cache_ptr->slist_changed) {
+ if ( H5C__flush_single_entry(f, entry_ptr,
+ (flags | H5C__DURING_FLUSH_FLAG)) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "Can't flush entry")
+
+ if ( cache_ptr->slist_changed ) {
+
/* The slist has been modified by something
* other than the simple removal of the
                         * flushed entry after the flush.
@@ -5852,34 +6584,46 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
restart_slist_scan = TRUE;
cache_ptr->slist_changed = FALSE;
H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
+
} /* end if */
flushed_entries_last_pass = TRUE;
+
} /* end else */
} /* end if */
} /* while ( ( restart_slist_scan ) || ( node_ptr != NULL ) ) */
#if H5C_DO_SANITY_CHECKS
/* Verify that the slist size and length are as expected. */
- HDassert((uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase) == cache_ptr->slist_len);
- HDassert((size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase) == cache_ptr->slist_size);
+ HDassert((uint32_t)((int32_t)initial_slist_len + \
+ cache_ptr->slist_len_increase) == cache_ptr->slist_len);
+ HDassert((size_t)((ssize_t)initial_slist_size + \
+ cache_ptr->slist_size_increase) == cache_ptr->slist_size);
#endif /* H5C_DO_SANITY_CHECKS */
+
} /* while */
HDassert(protected_entries <= cache_ptr->pl_len);
- if(((cache_ptr->pl_len > 0) && (!ignore_protected)) || (tried_to_flush_protected_entry))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "cache has protected items")
+ if ( ( ( cache_ptr->pl_len > 0 ) && ( ! ignore_protected ) ) ||
+ ( tried_to_flush_protected_entry ) )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "cache has protected items")
#if H5C_DO_SANITY_CHECKS
- if(!flush_marked_entries) {
+ if ( ! flush_marked_entries ) {
+
HDassert(cache_ptr->slist_ring_len[ring] == 0);
HDassert(cache_ptr->slist_ring_size[ring] == 0);
+
} /* end if */
#endif /* H5C_DO_SANITY_CHECKS */
done:
+
FUNC_LEAVE_NOAPI(ret_value)
+
} /* H5C__flush_ring() */
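The eligibility test inside the scan above is dense, so here is the same condition rewritten as a predicate, purely as a reading aid. The field names mirror those used in the diff; this function does not exist in the library:

    /* Paraphrase of the flush-eligibility test in H5C__flush_ring()'s
     * slist scan.  Not library code.
     */
    static hbool_t
    entry_flushable_now(const H5C_cache_entry_t *e, const H5C_t *c,
                        H5C_ring_t ring, hbool_t flush_marked_entries)
    {
        /* honor the marked-entries-only mode */
        if (flush_marked_entries && !e->flush_marker)
            return FALSE;

        /* flush-me-last entries wait until only they remain in the slist */
        if (e->flush_me_last &&
            !((c->num_last_entries >= c->slist_len) ||
              (flush_marked_entries && e->flush_marker)))
            return FALSE;

        /* flush dependency order: dirty children must be flushed first */
        if ((e->flush_dep_nchildren != 0) &&
            (e->flush_dep_ndirty_children != 0))
            return FALSE;

        /* only entries in the target ring are flushed on this call */
        return (hbool_t)(e->ring == ring);
    }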
@@ -5888,53 +6632,110 @@ done:
* Function: H5C__flush_single_entry
*
* Purpose: Flush or clear (and evict if requested) the cache entry
- * with the specified address and type. If the type is NULL,
- * any unprotected entry at the specified address will be
- * flushed (and possibly evicted).
+ * with the specified address and type. If the type is NULL,
+ * any unprotected entry at the specified address will be
+ * flushed (and possibly evicted).
*
- * Attempts to flush a protected entry will result in an
- * error.
+ * Attempts to flush a protected entry will result in an
+ * error.
*
- * If the H5C__FLUSH_INVALIDATE_FLAG flag is set, the entry will
- * be cleared and not flushed, and the call can't be part of a
+ * If the H5C__FLUSH_INVALIDATE_FLAG flag is set, the entry will
+ * be cleared and not flushed, and the call can't be part of a
* sequence of flushes.
*
- * If the caller knows the address of the skip list node at
- * which the target entry resides, it can avoid a lookup
- * by supplying that address in the tgt_node_ptr parameter.
- * If this parameter is NULL, the function will do a skip list
- * search for the entry instead.
- *
- * The function does nothing silently if there is no entry
- * at the supplied address, or if the entry found has the
- * wrong type.
+ * The function does nothing silently if there is no entry
+ * at the supplied address, or if the entry found has the
+ * wrong type.
*
* Return: Non-negative on success/Negative on failure or if there was
- * an attempt to flush a protected item.
+ * an attempt to flush a protected item.
*
* Programmer: John Mainzer, 5/5/04
*
+ * Modifications:
+ *
+ * JRM -- 7/21/04
+ * Updated function for the addition of the hash table.
+ *
+ * QAK -- 11/26/04
+ * Updated function for the switch from TBBTs to skip lists.
+ *
+ * JRM -- 1/6/05
+ * Updated function to reset the flush_marker field.
+ * Also replace references to H5F_FLUSH_INVALIDATE and
+ * H5F_FLUSH_CLEAR_ONLY with references to
+ * H5C__FLUSH_INVALIDATE_FLAG and H5C__FLUSH_CLEAR_ONLY_FLAG
+ * respectively.
+ *
+ * JRM -- 6/24/05
+ * Added code to remove dirty entries from the slist after
+ * they have been flushed. Also added a sanity check that
+ * will scream if we attempt a write when writes are
+ * completely disabled.
+ *
+ * JRM -- 7/5/05
+ * Added code to call the new log_flush callback whenever
+ * a dirty entry is written to disk. Note that the callback
+ * is not called if the H5C__FLUSH_CLEAR_ONLY_FLAG is set,
+ * as there is no write to file in this case.
+ *
+ * JRM -- 8/21/06
+ * Added code maintaining the flush_in_progress and
+ * destroy_in_progress fields in H5C_cache_entry_t.
+ *
+ * Also added flush_flags parameter to the call to
+ * type_ptr->flush() so that the flush routine can report
+ * whether the entry has been resized or renamed. Added
+ * code using the flush_flags variable to detect the case
+ * in which the target entry is resized during flush, and
+ *              update the cache's data structures accordingly.
+ *
+ * JRM -- 3/29/07
+ * Added sanity checks on the new is_read_only and
+ * ro_ref_count fields.
+ *
+ * QAK -- 2/07/08
+ * Separated "destroy entry" concept from "remove entry from
+ * cache" concept, by adding the 'take_ownership' flag and
+ * the "destroy_entry" variable.
+ *
+ * JRM -- 11/5/08
+ * Added call to H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN() to
+ *              maintain the new clean_index_size and dirty_index_size
+ * fields of H5C_t.
+ *
+ *
+ * Missing entries??
+ *
+ *
+ * JRM -- 5/8/20
+ * Updated sanity checks for the possibility that the slist
+ * is disabled.
+ *
+ * Also updated main comment to conform more closely with
+ * the current state of the code.
+ *
*-------------------------------------------------------------------------
*/
herr_t
H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
{
- H5C_t * cache_ptr; /* Cache for file */
- hbool_t destroy; /* external flag */
- hbool_t clear_only; /* external flag */
- hbool_t free_file_space; /* external flag */
- hbool_t take_ownership; /* external flag */
- hbool_t del_from_slist_on_destroy; /* external flag */
- hbool_t during_flush; /* external flag */
- hbool_t write_entry; /* internal flag */
- hbool_t destroy_entry; /* internal flag */
- hbool_t generate_image; /* internal flag */
- hbool_t update_page_buffer; /* internal flag */
- hbool_t was_dirty;
- hbool_t suppress_image_entry_writes = FALSE;
- hbool_t suppress_image_entry_frees = FALSE;
- haddr_t entry_addr = HADDR_UNDEF;
- herr_t ret_value = SUCCEED; /* Return value */
+ H5C_t * cache_ptr; /* Cache for file */
+ hbool_t destroy; /* external flag */
+ hbool_t clear_only; /* external flag */
+ hbool_t free_file_space; /* external flag */
+ hbool_t take_ownership; /* external flag */
+ hbool_t del_from_slist_on_destroy; /* external flag */
+ hbool_t during_flush; /* external flag */
+ hbool_t write_entry; /* internal flag */
+ hbool_t destroy_entry; /* internal flag */
+ hbool_t generate_image; /* internal flag */
+ hbool_t update_page_buffer; /* internal flag */
+ hbool_t was_dirty;
+ hbool_t suppress_image_entry_writes = FALSE;
+ hbool_t suppress_image_entry_frees = FALSE;
+ haddr_t entry_addr = HADDR_UNDEF;
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -5948,30 +6749,39 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
HDassert(entry_ptr->type);
/* setup external flags from the flags parameter */
- destroy = ((flags & H5C__FLUSH_INVALIDATE_FLAG) != 0);
- clear_only = ((flags & H5C__FLUSH_CLEAR_ONLY_FLAG) != 0);
- free_file_space = ((flags & H5C__FREE_FILE_SPACE_FLAG) != 0);
- take_ownership = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0);
- del_from_slist_on_destroy = ((flags & H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) != 0);
- during_flush = ((flags & H5C__DURING_FLUSH_FLAG) != 0);
- generate_image = ((flags & H5C__GENERATE_IMAGE_FLAG) != 0);
- update_page_buffer = ((flags & H5C__UPDATE_PAGE_BUFFER_FLAG) != 0);
+ destroy = ((flags & H5C__FLUSH_INVALIDATE_FLAG) != 0);
+ clear_only = ((flags & H5C__FLUSH_CLEAR_ONLY_FLAG) != 0);
+ free_file_space = ((flags & H5C__FREE_FILE_SPACE_FLAG) != 0);
+ take_ownership = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0);
+ del_from_slist_on_destroy =
+ ((flags & H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) != 0);
+ during_flush = ((flags & H5C__DURING_FLUSH_FLAG) != 0);
+ generate_image = ((flags & H5C__GENERATE_IMAGE_FLAG) != 0);
+ update_page_buffer = ((flags & H5C__UPDATE_PAGE_BUFFER_FLAG) != 0);
/* Set the flag for destroying the entry, based on the 'take ownership'
* and 'destroy' flags
*/
- if(take_ownership)
+ if ( take_ownership ) {
+
destroy_entry = FALSE;
- else
+
+ } else {
+
destroy_entry = destroy;
+ }
/* we will write the entry to disk if it exists, is dirty, and if the
* clear only flag is not set.
*/
- if(entry_ptr->is_dirty && !clear_only)
+ if ( entry_ptr->is_dirty && !clear_only ) {
+
write_entry = TRUE;
- else
+
+ } else {
+
write_entry = FALSE;
+ }
    /* if we have received a close warning, and we have been instructed to
* generate a metadata cache image, and we have actually constructed
@@ -5980,8 +6790,11 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
* Set suppress_image_entry_writes to TRUE if indicated by the
* image_ctl flags.
*/
- if(cache_ptr->close_warning_received && cache_ptr->image_ctl.generate_image
- && cache_ptr->num_entries_in_image > 0 && cache_ptr->image_entries) {
+ if ( ( cache_ptr->close_warning_received ) &&
+ ( cache_ptr->image_ctl.generate_image ) &&
+ ( cache_ptr->num_entries_in_image > 0 ) &&
+ ( cache_ptr->image_entries != NULL ) ) {
+
/* Sanity checks */
HDassert(entry_ptr->image_up_to_date || !(entry_ptr->include_in_image));
HDassert(entry_ptr->image_ptr || !(entry_ptr->include_in_image));
@@ -5991,32 +6804,60 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
suppress_image_entry_frees = TRUE;
- if(cache_ptr->image_ctl.flags & H5C_CI__SUPRESS_ENTRY_WRITES)
+ if ( cache_ptr->image_ctl.flags & H5C_CI__SUPRESS_ENTRY_WRITES ) {
+
suppress_image_entry_writes = TRUE;
+
+ } /* end if */
} /* end if */
-    /* run initial sanity checks */
-#if H5C_DO_SANITY_CHECKS
-    if(entry_ptr->in_slist) {
-        HDassert(entry_ptr->is_dirty);
-        if((entry_ptr->flush_marker) && (!entry_ptr->is_dirty))
-            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry in slist failed sanity checks")
-    } /* end if */
-    else {
-        HDassert(!entry_ptr->is_dirty);
-        HDassert(!entry_ptr->flush_marker);
-        if((entry_ptr->is_dirty) || (entry_ptr->flush_marker))
-            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry failed sanity checks")
-    } /* end else */
+    /* run initial sanity checks */
+#if H5C_DO_SANITY_CHECKS
+    if ( cache_ptr->slist_enabled ) {
+        if ( entry_ptr->in_slist ) {
+            HDassert(entry_ptr->is_dirty);
+
+ if ( ( entry_ptr->flush_marker ) && ( ! entry_ptr->is_dirty ) )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "entry in slist failed sanity checks")
+ } /* end if */
+ else {
+
+ HDassert(!entry_ptr->is_dirty);
+ HDassert(!entry_ptr->flush_marker);
+
+ if ( ( entry_ptr->is_dirty ) || ( entry_ptr->flush_marker ) )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "entry failed sanity checks")
+
+ } /* end else */
+ } else { /* slist is disabled */
+
+ HDassert( ! entry_ptr->in_slist );
+
+ if ( ! entry_ptr->is_dirty ) {
+
+ if ( entry_ptr->flush_marker )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "flush marked clean entry?")
+
+ }
+ }
#endif /* H5C_DO_SANITY_CHECKS */
- if(entry_ptr->is_protected) {
+ if ( entry_ptr->is_protected ) {
+
HDassert(!entry_ptr->is_protected);
/* Attempt to flush a protected entry -- scream and die. */
- HGOTO_ERROR(H5E_CACHE, H5E_PROTECT, FAIL, "Attempt to flush a protected entry")
+ HGOTO_ERROR(H5E_CACHE, H5E_PROTECT, FAIL, \
+ "Attempt to flush a protected entry")
+
} /* end if */
/* Set entry_ptr->flush_in_progress = TRUE and set
@@ -6035,24 +6876,36 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
* been requested to generate an image. In those cases, serialize the
* entry.
*/
- if(write_entry || generate_image) {
+ if ( write_entry || generate_image ) {
+
HDassert(entry_ptr->is_dirty);
- if(NULL == entry_ptr->image_ptr) {
- if(NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer")
+ if ( NULL == entry_ptr->image_ptr ) {
+
+ if ( NULL == (entry_ptr->image_ptr =
+ H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)) )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, \
+ "memory allocation failed for on disk image buffer")
+
#if H5C_DO_MEMORY_SANITY_CHECKS
- H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
+ H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size,
+ H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+
} /* end if */
- if(!(entry_ptr->image_up_to_date)) {
+ if ( ! ( entry_ptr->image_up_to_date ) ) {
+
/* Sanity check */
HDassert(!entry_ptr->prefetched);
/* Generate the entry's image */
- if(H5C__generate_image(f, cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't generate entry's image")
+ if ( H5C__generate_image(f, cache_ptr, entry_ptr) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, \
+ "can't generate entry's image")
+
} /* end if ( ! (entry_ptr->image_up_to_date) ) */
} /* end if */
@@ -6062,12 +6915,16 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
* in the entry's type, we silently skip the write. This
* flag should only be used in test code.
*/
- if(write_entry) {
+ if ( write_entry ) {
+
HDassert(entry_ptr->is_dirty);
#if H5C_DO_SANITY_CHECKS
- if(cache_ptr->check_write_permitted && !(cache_ptr->write_permitted))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Write when writes are always forbidden!?!?!")
+ if ( ( cache_ptr->check_write_permitted ) &&
+ ( ! ( cache_ptr->write_permitted ) ) )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "Write when writes are always forbidden!?!?!")
#endif /* H5C_DO_SANITY_CHECKS */
/* Write the image to disk unless the write is suppressed.
@@ -6077,41 +6934,60 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
* H5AC__CLASS_SKIP_WRITES is set in the entry's type. This
* flag should only be used in test code
*/
- if((!suppress_image_entry_writes || !entry_ptr->include_in_image)
- && (((entry_ptr->type->flags) & H5C__CLASS_SKIP_WRITES) == 0)) {
+ if ( ( ( ! suppress_image_entry_writes ) ||
+ ( ! entry_ptr->include_in_image ) ) &&
+ ( ( (entry_ptr->type->flags) & H5C__CLASS_SKIP_WRITES) == 0 ) ) {
+
H5FD_mem_t mem_type = H5FD_MEM_DEFAULT;
#ifdef H5_HAVE_PARALLEL
- if(cache_ptr->coll_write_list) {
- if(H5SL_insert(cache_ptr->coll_write_list, entry_ptr, &entry_ptr->addr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "unable to insert skip list item")
+ if ( cache_ptr->coll_write_list ) {
+
+ if ( H5SL_insert(cache_ptr->coll_write_list, entry_ptr,
+ &entry_ptr->addr) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, \
+ "unable to insert skip list item")
} /* end if */
else
{
#endif /* H5_HAVE_PARALLEL */
- if(entry_ptr->prefetched) {
+ if ( entry_ptr->prefetched ) {
+
HDassert(entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID);
+
mem_type = cache_ptr->
class_table_ptr[entry_ptr->prefetch_type_id]->
mem_type;
} /* end if */
- else
+ else {
+
mem_type = entry_ptr->type->mem_type;
+ }
+
-            if(H5F_block_write(f, mem_type, entry_ptr->addr, entry_ptr->size, entry_ptr->image_ptr) < 0)
-                HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't write image to file")
+                if ( H5F_block_write(f, mem_type, entry_ptr->addr,
+                                     entry_ptr->size, entry_ptr->image_ptr) < 0 )
+
+                    HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+                        "Can't write image to file")
#ifdef H5_HAVE_PARALLEL
}
#endif /* H5_HAVE_PARALLEL */
+
} /* end if */
/* if the entry has a notify callback, notify it that we have
* just flushed the entry.
*/
- if(entry_ptr->type->notify &&
- (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_FLUSH, entry_ptr) < 0 )
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client of entry flush")
+ if ( ( entry_ptr->type->notify ) &&
+ ( (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_FLUSH,
+ entry_ptr) < 0 ) )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, \
+ "can't notify client of entry flush")
+
} /* if ( write_entry ) */
/* At this point, all pre-serialize and serialize calls have been
@@ -6123,16 +6999,21 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
*/
/* start by updating the statistics */
- if(clear_only) {
+ if ( clear_only ) {
+
/* only log a clear if the entry was dirty */
- if(was_dirty) {
+ if ( was_dirty ) {
+
H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr)
+
} /* end if */
} else if(write_entry) {
+
HDassert(was_dirty);
/* only log a flush if we actually wrote to disk */
H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr)
+
} /* end else if */
/* Note that the algorithm below is (very) similar to the set of operations
@@ -6141,12 +7022,18 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
*/
/* Update the cache internal data structures. */
- if(destroy) {
+ if ( destroy ) {
+
/* Sanity checks */
- if(take_ownership)
+ if ( take_ownership ) {
+
HDassert(!destroy_entry);
- else
+
+ } else {
+
HDassert(destroy_entry);
+ }
+
HDassert(!entry_ptr->is_pinned);
/* Update stats, while entry is still in the cache */
@@ -6156,8 +7043,12 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
* to be removed from the cache, send a 'before eviction' notice while
* the entry is still fully integrated in the cache.
*/
- if(entry_ptr->type->notify && (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry to evict")
+ if ( ( entry_ptr->type->notify ) &&
+ ( (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT,
+ entry_ptr) < 0 ) )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, \
+ "can't notify client about entry to evict")
/* Update the cache internal data structures as appropriate
* for a destroy. Specifically:
@@ -6177,31 +7068,40 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
*/
H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL)
- if(entry_ptr->in_slist && del_from_slist_on_destroy)
+ if ( ( entry_ptr->in_slist ) && ( del_from_slist_on_destroy ) ) {
+
H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush)
+ }
#ifdef H5_HAVE_PARALLEL
/* Check for collective read access flag */
- if(entry_ptr->coll_access) {
+ if ( entry_ptr->coll_access ) {
+
entry_ptr->coll_access = FALSE;
+
H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
+
} /* end if */
#endif /* H5_HAVE_PARALLEL */
H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, FAIL)
/* Remove entry from tag list */
- if(H5C__untag_entry(cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list")
-        /* verify that the entry is no longer part of any flush dependencies */
+        if ( H5C__untag_entry(cache_ptr, entry_ptr) < 0 )
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, \
+ "can't remove entry from tag list")
+
+ /* verify that the entry is no longer part of any flush dependencies */
HDassert(entry_ptr->flush_dep_nparents == 0);
- HDassert(entry_ptr->flush_dep_nchildren == 0);
+ HDassert(entry_ptr->flush_dep_nchildren == 0);
+
} /* end if */
else {
+
HDassert(clear_only || write_entry);
HDassert(entry_ptr->is_dirty);
- HDassert(entry_ptr->in_slist);
+ HDassert((!cache_ptr->slist_enabled) || (entry_ptr->in_slist));
/* We are either doing a flush or a clear.
*
@@ -6209,7 +7109,7 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
* view of the replacement policy and the slist.
* Hence no differentiation between them.
*
- * JRM -- 7/7/07
+ * JRM -- 7/7/07
*/
H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, FAIL)
@@ -6225,20 +7125,34 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr);
/* Check for entry changing status and do notifications, etc. */
- if(was_dirty) {
- /* If the entry's type has a 'notify' callback send a 'entry cleaned'
- * notice now that the entry is fully integrated into the cache.
+ if ( was_dirty ) {
+
+            /* If the entry's type has a 'notify' callback send an
+ * 'entry cleaned' notice now that the entry is fully
+ * integrated into the cache.
*/
- if(entry_ptr->type->notify &&
- (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag cleared")
+ if ( ( entry_ptr->type->notify ) &&
+ ( (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED,
+ entry_ptr) < 0 ) )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, \
+ "can't notify client about entry dirty flag cleared")
+
-            /* Propagate the clean flag up the flush dependency chain if appropriate */
-            if(entry_ptr->flush_dep_ndirty_children != 0)
+            /* Propagate the clean flag up the flush dependency chain
+             * if appropriate
+             */
+            if ( entry_ptr->flush_dep_ndirty_children != 0 ) {
HDassert(entry_ptr->flush_dep_ndirty_children == 0);
- if(entry_ptr->flush_dep_nparents > 0)
- if(H5C__mark_flush_dep_clean(entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Can't propagate flush dep clean flag")
+ }
+
+ if ( entry_ptr->flush_dep_nparents > 0 ) {
+
+ if ( H5C__mark_flush_dep_clean(entry_ptr) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, \
+ "Can't propagate flush dep clean flag")
+ }
} /* end if */
} /* end else */
@@ -6246,7 +7160,8 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
entry_ptr->flush_in_progress = FALSE;
/* capture the cache entry address for the log_flush call at the
- end before the entry_ptr gets freed */
+ * end before the entry_ptr gets freed
+ */
entry_addr = entry_ptr->addr;
/* Internal cache data structures should now be up to date, and
@@ -6254,7 +7169,8 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
*
* Now discard the entry if appropriate.
*/
- if(destroy) {
+ if ( destroy ) {
+
/* Sanity check */
HDassert(0 == entry_ptr->flush_dep_nparents);
@@ -6265,10 +7181,14 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
*
* Otherwise, free the buffer if it exists.
*/
- if(suppress_image_entry_frees && entry_ptr->include_in_image)
+ if ( suppress_image_entry_frees && entry_ptr->include_in_image ) {
+
entry_ptr->image_ptr = NULL;
- else if(entry_ptr->image_ptr != NULL)
+
+ } else if ( entry_ptr->image_ptr != NULL ) {
+
entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
+ }
/* If the entry is not a prefetched entry, verify that the flush
* dependency parents addresses array has been transferred.
@@ -6276,15 +7196,18 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
* If the entry is prefetched, the free_isr routine will dispose of
* the flush dependency parents addresses array if necessary.
*/
- if(!entry_ptr->prefetched) {
+ if ( ! entry_ptr->prefetched ) {
+
HDassert(0 == entry_ptr->fd_parent_count);
HDassert(NULL == entry_ptr->fd_parent_addrs);
+
} /* end if */
/* Check whether we should free the space in the file that
* the entry occupies
*/
- if(free_file_space) {
+ if ( free_file_space ) {
+
hsize_t fsf_size;
/* Sanity checks */
@@ -6304,16 +7227,27 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
* it to get the size of the block of file space to free.
* Otherwise use entry_ptr->size.
*/
- if(entry_ptr->type->fsf_size) {
- if((entry_ptr->type->fsf_size)((void *)entry_ptr, &fsf_size) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to get file space free size")
+ if ( entry_ptr->type->fsf_size ) {
+
+ if ( (entry_ptr->type->fsf_size)((void *)entry_ptr, &fsf_size)
+ < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, \
+ "unable to get file space free size")
+
} /* end if */
- else /* no file space free size callback -- use entry size */
+ else { /* no file space free size callback -- use entry size */
+
fsf_size = entry_ptr->size;
+ }
/* Release the space on disk */
- if(H5MF_xfree(f, entry_ptr->type->mem_type, entry_ptr->addr, fsf_size) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to free file space for cache entry")
+ if ( H5MF_xfree(f, entry_ptr->type->mem_type,
+ entry_ptr->addr, fsf_size) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, \
+ "unable to free file space for cache entry")
+
} /* end if ( free_file_space ) */
/* Reset the pointer to the cache the entry is within. -QAK */
@@ -6336,22 +7270,32 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
*/
cache_ptr->entries_removed_counter++;
cache_ptr->last_entry_removed_ptr = entry_ptr;
- if(entry_ptr == cache_ptr->entry_watched_for_removal)
+
+ if ( entry_ptr == cache_ptr->entry_watched_for_removal ) {
+
cache_ptr->entry_watched_for_removal = NULL;
+ }
/* Check for actually destroying the entry in memory */
/* (As opposed to taking ownership of it) */
- if(destroy_entry) {
- if(entry_ptr->is_dirty) {
+ if ( destroy_entry ) {
+
+ if ( entry_ptr->is_dirty ) {
+
/* Reset dirty flag */
entry_ptr->is_dirty = FALSE;
- /* If the entry's type has a 'notify' callback send a 'entry cleaned'
- * notice now that the entry is fully integrated into the cache.
+                /* If the entry's type has a 'notify' callback send an
+ * 'entry cleaned' notice now that the entry is fully
+ * integrated into the cache.
*/
- if(entry_ptr->type->notify &&
- (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag cleared")
+ if ( ( entry_ptr->type->notify ) &&
+ ( (entry_ptr->type->notify)
+ (H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0 ) )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, \
+ "can't notify client about entry dirty flag cleared")
+
} /* end if */
/* we are about to discard the in core representation --
@@ -6363,10 +7307,14 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
/* verify that the image has been freed */
HDassert(entry_ptr->image_ptr == NULL);
- if(entry_ptr->type->free_icr((void *)entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "free_icr callback failed")
+ if ( entry_ptr->type->free_icr((void *)entry_ptr) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "free_icr callback failed")
+
} /* end if */
else {
+
HDassert(take_ownership);
/* client is taking ownership of the entry.
@@ -6374,33 +7322,50 @@ H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
* unless the entry is re-inserted properly
*/
entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
+
} /* end else */
} /* if (destroy) */
/* Check if we have to update the page buffer with cleared entries
* so it doesn't go out of date
*/
- if(update_page_buffer) {
+ if ( update_page_buffer ) {
+
/* Sanity check */
HDassert(!destroy);
HDassert(entry_ptr->image_ptr);
- if(f->shared->page_buf && f->shared->page_buf->page_size >= entry_ptr->size)
- if(H5PB_update_entry(f->shared->page_buf, entry_ptr->addr, entry_ptr->size, entry_ptr->image_ptr) > 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Failed to update PB with metadata cache")
+ if ( ( f->shared->page_buf ) &&
+ ( f->shared->page_buf->page_size >= entry_ptr->size ) ) {
+
+ if ( H5PB_update_entry(f->shared->page_buf, entry_ptr->addr,
+ entry_ptr->size, entry_ptr->image_ptr) > 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "Failed to update PB with metadata cache")
+ } /* end if */
} /* end if */
- if(cache_ptr->log_flush)
- if((cache_ptr->log_flush)(cache_ptr, entry_addr, was_dirty, flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "log_flush callback failed")
+ if ( cache_ptr->log_flush ) {
+
+ if ( (cache_ptr->log_flush)(cache_ptr, entry_addr,
+ was_dirty, flags) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "log_flush callback failed")
+
+ } /* end if */
done:
+
HDassert( ( ret_value != SUCCEED ) || ( destroy_entry ) ||
( ! entry_ptr->flush_in_progress ) );
+
HDassert( ( ret_value != SUCCEED ) || ( destroy_entry ) ||
( take_ownership ) || ( ! entry_ptr->is_dirty ) );
FUNC_LEAVE_NOAPI(ret_value)
+
} /* H5C__flush_single_entry() */
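As a reading aid, the interaction of the destroy-related flags decoded at the top of this function can be tabulated (derived from the setup code above; not an exhaustive list of flag combinations):

    /* flags passed in                           destroy  destroy_entry
     * ----------------------------------------  -------  -------------
     * (neither flag)                            FALSE    FALSE
     *     -> flush in place, entry stays cached
     * H5C__FLUSH_INVALIDATE_FLAG                TRUE     TRUE
     *     -> evict, then free via the free_icr callback
     * H5C__FLUSH_INVALIDATE_FLAG |
     *     H5C__TAKE_OWNERSHIP_FLAG              TRUE     FALSE
     *     -> evict, but the caller keeps the entry; its magic is set
     *        to H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC until re-inserted
     */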
@@ -7606,8 +8571,18 @@ H5C_entry_in_skip_list(H5C_t * cache_ptr, H5C_cache_entry_t *target_ptr)
* Programmer: Mike McGreevy
* November 3, 2010
*
+ * Changes:     Modified function to set up the slist before calling
+ *              H5C_flush_cache(), and take it down afterwards.  Note
+ *              that the slist need not be empty after the call to
+ *              H5C_flush_cache() since we are only flushing marked
+ *              entries.  Thus we must set the clear_slist parameter
+ * of H5C_set_slist_enabled to TRUE.
+ *
+ * JRM -- 5/6/20
+ *
*-------------------------------------------------------------------------
*/
+
herr_t
H5C__flush_marked_entries(H5F_t * f)
{
@@ -7618,12 +8593,31 @@ H5C__flush_marked_entries(H5F_t * f)
/* Assertions */
HDassert(f != NULL);
+
+ /* Enable the slist, as it is needed in the flush */
+ if ( H5C_set_slist_enabled(f->shared->cache, TRUE, FALSE) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist enabled failed")
+
+
/* Flush all marked entries */
- if(H5C_flush_cache(f, H5C__FLUSH_MARKED_ENTRIES_FLAG | H5C__FLUSH_IGNORE_PROTECTED_FLAG) < 0)
+ if ( H5C_flush_cache(f, H5C__FLUSH_MARKED_ENTRIES_FLAG |
+ H5C__FLUSH_IGNORE_PROTECTED_FLAG) < 0 )
+
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush cache")
+ /* Disable the slist. Set the clear_slist parameter to TRUE
+ * since we called H5C_flush_cache() with the
+ * H5C__FLUSH_MARKED_ENTRIES_FLAG.
+ */
+ if ( H5C_set_slist_enabled(f->shared->cache, FALSE, TRUE) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "disable slist failed")
+
done:
+
FUNC_LEAVE_NOAPI(ret_value)
+
} /* H5C__flush_marked_entries */
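The enable/flush/disable bracket above is the pattern this changeset applies wherever a flush is driven from outside the normal H5C_flush_cache() path. A sketch (the remark about a full flush is an inference from the clear_slist discussion above, not quoted from the code):

    /* Sketch of the slist bracket pattern:
     *
     *   H5C_set_slist_enabled(cache_ptr, TRUE, FALSE);  -- build the slist
     *   ...flush...
     *   H5C_set_slist_enabled(cache_ptr, FALSE, TRUE);  -- discard leftovers
     *
     * clear_slist must be TRUE on the way down whenever the flush can
     * leave dirty entries behind, as with H5C__FLUSH_MARKED_ENTRIES_FLAG;
     * a full flush should be able to pass FALSE and let the teardown
     * verify that the slist emptied.
     */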
@@ -8479,7 +9473,7 @@ done:
*
* Purpose: Serialize an entry and generate its image.
*
- * Note: This may cause the entry to be re-sized and/or moved in
+ * Note: This may cause the entry to be re-sized and/or moved in
* the cache.
*
* As we will not update the metadata cache's data structures
@@ -8493,14 +9487,18 @@ done:
* Programmer: Mohamad Chaarawi
* 2/10/16
*
+ * Changes: Updated sanity checks for the possibility that the skip
+ * list is disabled.
+ * JRM 5/16/20
+ *
*-------------------------------------------------------------------------
*/
herr_t
H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
{
- haddr_t new_addr = HADDR_UNDEF;
- haddr_t old_addr = HADDR_UNDEF;
- size_t new_len = 0;
+ haddr_t new_addr = HADDR_UNDEF;
+ haddr_t old_addr = HADDR_UNDEF;
+ size_t new_len = 0;
unsigned serialize_flags = H5C__SERIALIZE_NO_FLAGS_SET;
herr_t ret_value = SUCCEED;
@@ -8521,16 +9519,24 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
old_addr = entry_ptr->addr;
/* Call client's pre-serialize callback, if there's one */
- if(entry_ptr->type->pre_serialize &&
- (entry_ptr->type->pre_serialize)(f, (void *)entry_ptr,
- entry_ptr->addr, entry_ptr->size, &new_addr, &new_len, &serialize_flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to pre-serialize entry")
+ if ( ( entry_ptr->type->pre_serialize ) &&
+ ( (entry_ptr->type->pre_serialize)(f, (void *)entry_ptr,
+ entry_ptr->addr, entry_ptr->size,
+ &new_addr, &new_len,
+ &serialize_flags) < 0 ) )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "unable to pre-serialize entry")
/* Check for any flags set in the pre-serialize callback */
- if(serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) {
+ if ( serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET ) {
+
/* Check for unexpected flags from serialize callback */
- if(serialize_flags & ~(H5C__SERIALIZE_RESIZED_FLAG | H5C__SERIALIZE_MOVED_FLAG))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unknown serialize flag(s)")
+ if ( serialize_flags & ~(H5C__SERIALIZE_RESIZED_FLAG |
+ H5C__SERIALIZE_MOVED_FLAG) )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "unknown serialize flag(s)")
#ifdef H5_HAVE_PARALLEL
/* In the parallel case, resizes and moves in
@@ -8559,28 +9565,40 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
* If that ceases to be the case, further
* tests will be necessary.
*/
- if(cache_ptr->aux_ptr != NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "resize/move in serialize occurred in parallel case")
+ if ( cache_ptr->aux_ptr != NULL )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "resize/move in serialize occurred in parallel case")
#endif
/* If required, resize the buffer and update the entry and the cache
- * data structures */
- if(serialize_flags & H5C__SERIALIZE_RESIZED_FLAG) {
+ * data structures
+ */
+ if ( serialize_flags & H5C__SERIALIZE_RESIZED_FLAG ) {
+
/* Sanity check */
HDassert(new_len > 0);
/* Allocate a new image buffer */
- if(NULL == (entry_ptr->image_ptr = H5MM_realloc(entry_ptr->image_ptr, new_len + H5C_IMAGE_EXTRA_SPACE)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer")
+ if ( NULL == (entry_ptr->image_ptr =
+ H5MM_realloc(entry_ptr->image_ptr,
+ new_len + H5C_IMAGE_EXTRA_SPACE)) )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, \
+ "memory allocation failed for on disk image buffer")
+
#if H5C_DO_MEMORY_SANITY_CHECKS
- H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + new_len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
+ H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + new_len,
+ H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
/* Update statistics for resizing the entry */
- H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_len);
+ H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, \
+ new_len);
/* Update the hash table for the size change */
- H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len, entry_ptr, !(entry_ptr->is_dirty));
+ H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, \
+ new_len, entry_ptr, !(entry_ptr->is_dirty));
/* The entry can't be protected since we are in the process of
* flushing it. Thus we must update the replacement policy data
@@ -8591,25 +9609,32 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
/* As we haven't updated the cache data structures for
             * the flush or flush destroy yet, the entry should
- * be in the slist. Thus update it for the size change.
+ * be in the slist if the slist is enabled. Since
+ * H5C__UPDATE_SLIST_FOR_SIZE_CHANGE() is a no-op if the
+             * slist is disabled, call it unconditionally.
*/
HDassert(entry_ptr->is_dirty);
- HDassert(entry_ptr->in_slist);
- H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len);
+ HDassert((entry_ptr->in_slist) || (!cache_ptr->slist_enabled));
+
+ H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, \
+ new_len);
/* Finally, update the entry for its new size */
entry_ptr->size = new_len;
+
} /* end if */
        /* If required, update the entry and the cache data structures
* for a move
*/
- if(serialize_flags & H5C__SERIALIZE_MOVED_FLAG) {
+ if ( serialize_flags & H5C__SERIALIZE_MOVED_FLAG ) {
+
/* Update stats and entries relocated counter */
H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr)
/* We must update cache data structures for the change in address */
- if(entry_ptr->addr == old_addr) {
+ if ( entry_ptr->addr == old_addr ) {
+
/* Delete the entry from the hash table and the slist */
H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL);
H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE);
@@ -8620,18 +9645,26 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
/* And then reinsert in the index and slist */
H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL);
H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL);
+
} /* end if */
- else /* move is already done for us -- just do sanity checks */
+ else { /* move is already done for us -- just do sanity checks */
+
HDassert(entry_ptr->addr == new_addr);
+ }
} /* end if */
} /* end if(serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) */
/* Serialize object into buffer */
- if(entry_ptr->type->serialize(f, entry_ptr->image_ptr, entry_ptr->size, (void *)entry_ptr) < 0)
+ if ( entry_ptr->type->serialize(f, entry_ptr->image_ptr, entry_ptr->size,
+ (void *)entry_ptr) < 0 )
+
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to serialize entry")
+
#if H5C_DO_MEMORY_SANITY_CHECKS
- HDassert(0 == HDmemcmp(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE));
+ HDassert(0 == HDmemcmp(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size,\
+ H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE));
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+
entry_ptr->image_up_to_date = TRUE;
/* Propagate the fact that the entry is serialized up the
@@ -8641,9 +9674,14 @@ H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
* for flush dependency parents.
*/
HDassert(entry_ptr->flush_dep_nunser_children == 0);
- if(entry_ptr->flush_dep_nparents > 0)
- if(H5C__mark_flush_dep_serialized(entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "Can't propagate serialization status to fd parents")
+
+ if ( entry_ptr->flush_dep_nparents > 0 ) {
+
+ if ( H5C__mark_flush_dep_serialized(entry_ptr) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, \
+ "Can't propagate serialization status to fd parents")
+ }
done:
FUNC_LEAVE_NOAPI(ret_value)
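The call site above pins down the pre-serialize contract: the client reports a new length and/or address through the out parameters and sets the matching H5C__SERIALIZE_* flags. A hypothetical client callback consistent with that call -- my_entry_t and my_entry_needed_size() are illustrative only, and a real client would also handle the moved case:

    static herr_t
    my_pre_serialize(H5F_t *f, void *thing, haddr_t addr, size_t len,
                     haddr_t *new_addr_ptr, size_t *new_len_ptr,
                     unsigned *flags_ptr)
    {
        my_entry_t *entry  = (my_entry_t *)thing;
        size_t      needed = my_entry_needed_size(entry);

        *flags_ptr = H5C__SERIALIZE_NO_FLAGS_SET;

        if (needed != len) {
            /* ask the cache to resize the image buffer and update its
             * index / slist bookkeeping before serialize is called
             */
            *new_len_ptr = needed;
            *flags_ptr  |= H5C__SERIALIZE_RESIZED_FLAG;
        }

        (void)f;
        (void)addr;
        (void)new_addr_ptr;   /* set only with H5C__SERIALIZE_MOVED_FLAG */

        return SUCCEED;
    }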
diff --git a/src/H5Cdbg.c b/src/H5Cdbg.c
index d5599f2..775db4c 100644
--- a/src/H5Cdbg.c
+++ b/src/H5Cdbg.c
@@ -260,17 +260,24 @@ H5C_dump_cache_LRU(H5C_t *cache_ptr, const char *cache_name)
/*-------------------------------------------------------------------------
+ *
* Function: H5C_dump_cache_skip_list
*
* Purpose: Debugging routine that prints a summary of the contents of
- * the skip list used by the metadata cache metadata cache to
- * maintain an address sorted list of dirty entries.
+ *              the skip list used by the metadata cache to maintain an
+ *              address sorted list of dirty entries.
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: John Mainzer
* 11/15/14
*
+ * Changes: Updated function for the slist_enabled field in H5C_t.
+ * Recall that to minimize slist overhead, the slist is
+ * empty and not maintained if cache_ptr->slist_enabled is
+ * false.
+ * JRM -- 5/6/20
+ *
*-------------------------------------------------------------------------
*/
#ifndef NDEBUG
@@ -288,11 +295,16 @@ H5C_dump_cache_skip_list(H5C_t * cache_ptr, char * calling_fcn)
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
HDassert(calling_fcn != NULL);
- HDfprintf(stdout, "\n\nDumping metadata cache skip list from %s.\n", calling_fcn);
+ HDfprintf(stdout, "\n\nDumping metadata cache skip list from %s.\n",
+ calling_fcn);
+ HDfprintf(stdout, " slist enabled = %d.\n",
+ (int)(cache_ptr->slist_enabled));
HDfprintf(stdout, " slist len = %u.\n", cache_ptr->slist_len);
- HDfprintf(stdout, " slist size = %lld.\n", (long long)(cache_ptr->slist_size));
+ HDfprintf(stdout, " slist size = %lld.\n",
+ (long long)(cache_ptr->slist_size));
if(cache_ptr->slist_len > 0) {
+
/* If we get this far, all entries in the cache are listed in the
* skip list -- scan the skip list generating the desired output.
*/
@@ -300,13 +312,20 @@ H5C_dump_cache_skip_list(H5C_t * cache_ptr, char * calling_fcn)
"Num: Addr: Len: Prot/Pind: Dirty: Type:\n");
i = 0;
+
node_ptr = H5SL_first(cache_ptr->slist_ptr);
- if(node_ptr != NULL)
+
+ if ( node_ptr != NULL ) {
+
entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- else
+
+ } else {
+
entry_ptr = NULL;
+ }
+
+ while ( entry_ptr != NULL ) {
- while(entry_ptr != NULL) {
HDassert( entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC );
HDfprintf(stdout,
@@ -323,19 +342,27 @@ H5C_dump_cache_skip_list(H5C_t * cache_ptr, char * calling_fcn)
node_ptr, H5SL_item(node_ptr));
/* increment node_ptr before we delete its target */
+
node_ptr = H5SL_next(node_ptr);
- if(node_ptr != NULL)
+
+ if ( node_ptr != NULL ) {
+
entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- else
+
+ } else {
+
entry_ptr = NULL;
+ }
i++;
+
} /* end while */
} /* end if */
HDfprintf(stdout, "\n\n");
FUNC_LEAVE_NOAPI(ret_value)
+
} /* H5C_dump_cache_skip_list() */
#endif /* NDEBUG */
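
The dump routine above illustrates the skip list traversal pattern used throughout this patch. A minimal sketch of that pattern, assuming the HDF5-internal H5SL API (H5SL_first(), H5SL_next(), H5SL_item()) and the H5C package headers; the helper name walk_slist is hypothetical:

    /* Hypothetical helper -- not part of the patch.  Walks the metadata
     * cache skip list in increasing address order.
     */
    static void
    walk_slist(H5C_t *cache_ptr)
    {
        H5SL_node_t       *node_ptr;
        H5C_cache_entry_t *entry_ptr;

        /* when the slist is disabled it must be empty -- nothing to walk */
        if(!cache_ptr->slist_enabled)
            return;

        node_ptr = H5SL_first(cache_ptr->slist_ptr);
        while(node_ptr != NULL) {
            entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
            HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);

            /* advance before touching the entry, in case it is removed */
            node_ptr = H5SL_next(node_ptr);
        }
    }
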
diff --git a/src/H5Cimage.c b/src/H5Cimage.c
index ee286d9..0373098 100644
--- a/src/H5Cimage.c
+++ b/src/H5Cimage.c
@@ -431,36 +431,40 @@ done:
* Function: H5C__deserialize_prefetched_entry()
*
 * Purpose: Deserialize the supplied prefetched entry, and return
- * a pointer to the deserialized entry in *entry_ptr_ptr.
- * If successful, remove the prefetched entry from the cache,
- * and free it. Insert the deserialized entry into the cache.
- *
- * Note that the on disk image of the entry is not freed --
- * a pointer to it is stored in the deserialized entries'
- * image_ptr field, and its image_up_to_date field is set to
- * TRUE unless the entry is dirtied by the deserialize call.
- *
- * If the prefetched entry is a flush dependency child,
- * destroy that flush dependency prior to calling the
- * deserialize callback. If appropriate, the flush dependency
- * relationship will be recreated by the cache client.
- *
- * If the prefetched entry is a flush dependency parent,
- * destroy the flush dependency relationship with all its
- * children. As all these children must be prefetched entries,
- * recreate these flush dependency relationships with
- * deserialized entry after it is inserted in the cache.
- *
- * Since deserializing a prefetched entry is semantically
- * equivalent to a load, issue an entry loaded nofification
- * if the notify callback is defined.
+ * a pointer to the deserialized entry in *entry_ptr_ptr.
+ * If successful, remove the prefetched entry from the cache,
+ * and free it. Insert the deserialized entry into the cache.
+ *
+ * Note that the on disk image of the entry is not freed --
+ * a pointer to it is stored in the deserialized entry's
+ * image_ptr field, and its image_up_to_date field is set to
+ * TRUE unless the entry is dirtied by the deserialize call.
+ *
+ * If the prefetched entry is a flush dependency child,
+ * destroy that flush dependency prior to calling the
+ * deserialize callback. If appropriate, the flush dependency
+ * relationship will be recreated by the cache client.
+ *
+ * If the prefetched entry is a flush dependency parent,
+ * destroy the flush dependency relationship with all its
+ * children. As all these children must be prefetched entries,
+ * recreate these flush dependency relationships with
+ * the deserialized entry after it is inserted in the cache.
+ *
+ * Since deserializing a prefetched entry is semantically
+ * equivalent to a load, issue an entry loaded notification
+ * if the notify callback is defined.
*
* Return: SUCCEED on success, and FAIL on failure.
*
- * Note that *entry_ptr_ptr is undefined on failure.
+ * Note that *entry_ptr_ptr is undefined on failure.
*
* Programmer: John Mainzer, 8/10/15
*
+ * Changes: Updated sanity checks for possibility that the slist
+ * is disabled.
+ * JRM -- 5/17/20
+ *
*-------------------------------------------------------------------------
*/
herr_t
@@ -468,11 +472,11 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr,
H5C_cache_entry_t **entry_ptr_ptr, const H5C_class_t *type,
haddr_t addr, void *udata)
{
- hbool_t dirty = FALSE; /* Flag indicating whether thing was
+ hbool_t dirty = FALSE; /* Flag indicating whether thing was
* dirtied during deserialize
*/
size_t len; /* Size of image in file */
- void * thing = NULL; /* Pointer to thing loaded */
+ void * thing = NULL; /* Pointer to thing loaded */
H5C_cache_entry_t * pf_entry_ptr; /* pointer to the prefetched entry */
/* supplied in *entry_ptr_ptr. */
H5C_cache_entry_t * ds_entry_ptr; /* Alias for thing loaded, as cache
@@ -484,8 +488,8 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr,
/* the prefetched entry, or NULL if */
/* that array does not exist. */
unsigned flush_flags = (H5C__FLUSH_INVALIDATE_FLAG |
- H5C__FLUSH_CLEAR_ONLY_FLAG);
- int i;
+ H5C__FLUSH_CLEAR_ONLY_FLAG);
+ int i;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -604,73 +608,73 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr,
HDassert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6) );
- ds_entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
- ds_entry_ptr->cache_ptr = f->shared->cache;
- ds_entry_ptr->addr = addr;
- ds_entry_ptr->size = len;
+ ds_entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
+ ds_entry_ptr->cache_ptr = f->shared->cache;
+ ds_entry_ptr->addr = addr;
+ ds_entry_ptr->size = len;
HDassert(ds_entry_ptr->size < H5C_MAX_ENTRY_SIZE);
- ds_entry_ptr->image_ptr = pf_entry_ptr->image_ptr;
- ds_entry_ptr->image_up_to_date = !dirty;
- ds_entry_ptr->type = type;
- ds_entry_ptr->is_dirty = dirty | pf_entry_ptr->is_dirty;
- ds_entry_ptr->dirtied = FALSE;
- ds_entry_ptr->is_protected = FALSE;
- ds_entry_ptr->is_read_only = FALSE;
- ds_entry_ptr->ro_ref_count = 0;
- ds_entry_ptr->is_pinned = FALSE;
- ds_entry_ptr->in_slist = FALSE;
- ds_entry_ptr->flush_marker = FALSE;
+ ds_entry_ptr->image_ptr = pf_entry_ptr->image_ptr;
+ ds_entry_ptr->image_up_to_date = !dirty;
+ ds_entry_ptr->type = type;
+ ds_entry_ptr->is_dirty = dirty | pf_entry_ptr->is_dirty;
+ ds_entry_ptr->dirtied = FALSE;
+ ds_entry_ptr->is_protected = FALSE;
+ ds_entry_ptr->is_read_only = FALSE;
+ ds_entry_ptr->ro_ref_count = 0;
+ ds_entry_ptr->is_pinned = FALSE;
+ ds_entry_ptr->in_slist = FALSE;
+ ds_entry_ptr->flush_marker = FALSE;
#ifdef H5_HAVE_PARALLEL
- ds_entry_ptr->clear_on_unprotect = FALSE;
- ds_entry_ptr->flush_immediately = FALSE;
- ds_entry_ptr->coll_access = FALSE;
+ ds_entry_ptr->clear_on_unprotect = FALSE;
+ ds_entry_ptr->flush_immediately = FALSE;
+ ds_entry_ptr->coll_access = FALSE;
#endif /* H5_HAVE_PARALLEL */
- ds_entry_ptr->flush_in_progress = FALSE;
- ds_entry_ptr->destroy_in_progress = FALSE;
+ ds_entry_ptr->flush_in_progress = FALSE;
+ ds_entry_ptr->destroy_in_progress = FALSE;
- ds_entry_ptr->ring = pf_entry_ptr->ring;
+ ds_entry_ptr->ring = pf_entry_ptr->ring;
/* Initialize flush dependency height fields */
- ds_entry_ptr->flush_dep_parent = NULL;
- ds_entry_ptr->flush_dep_nparents = 0;
- ds_entry_ptr->flush_dep_parent_nalloc = 0;
- ds_entry_ptr->flush_dep_nchildren = 0;
- ds_entry_ptr->flush_dep_ndirty_children = 0;
- ds_entry_ptr->flush_dep_nunser_children = 0;
+ ds_entry_ptr->flush_dep_parent = NULL;
+ ds_entry_ptr->flush_dep_nparents = 0;
+ ds_entry_ptr->flush_dep_parent_nalloc = 0;
+ ds_entry_ptr->flush_dep_nchildren = 0;
+ ds_entry_ptr->flush_dep_ndirty_children = 0;
+ ds_entry_ptr->flush_dep_nunser_children = 0;
/* Initialize fields supporting the hash table: */
- ds_entry_ptr->ht_next = NULL;
- ds_entry_ptr->ht_prev = NULL;
- ds_entry_ptr->il_next = NULL;
- ds_entry_ptr->il_prev = NULL;
+ ds_entry_ptr->ht_next = NULL;
+ ds_entry_ptr->ht_prev = NULL;
+ ds_entry_ptr->il_next = NULL;
+ ds_entry_ptr->il_prev = NULL;
/* Initialize fields supporting replacement policies: */
- ds_entry_ptr->next = NULL;
- ds_entry_ptr->prev = NULL;
+ ds_entry_ptr->next = NULL;
+ ds_entry_ptr->prev = NULL;
#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
- ds_entry_ptr->aux_next = NULL;
- ds_entry_ptr->aux_prev = NULL;
+ ds_entry_ptr->aux_next = NULL;
+ ds_entry_ptr->aux_prev = NULL;
#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
#ifdef H5_HAVE_PARALLEL
- pf_entry_ptr->coll_next = NULL;
- pf_entry_ptr->coll_prev = NULL;
+ pf_entry_ptr->coll_next = NULL;
+ pf_entry_ptr->coll_prev = NULL;
#endif /* H5_HAVE_PARALLEL */
/* Initialize cache image related fields */
- ds_entry_ptr->include_in_image = FALSE;
- ds_entry_ptr->lru_rank = 0;
- ds_entry_ptr->image_dirty = FALSE;
- ds_entry_ptr->fd_parent_count = 0;
- ds_entry_ptr->fd_parent_addrs = NULL;
- ds_entry_ptr->fd_child_count = pf_entry_ptr->fd_child_count;
- ds_entry_ptr->fd_dirty_child_count = 0;
- ds_entry_ptr->image_fd_height = 0;
- ds_entry_ptr->prefetched = FALSE;
- ds_entry_ptr->prefetch_type_id = 0;
- ds_entry_ptr->age = 0;
- ds_entry_ptr->prefetched_dirty = pf_entry_ptr->prefetched_dirty;
+ ds_entry_ptr->include_in_image = FALSE;
+ ds_entry_ptr->lru_rank = 0;
+ ds_entry_ptr->image_dirty = FALSE;
+ ds_entry_ptr->fd_parent_count = 0;
+ ds_entry_ptr->fd_parent_addrs = NULL;
+ ds_entry_ptr->fd_child_count = pf_entry_ptr->fd_child_count;
+ ds_entry_ptr->fd_dirty_child_count = 0;
+ ds_entry_ptr->image_fd_height = 0;
+ ds_entry_ptr->prefetched = FALSE;
+ ds_entry_ptr->prefetch_type_id = 0;
+ ds_entry_ptr->age = 0;
+ ds_entry_ptr->prefetched_dirty = pf_entry_ptr->prefetched_dirty;
#ifndef NDEBUG /* debugging field */
- ds_entry_ptr->serialization_count = 0;
+ ds_entry_ptr->serialization_count = 0;
#endif /* NDEBUG */
H5C__RESET_CACHE_ENTRY_STATS(ds_entry_ptr);
@@ -694,15 +698,20 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr,
*
* 1) Set pf_entry_ptr->image_ptr to NULL. Since we have already
* transferred the buffer containing the image to *ds_entry_ptr,
- * this is not a memory leak.
+ * this is not a memory leak.
*
* 2) Call H5C__flush_single_entry() with the H5C__FLUSH_INVALIDATE_FLAG
* and H5C__FLUSH_CLEAR_ONLY_FLAG flags set.
*/
pf_entry_ptr->image_ptr = NULL;
- if(pf_entry_ptr->is_dirty) {
- HDassert(pf_entry_ptr->in_slist);
+
+ if ( pf_entry_ptr->is_dirty ) {
+
+ HDassert(((cache_ptr->slist_enabled) && (pf_entry_ptr->in_slist)) || \
+ ((!cache_ptr->slist_enabled) && (!pf_entry_ptr->in_slist)));
+
flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG;
+
} /* end if */
if(H5C__flush_single_entry(f, pf_entry_ptr, flush_flags) < 0)
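
The two-sided assertion just added is the recurring invariant of this patch: with the slist enabled, every dirty entry must be on it; with the slist disabled, no entry may be. Expressed as a predicate (hypothetical helper, illustrative only, assuming the H5C package headers):

    /* Hypothetical helper -- TRUE iff a dirty entry's slist membership
     * is consistent with the cache-wide slist_enabled flag.
     */
    static hbool_t
    slist_membership_ok(const H5C_t *cache_ptr,
                        const H5C_cache_entry_t *entry_ptr)
    {
        return (( cache_ptr->slist_enabled) && ( entry_ptr->in_slist)) ||
               ((!cache_ptr->slist_enabled) && (!entry_ptr->in_slist));
    }
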
diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c
index 2f2f3fc..3579bc2 100644
--- a/src/H5Cmpio.c
+++ b/src/H5Cmpio.c
@@ -18,7 +18,7 @@
* Quincey Koziol
*
* Purpose: Functions in this file implement support for parallel I/O for
- * generic cache code.
+ * generic cache code.
*
*-------------------------------------------------------------------------
*/
@@ -28,7 +28,7 @@
/****************/
#include "H5Cmodule.h" /* This source code file is part of the H5C module */
-#define H5F_FRIEND /*suppress error about including H5Fpkg */
+#define H5F_FRIEND /*suppress error about including H5Fpkg */
/***********/
@@ -114,9 +114,9 @@ static herr_t H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring,
*
* We construct the table as follows. Let:
*
- * n = num_candidates / mpi_size;
+ * n = num_candidates / mpi_size;
*
- * m = num_candidates % mpi_size;
+ * m = num_candidates % mpi_size;
*
* Now allocate an array of integers of length mpi_size + 1,
* and call this array candidate_assignment_table.
@@ -136,10 +136,10 @@ static herr_t H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring,
* Once the table is constructed, we determine the first and
* last entry this process is to flush as follows:
*
- * first_entry_to_flush = candidate_assignment_table[mpi_rank]
+ * first_entry_to_flush = candidate_assignment_table[mpi_rank]
*
* last_entry_to_flush =
- * candidate_assignment_table[mpi_rank + 1] - 1;
+ * candidate_assignment_table[mpi_rank + 1] - 1;
*
* With these values determined, we simply scan through the
* candidate list, marking all entries in the range
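
The table construction described above is straightforward to sketch in isolation. Illustrative only: the even-split rule for the remainder m (one extra candidate to each of the first m ranks) is an assumption consistent with the comment, and the helper name is hypothetical:

    #include <assert.h>

    /* Hypothetical sketch of the candidate assignment table. */
    static void
    build_assignment_table(int num_candidates, int mpi_size, int mpi_rank,
                           int table[/* mpi_size + 1 */],
                           int *first_entry_to_flush, int *last_entry_to_flush)
    {
        int n = num_candidates / mpi_size;
        int m = num_candidates % mpi_size;
        int i;

        table[0] = 0;
        for(i = 1; i <= mpi_size; i++)
            table[i] = table[i - 1] + n + ((i <= m) ? 1 : 0);

        /* every candidate is assigned to exactly one rank */
        assert(table[mpi_size] == num_candidates);

        *first_entry_to_flush = table[mpi_rank];
        *last_entry_to_flush  = table[mpi_rank + 1] - 1;
    }
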
@@ -163,6 +163,10 @@ static herr_t H5C__flush_candidates_in_ring(H5F_t *f, H5C_ring_t ring,
* Programmer: John Mainzer
* 3/17/10
*
+ * Changes: Updated sanity checks to allow for the possibility that
+ * the slist is disabled.
+ * JRM -- 8/3/20
+ *
*-------------------------------------------------------------------------
*/
herr_t
@@ -185,12 +189,15 @@ H5C_apply_candidate_list(H5F_t * f,
unsigned entries_to_clear[H5C_RING_NTYPES];
haddr_t addr;
H5C_cache_entry_t * entry_ptr = NULL;
+
#if H5C_DO_SANITY_CHECKS
haddr_t last_addr;
#endif /* H5C_DO_SANITY_CHECKS */
+
#if H5C_APPLY_CANDIDATE_LIST__DEBUG
char tbl_buf[1024];
#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
+
unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
@@ -200,7 +207,8 @@ H5C_apply_candidate_list(H5F_t * f,
HDassert(cache_ptr != NULL);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
HDassert(num_candidates > 0);
- HDassert(num_candidates <= cache_ptr->slist_len);
+ HDassert( ( ! cache_ptr->slist_enabled ) ||
+ ( num_candidates <= cache_ptr->slist_len ));
HDassert(candidates_list_ptr != NULL);
HDassert(0 <= mpi_rank);
HDassert(mpi_rank < mpi_size);
@@ -409,6 +417,7 @@ done:
/*-------------------------------------------------------------------------
+ *
* Function: H5C_construct_candidate_list__clean_cache
*
* Purpose: Construct the list of entries that should be flushed to
@@ -424,6 +433,16 @@ done:
* Programmer: John Mainzer
* 3/17/10
*
+ * Changes: With the slist optimization, the slist is not maintained
+ * unless a flush is in progress. Thus we can no longer use
+ * cache_ptr->slist_size to determine the total size of
+ * the entries we must insert in the candidate list.
+ *
+ * To address this, we now use cache_ptr->dirty_index_size
+ * instead.
+ *
+ * JRM -- 7/27/20
+ *
*-------------------------------------------------------------------------
*/
herr_t
@@ -437,60 +456,82 @@ H5C_construct_candidate_list__clean_cache(H5C_t * cache_ptr)
HDassert( cache_ptr != NULL );
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- /* As a sanity check, set space needed to the size of the skip list.
- * This should be the sum total of the sizes of all the dirty entries
- * in the metadata cache.
+ /* As a sanity check, set space needed to the dirty_index_size. This
+ * should be the sum total of the sizes of all the dirty entries
+ * in the metadata cache. Note that if the slist is enabled,
+ * cache_ptr->slist_size should equal cache_ptr->dirty_index_size.
*/
- space_needed = cache_ptr->slist_size;
+ space_needed = cache_ptr->dirty_index_size;
+
+ HDassert( ( ! cache_ptr->slist_enabled ) ||
+ ( space_needed == cache_ptr->slist_size ) );
+
/* Recall that while we shouldn't have any protected entries at this
* point, it is possible that some dirty entries may reside on the
* pinned list at this point.
*/
- HDassert( cache_ptr->slist_size <=
+ HDassert( cache_ptr->dirty_index_size <=
(cache_ptr->dLRU_list_size + cache_ptr->pel_size) );
- HDassert( cache_ptr->slist_len <=
- (cache_ptr->dLRU_list_len + cache_ptr->pel_len) );
+ HDassert( ( ! cache_ptr->slist_enabled ) ||
+ ( cache_ptr->slist_len <=
+ (cache_ptr->dLRU_list_len + cache_ptr->pel_len) ) );
+
if(space_needed > 0) { /* we have work to do */
+
H5C_cache_entry_t *entry_ptr;
unsigned nominated_entries_count = 0;
size_t nominated_entries_size = 0;
- haddr_t nominated_addr;
+ haddr_t nominated_addr;
- HDassert( cache_ptr->slist_len > 0 );
+ HDassert( ( ! cache_ptr->slist_enabled ) ||
+ ( cache_ptr->slist_len > 0 ) );
/* Scan the dirty LRU list from tail forward and nominate sufficient
* entries to free up the necessary space.
*/
entry_ptr = cache_ptr->dLRU_tail_ptr;
- while((nominated_entries_size < space_needed) &&
- (nominated_entries_count < cache_ptr->slist_len) &&
- (entry_ptr != NULL)) {
+
+ while ( ( nominated_entries_size < space_needed ) &&
+ ( ( ! cache_ptr->slist_enabled ) ||
+ ( nominated_entries_count < cache_ptr->slist_len ) ) &&
+ ( entry_ptr != NULL ) ) {
+
HDassert( ! (entry_ptr->is_protected) );
HDassert( ! (entry_ptr->is_read_only) );
HDassert( entry_ptr->ro_ref_count == 0 );
HDassert( entry_ptr->is_dirty );
- HDassert( entry_ptr->in_slist );
+ HDassert( ( ! cache_ptr->slist_enabled ) ||
+ ( entry_ptr->in_slist ) );
nominated_addr = entry_ptr->addr;
+
if(H5AC_add_candidate((H5AC_t *)cache_ptr, nominated_addr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_add_candidate() failed")
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "H5AC_add_candidate() failed")
nominated_entries_size += entry_ptr->size;
nominated_entries_count++;
entry_ptr = entry_ptr->aux_prev;
+
} /* end while */
+
HDassert( entry_ptr == NULL );
/* it is possible that there are some dirty entries on the
* protected entry list as well -- scan it too if necessary
*/
entry_ptr = cache_ptr->pel_head_ptr;
- while((nominated_entries_size < space_needed) &&
- (nominated_entries_count < cache_ptr->slist_len) &&
- (entry_ptr != NULL)) {
+
+ while ( ( nominated_entries_size < space_needed ) &&
+ ( ( ! cache_ptr->slist_enabled ) ||
+ ( nominated_entries_count < cache_ptr->slist_len ) ) &&
+ ( entry_ptr != NULL ) ) {
+
if(entry_ptr->is_dirty) {
+
HDassert( ! (entry_ptr->is_protected) );
HDassert( ! (entry_ptr->is_read_only) );
HDassert( entry_ptr->ro_ref_count == 0 );
@@ -498,22 +539,31 @@ H5C_construct_candidate_list__clean_cache(H5C_t * cache_ptr)
HDassert( entry_ptr->in_slist );
nominated_addr = entry_ptr->addr;
+
if(H5AC_add_candidate((H5AC_t *)cache_ptr, nominated_addr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_add_candidate() failed")
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "H5AC_add_candidate() failed")
nominated_entries_size += entry_ptr->size;
nominated_entries_count++;
+
} /* end if */
entry_ptr = entry_ptr->next;
+
} /* end while */
- HDassert( nominated_entries_count == cache_ptr->slist_len );
+ HDassert( ( ! cache_ptr->slist_enabled ) ||
+ ( nominated_entries_count == cache_ptr->slist_len ) );
HDassert( nominated_entries_size == space_needed );
+
} /* end if */
done:
+
FUNC_LEAVE_NOAPI(ret_value)
+
} /* H5C_construct_candidate_list__clean_cache() */
@@ -533,6 +583,12 @@ done:
* Programmer: John Mainzer
* 3/17/10
*
+ * Changes: With the slist optimization, the slist is not maintained
+ * unless a flush is in progress. Updated sanity checks to
+ * reflect this.
+ *
+ * JRM -- 7/27/20
+ *
*-------------------------------------------------------------------------
*/
herr_t
@@ -550,54 +606,77 @@ H5C_construct_candidate_list__min_clean(H5C_t * cache_ptr)
* cache back within its min clean constraints.
*/
if(cache_ptr->max_cache_size > cache_ptr->index_size) {
- if(((cache_ptr->max_cache_size - cache_ptr->index_size) +
- cache_ptr->cLRU_list_size) >= cache_ptr->min_clean_size)
+
+ if ( ( (cache_ptr->max_cache_size - cache_ptr->index_size) +
+ cache_ptr->cLRU_list_size) >= cache_ptr->min_clean_size ) {
+
space_needed = 0;
- else
+
+ } else {
+
space_needed = cache_ptr->min_clean_size -
((cache_ptr->max_cache_size - cache_ptr->index_size) +
cache_ptr->cLRU_list_size);
+ }
} /* end if */
else {
- if(cache_ptr->min_clean_size <= cache_ptr->cLRU_list_size)
+
+ if(cache_ptr->min_clean_size <= cache_ptr->cLRU_list_size) {
+
space_needed = 0;
- else
+
+ } else {
+
space_needed = cache_ptr->min_clean_size -
cache_ptr->cLRU_list_size;
+ }
} /* end else */
if(space_needed > 0) { /* we have work to do */
+
H5C_cache_entry_t *entry_ptr;
unsigned nominated_entries_count = 0;
size_t nominated_entries_size = 0;
- HDassert( cache_ptr->slist_len > 0 );
+ HDassert( ( ! cache_ptr->slist_enabled ) ||
+ ( cache_ptr->slist_len > 0 ) );
/* Scan the dirty LRU list from tail forward and nominate sufficient
* entries to free up the necessary space.
*/
entry_ptr = cache_ptr->dLRU_tail_ptr;
- while((nominated_entries_size < space_needed) &&
- (nominated_entries_count < cache_ptr->slist_len) &&
- (entry_ptr != NULL) &&
- (!entry_ptr->flush_me_last)) {
- haddr_t nominated_addr;
+
+ while ( ( nominated_entries_size < space_needed ) &&
+ ( ( ! cache_ptr->slist_enabled ) ||
+ ( nominated_entries_count < cache_ptr->slist_len ) ) &&
+ ( entry_ptr != NULL ) &&
+ ( ! entry_ptr->flush_me_last ) ) {
+
+ haddr_t nominated_addr;
HDassert( ! (entry_ptr->is_protected) );
HDassert( ! (entry_ptr->is_read_only) );
HDassert( entry_ptr->ro_ref_count == 0 );
HDassert( entry_ptr->is_dirty );
- HDassert( entry_ptr->in_slist );
+ HDassert( ( ! cache_ptr->slist_enabled ) ||
+ ( entry_ptr->in_slist ) );
nominated_addr = entry_ptr->addr;
+
if(H5AC_add_candidate((H5AC_t *)cache_ptr, nominated_addr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_add_candidate() failed")
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
+ "H5AC_add_candidate() failed")
nominated_entries_size += entry_ptr->size;
nominated_entries_count++;
entry_ptr = entry_ptr->aux_prev;
+
} /* end while */
- HDassert( nominated_entries_count <= cache_ptr->slist_len );
+
+ HDassert( ( ! cache_ptr->slist_enabled ) ||
+ ( nominated_entries_count <= cache_ptr->slist_len ) );
+ HDassert( nominated_entries_size <= cache_ptr->dirty_index_size );
HDassert( nominated_entries_size >= space_needed );
} /* end if */
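
The space_needed computation above reduces to clamping the min clean requirement against the clean space already available (unused cache space plus the clean LRU). A standalone sketch of just that arithmetic, with the field values passed in as plain parameters (hypothetical helper, illustrative only):

    #include <stddef.h>

    static size_t
    min_clean_space_needed(size_t max_cache_size, size_t index_size,
                           size_t cLRU_list_size, size_t min_clean_size)
    {
        size_t clean_avail;

        if(max_cache_size > index_size)
            /* unused cache space counts toward the min clean requirement */
            clean_avail = (max_cache_size - index_size) + cLRU_list_size;
        else
            clean_avail = cLRU_list_size;

        return (clean_avail >= min_clean_size) ?
                   (size_t)0 : (min_clean_size - clean_avail);
    }
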
@@ -768,7 +847,7 @@ H5C_mark_entries_as_clean(H5F_t * f,
* of the pre_serialize / serialize routines, this may
* cease to be the case -- requiring a review of this
* point.
- * JRM -- 4/7/15
+ * JRM -- 4/7/15
*/
entries_cleared = 0;
entries_examined = 0;
@@ -872,9 +951,9 @@ done:
herr_t
H5C_clear_coll_entries(H5C_t *cache_ptr, hbool_t partial)
{
- uint32_t clear_cnt;
- H5C_cache_entry_t * entry_ptr = NULL;
- herr_t ret_value = SUCCEED;
+ uint32_t clear_cnt;
+ H5C_cache_entry_t * entry_ptr = NULL;
+ herr_t ret_value = SUCCEED;
FUNC_ENTER_NOAPI_NOINIT
@@ -1123,17 +1202,17 @@ H5C__flush_candidate_entries(H5F_t *f, unsigned entries_to_flush[H5C_RING_NTYPES
unsigned entries_to_clear[H5C_RING_NTYPES])
{
#if H5C_DO_SANITY_CHECKS
- int i;
- uint32_t index_len = 0;
- size_t index_size = (size_t)0;
- size_t clean_index_size = (size_t)0;
- size_t dirty_index_size = (size_t)0;
- size_t slist_size = (size_t)0;
- uint32_t slist_len = 0;
+ int i;
+ uint32_t index_len = 0;
+ size_t index_size = (size_t)0;
+ size_t clean_index_size = (size_t)0;
+ size_t dirty_index_size = (size_t)0;
+ size_t slist_size = (size_t)0;
+ uint32_t slist_len = 0;
#endif /* H5C_DO_SANITY_CHECKS */
- H5C_ring_t ring;
+ H5C_ring_t ring;
H5C_t * cache_ptr;
- herr_t ret_value = SUCCEED;
+ herr_t ret_value = SUCCEED;
FUNC_ENTER_STATIC
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index 779f289..79ebd9e 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -47,13 +47,22 @@
/* Number of epoch markers active */
#define H5C__MAX_EPOCH_MARKERS 10
+
/* Cache configuration settings */
#define H5C__HASH_TABLE_LEN (64 * 1024) /* must be a power of 2 */
#define H5C__H5C_T_MAGIC 0x005CAC0E
+
/* Initial allocated size of the "flush_dep_parent" array */
#define H5C_FLUSH_DEP_PARENT_INIT 8
+
+/* Set to TRUE to enable the slist optimization. If this macro is TRUE,
+ * the slist is disabled whenever a flush is not in progress.
+ */
+#define H5C__SLIST_OPT_ENABLED TRUE
+
+
/****************************************************************************
*
* We maintain doubly linked lists of instances of H5C_cache_entry_t for a
@@ -1568,49 +1577,82 @@ if ( ( (cache_ptr)->index_size != \
* Added code to maintain the cache_ptr->slist_ring_len
* and cache_ptr->slist_ring_size arrays.
*
+ * JRM -- 4/29/20
+ * Reworked macro to support the slist_enabled field
+ * of H5C_t. If slist_enabled == TRUE, the macro
+ * functions as before. Otherwise, the macro is a no-op,
+ * and the slist must be empty.
+ *
*-------------------------------------------------------------------------
*/
+/* NOTE: The H5C__INSERT_ENTRY_IN_SLIST() macro is set up so that
+ *
+ * H5C_DO_SANITY_CHECKS
+ *
+ * and
+ *
+ * H5C_DO_SLIST_SANITY_CHECKS
+ *
+ * can be selected independently. This is easy to miss as the
+ * two #defines are easy to confuse.
+ */
+
#if H5C_DO_SLIST_SANITY_CHECKS
+
#define ENTRY_IN_SLIST(cache_ptr, entry_ptr) \
H5C_entry_in_skip_list((cache_ptr), (entry_ptr))
+
#else /* H5C_DO_SLIST_SANITY_CHECKS */
+
#define ENTRY_IN_SLIST(cache_ptr, entry_ptr) FALSE
+
#endif /* H5C_DO_SLIST_SANITY_CHECKS */
+
#if H5C_DO_SANITY_CHECKS
#define H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, fail_val) \
{ \
HDassert( (cache_ptr) ); \
HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (entry_ptr) ); \
- HDassert( (entry_ptr)->size > 0 ); \
- HDassert( H5F_addr_defined((entry_ptr)->addr) ); \
- HDassert( !((entry_ptr)->in_slist) ); \
- HDassert( !ENTRY_IN_SLIST((cache_ptr), (entry_ptr)) ); \
- HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
- HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
- HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
- (cache_ptr)->slist_len ); \
- HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
- (cache_ptr)->slist_size ); \
\
- if(H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) < 0) \
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), "can't insert entry in skip list") \
+ if ( (cache_ptr)->slist_enabled ) { \
+ \
+ HDassert( (entry_ptr) ); \
+ HDassert( (entry_ptr)->size > 0 ); \
+ HDassert( H5F_addr_defined((entry_ptr)->addr) ); \
+ HDassert( !((entry_ptr)->in_slist) ); \
+ HDassert( ! ENTRY_IN_SLIST((cache_ptr), (entry_ptr)) ); \
+ HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
+ HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
+ HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_len ); \
+ HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_size ); \
+ \
+ if ( H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, \
+ &((entry_ptr)->addr)) < 0) \
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), \
+ "can't insert entry in skip list") \
+ \
+ (entry_ptr)->in_slist = TRUE; \
+ (cache_ptr)->slist_changed = TRUE; \
+ (cache_ptr)->slist_len++; \
+ (cache_ptr)->slist_size += (entry_ptr)->size; \
+ ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])++; \
+ ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (entry_ptr)->size;\
+ (cache_ptr)->slist_len_increase++; \
+ (cache_ptr)->slist_size_increase += (int64_t)((entry_ptr)->size); \
\
- (entry_ptr)->in_slist = TRUE; \
- (cache_ptr)->slist_changed = TRUE; \
- (cache_ptr)->slist_len++; \
- (cache_ptr)->slist_size += (entry_ptr)->size; \
- ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])++; \
- ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (entry_ptr)->size; \
- (cache_ptr)->slist_len_increase++; \
- (cache_ptr)->slist_size_increase += (int64_t)((entry_ptr)->size); \
+ HDassert( (cache_ptr)->slist_len > 0 ); \
+ HDassert( (cache_ptr)->slist_size > 0 ); \
\
- HDassert( (cache_ptr)->slist_len > 0 ); \
- HDassert( (cache_ptr)->slist_size > 0 ); \
+ } else { /* slist disabled */ \
\
+ HDassert( (cache_ptr)->slist_len == 0 ); \
+ HDassert( (cache_ptr)->slist_size == 0 ); \
+ } \
} /* H5C__INSERT_ENTRY_IN_SLIST */
#else /* H5C_DO_SANITY_CHECKS */
@@ -1619,31 +1661,42 @@ if ( ( (cache_ptr)->index_size != \
{ \
HDassert( (cache_ptr) ); \
HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (entry_ptr) ); \
- HDassert( (entry_ptr)->size > 0 ); \
- HDassert( H5F_addr_defined((entry_ptr)->addr) ); \
- HDassert( !((entry_ptr)->in_slist) ); \
- HDassert( !ENTRY_IN_SLIST((cache_ptr), (entry_ptr)) ); \
- HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
- HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
- HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
- (cache_ptr)->slist_len ); \
- HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
- (cache_ptr)->slist_size ); \
\
- if(H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) < 0) \
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), "can't insert entry in skip list") \
+ if ( (cache_ptr)->slist_enabled ) { \
+ \
+ HDassert( (entry_ptr) ); \
+ HDassert( (entry_ptr)->size > 0 ); \
+ HDassert( ! ENTRY_IN_SLIST((cache_ptr), (entry_ptr)) ); \
+ HDassert( H5F_addr_defined((entry_ptr)->addr) ); \
+ HDassert( !((entry_ptr)->in_slist) ); \
+ HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
+ HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
+ HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_len ); \
+ HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_size ); \
+ HDassert( (cache_ptr)->slist_ptr ); \
+ \
+ if ( H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, \
+ &((entry_ptr)->addr)) < 0) \
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), \
+ "can't insert entry in skip list") \
\
- (entry_ptr)->in_slist = TRUE; \
- (cache_ptr)->slist_changed = TRUE; \
- (cache_ptr)->slist_len++; \
- (cache_ptr)->slist_size += (entry_ptr)->size; \
- ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])++; \
- ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (entry_ptr)->size; \
+ (entry_ptr)->in_slist = TRUE; \
+ (cache_ptr)->slist_changed = TRUE; \
+ (cache_ptr)->slist_len++; \
+ (cache_ptr)->slist_size += (entry_ptr)->size; \
+ ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])++; \
+ ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (entry_ptr)->size;\
\
- HDassert( (cache_ptr)->slist_len > 0 ); \
- HDassert( (cache_ptr)->slist_size > 0 ); \
+ HDassert( (cache_ptr)->slist_len > 0 ); \
+ HDassert( (cache_ptr)->slist_size > 0 ); \
\
+ } else { /* slist disabled */ \
+ \
+ HDassert( (cache_ptr)->slist_len == 0 ); \
+ HDassert( (cache_ptr)->slist_size == 0 ); \
+ } \
} /* H5C__INSERT_ENTRY_IN_SLIST */
#endif /* H5C_DO_SANITY_CHECKS */
@@ -1654,87 +1707,136 @@ if ( ( (cache_ptr)->index_size != \
* Function: H5C__REMOVE_ENTRY_FROM_SLIST
*
* Purpose: Remove the specified instance of H5C_cache_entry_t from the
- * index skip list in the specified instance of H5C_t. Update
- * the associated length and size fields.
+ * index skip list in the specified instance of H5C_t. Update
+ * the associated length and size fields.
*
* Return: N/A
*
* Programmer: John Mainzer, 5/10/04
*
+ * Modifications:
+ *
+ * JRM -- 7/21/04
+ * Updated function for the addition of the hash table.
+ *
+ * JRM - 7/27/04
+ * Converted from the function H5C_remove_entry_from_tree()
+ * to the macro H5C__REMOVE_ENTRY_FROM_TREE in the hopes of
+ * wringing a little more performance out of the cache.
+ *
+ * QAK -- 11/27/04
+ * Switched over to using skip list routines.
+ *
+ * JRM -- 3/28/07
+ * Updated sanity checks for the new is_read_only and
+ * ro_ref_count fields in H5C_cache_entry_t.
+ *
+ * JRM -- 12/13/14
+ * Added code to set cache_ptr->slist_changed to TRUE
+ * when an entry is removed from the slist.
+ *
+ * JRM -- 4/29/20
+ * Reworked macro to support the slist_enabled field
+ * of H5C_t. If slist_enabled == TRUE, the macro
+ * functions as before. Otherwise, the macro is a no-op,
+ * and the slist must be empty.
+ *
*-------------------------------------------------------------------------
*/
#if H5C_DO_SANITY_CHECKS
-#define H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush) \
-{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (entry_ptr) ); \
- HDassert( !((entry_ptr)->is_read_only) ); \
- HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
- HDassert( (entry_ptr)->size > 0 ); \
- HDassert( (entry_ptr)->in_slist ); \
- HDassert( (cache_ptr)->slist_ptr ); \
- HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
- HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
- HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
- (cache_ptr)->slist_len ); \
- HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
- (cache_ptr)->slist_size ); \
- \
- if ( H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr) \
- != (entry_ptr) ) \
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "can't delete entry from skip list") \
- \
- HDassert( (cache_ptr)->slist_len > 0 ); \
- if(!(during_flush)) \
- (cache_ptr)->slist_changed = TRUE; \
- (cache_ptr)->slist_len--; \
- HDassert( (cache_ptr)->slist_size >= (entry_ptr)->size ); \
- (cache_ptr)->slist_size -= (entry_ptr)->size; \
- ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])--; \
- HDassert( (cache_ptr)->slist_ring_size[(entry_ptr->ring)] >= \
- (entry_ptr)->size ); \
- ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (entry_ptr)->size; \
- (cache_ptr)->slist_len_increase--; \
- (cache_ptr)->slist_size_increase -= (int64_t)((entry_ptr)->size); \
- (entry_ptr)->in_slist = FALSE; \
+#define H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
+ \
+ if ( (cache_ptr)->slist_enabled ) { \
+ \
+ HDassert( (entry_ptr) ); \
+ HDassert( !((entry_ptr)->is_read_only) ); \
+ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
+ HDassert( (entry_ptr)->size > 0 ); \
+ HDassert( (entry_ptr)->in_slist ); \
+ HDassert( (cache_ptr)->slist_ptr ); \
+ HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
+ HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
+ HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_len ); \
+ HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_size ); \
+ HDassert( (cache_ptr)->slist_size >= (entry_ptr)->size ); \
+ \
+ if ( H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr) \
+ != (entry_ptr) ) \
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, \
+ "can't delete entry from skip list") \
+ \
+ HDassert( (cache_ptr)->slist_len > 0 ); \
+ if(!(during_flush)) \
+ (cache_ptr)->slist_changed = TRUE; \
+ (cache_ptr)->slist_len--; \
+ HDassert( (cache_ptr)->slist_size >= (entry_ptr)->size ); \
+ (cache_ptr)->slist_size -= (entry_ptr)->size; \
+ ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])--; \
+ HDassert( (cache_ptr)->slist_ring_size[(entry_ptr->ring)] >= \
+ (entry_ptr)->size ); \
+ ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (entry_ptr)->size;\
+ (cache_ptr)->slist_len_increase--; \
+ (cache_ptr)->slist_size_increase -= (int64_t)((entry_ptr)->size); \
+ (entry_ptr)->in_slist = FALSE; \
+ \
+ } else { /* slist disabled */ \
+ \
+ HDassert( (cache_ptr)->slist_len == 0 ); \
+ HDassert( (cache_ptr)->slist_size == 0 ); \
+ } \
} /* H5C__REMOVE_ENTRY_FROM_SLIST */
#else /* H5C_DO_SANITY_CHECKS */
-#define H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush) \
-{ \
- HDassert( (cache_ptr) ); \
- HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (entry_ptr) ); \
- HDassert( !((entry_ptr)->is_read_only) ); \
- HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
- HDassert( (entry_ptr)->in_slist ); \
- HDassert( (cache_ptr)->slist_ptr ); \
- HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
- HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
- HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
- (cache_ptr)->slist_len ); \
- HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
- (cache_ptr)->slist_size ); \
- \
- if ( H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr) \
- != (entry_ptr) ) \
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "can't delete entry from skip list") \
- \
- HDassert( (cache_ptr)->slist_len > 0 ); \
- if(!(during_flush)) \
- (cache_ptr)->slist_changed = TRUE; \
- (cache_ptr)->slist_len--; \
- HDassert( (cache_ptr)->slist_size >= (entry_ptr)->size ); \
- (cache_ptr)->slist_size -= (entry_ptr)->size; \
- ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])--; \
- HDassert( (cache_ptr)->slist_ring_size[(entry_ptr->ring)] >= \
- (entry_ptr)->size ); \
- ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (entry_ptr)->size; \
- (entry_ptr)->in_slist = FALSE; \
+#define H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
+ \
+ if ( (cache_ptr)->slist_enabled ) { \
+ \
+ HDassert( (entry_ptr) ); \
+ HDassert( !((entry_ptr)->is_read_only) ); \
+ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
+ HDassert( (entry_ptr)->in_slist ); \
+ HDassert( (cache_ptr)->slist_ptr ); \
+ HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
+ HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
+ HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_len ); \
+ HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_size ); \
+ \
+ if ( H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr) \
+ != (entry_ptr) ) \
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, \
+ "can't delete entry from skip list") \
+ \
+ HDassert( (cache_ptr)->slist_len > 0 ); \
+ if(!(during_flush)) \
+ (cache_ptr)->slist_changed = TRUE; \
+ (cache_ptr)->slist_len--; \
+ HDassert( (cache_ptr)->slist_size >= (entry_ptr)->size ); \
+ (cache_ptr)->slist_size -= (entry_ptr)->size; \
+ ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])--; \
+ HDassert( (cache_ptr)->slist_ring_size[(entry_ptr->ring)] >= \
+ (entry_ptr)->size ); \
+ ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (entry_ptr)->size;\
+ (entry_ptr)->in_slist = FALSE; \
+ \
+ } else { /* slist disabled */ \
+ \
+ HDassert( (cache_ptr)->slist_len == 0 ); \
+ HDassert( (cache_ptr)->slist_size == 0 ); \
+ } \
} /* H5C__REMOVE_ENTRY_FROM_SLIST */
+
#endif /* H5C_DO_SANITY_CHECKS */
@@ -1743,7 +1845,7 @@ if ( ( (cache_ptr)->index_size != \
* Function: H5C__UPDATE_SLIST_FOR_SIZE_CHANGE
*
* Purpose: Update cache_ptr->slist_size for a change in the size of
- * and entry in the slist.
+ * an entry in the slist.
*
* Return: N/A
*
@@ -1751,24 +1853,30 @@ if ( ( (cache_ptr)->index_size != \
*
* Modifications:
*
- * JRM -- 8/27/06
- * Added the H5C_DO_SANITY_CHECKS version of the macro.
+ * JRM -- 8/27/06
+ * Added the H5C_DO_SANITY_CHECKS version of the macro.
*
- * This version maintains the slist_size_increase field
- * that are used in sanity checks in the flush routines.
+ * This version maintains the slist_size_increase field
+ * that is used in sanity checks in the flush routines.
*
- * All this is needed as the fractal heap needs to be
- * able to dirty, resize and/or move entries during the
- * flush.
+ * All this is needed as the fractal heap needs to be
+ * able to dirty, resize and/or move entries during the
+ * flush.
*
- * JRM -- 12/13/14
- * Note that we do not set cache_ptr->slist_changed to TRUE
- * in this case, as the structure of the slist is not
- * modified.
+ * JRM -- 12/13/14
+ * Note that we do not set cache_ptr->slist_changed to TRUE
+ * in this case, as the structure of the slist is not
+ * modified.
*
- * JRM -- 9/1/15
- * Added code to maintain the cache_ptr->slist_ring_len
- * and cache_ptr->slist_ring_size arrays.
+ * JRM -- 9/1/15
+ * Added code to maintain the cache_ptr->slist_ring_len
+ * and cache_ptr->slist_ring_size arrays.
+ *
+ * JRM -- 4/29/20
+ * Reworked macro to support the slist_enabled field
+ * of H5C_t. If slist_enabled == TRUE, the macro
+ * functions as before. Otherwise, the macro is a no-op,
+ * and the slist must be empty.
*
*-------------------------------------------------------------------------
*/
@@ -1779,32 +1887,43 @@ if ( ( (cache_ptr)->index_size != \
{ \
HDassert( (cache_ptr) ); \
HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (old_size) > 0 ); \
- HDassert( (new_size) > 0 ); \
- HDassert( (old_size) <= (cache_ptr)->slist_size ); \
- HDassert( (cache_ptr)->slist_len > 0 ); \
- HDassert( ((cache_ptr)->slist_len > 1) || \
- ( (cache_ptr)->slist_size == (old_size) ) ); \
- HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
- HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
- HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
- (cache_ptr)->slist_len ); \
- HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
- (cache_ptr)->slist_size ); \
\
- (cache_ptr)->slist_size -= (old_size); \
- (cache_ptr)->slist_size += (new_size); \
+ if ( (cache_ptr)->slist_enabled ) { \
+ \
+ HDassert( (old_size) > 0 ); \
+ HDassert( (new_size) > 0 ); \
+ HDassert( (old_size) <= (cache_ptr)->slist_size ); \
+ HDassert( (cache_ptr)->slist_len > 0 ); \
+ HDassert( ((cache_ptr)->slist_len > 1) || \
+ ( (cache_ptr)->slist_size == (old_size) ) ); \
+ HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
+ HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
+ HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_len ); \
+ HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_size ); \
+ \
+ (cache_ptr)->slist_size -= (old_size); \
+ (cache_ptr)->slist_size += (new_size); \
\
- HDassert( (cache_ptr)->slist_ring_size[(entry_ptr->ring)] >=(old_size) ); \
- ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (old_size); \
- ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (new_size); \
+ HDassert( (cache_ptr)->slist_ring_size[(entry_ptr->ring)] \
+ >= (old_size) ); \
\
- (cache_ptr)->slist_size_increase -= (int64_t)(old_size); \
- (cache_ptr)->slist_size_increase += (int64_t)(new_size); \
+ ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (old_size); \
+ ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (new_size); \
\
- HDassert( (new_size) <= (cache_ptr)->slist_size ); \
- HDassert( ( (cache_ptr)->slist_len > 1 ) || \
- ( (cache_ptr)->slist_size == (new_size) ) ); \
+ (cache_ptr)->slist_size_increase -= (int64_t)(old_size); \
+ (cache_ptr)->slist_size_increase += (int64_t)(new_size); \
+ \
+ HDassert( (new_size) <= (cache_ptr)->slist_size ); \
+ HDassert( ( (cache_ptr)->slist_len > 1 ) || \
+ ( (cache_ptr)->slist_size == (new_size) ) ); \
+ \
+ } else { /* slist disabled */ \
+ \
+ HDassert( (cache_ptr)->slist_len == 0 ); \
+ HDassert( (cache_ptr)->slist_size == 0 ); \
+ } \
} /* H5C__UPDATE_SLIST_FOR_SIZE_CHANGE */
#else /* H5C_DO_SANITY_CHECKS */
@@ -1813,29 +1932,39 @@ if ( ( (cache_ptr)->index_size != \
{ \
HDassert( (cache_ptr) ); \
HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
- HDassert( (old_size) > 0 ); \
- HDassert( (new_size) > 0 ); \
- HDassert( (old_size) <= (cache_ptr)->slist_size ); \
- HDassert( (cache_ptr)->slist_len > 0 ); \
- HDassert( ((cache_ptr)->slist_len > 1) || \
- ( (cache_ptr)->slist_size == (old_size) ) ); \
- HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
- HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
- HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
- (cache_ptr)->slist_len ); \
- HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
- (cache_ptr)->slist_size ); \
\
- (cache_ptr)->slist_size -= (old_size); \
- (cache_ptr)->slist_size += (new_size); \
+ if ( (cache_ptr)->slist_enabled ) { \
+ \
+ HDassert( (old_size) > 0 ); \
+ HDassert( (new_size) > 0 ); \
+ HDassert( (old_size) <= (cache_ptr)->slist_size ); \
+ HDassert( (cache_ptr)->slist_len > 0 ); \
+ HDassert( ((cache_ptr)->slist_len > 1) || \
+ ( (cache_ptr)->slist_size == (old_size) ) ); \
+ HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
+ HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
+ HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_len ); \
+ HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_size ); \
+ \
+ (cache_ptr)->slist_size -= (old_size); \
+ (cache_ptr)->slist_size += (new_size); \
\
- HDassert( (cache_ptr)->slist_ring_size[(entry_ptr->ring)] >=(old_size) ); \
- ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (old_size); \
- ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (new_size); \
+ HDassert( (cache_ptr)->slist_ring_size[(entry_ptr->ring)] >= \
+ (old_size) ); \
+ ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (old_size); \
+ ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (new_size); \
\
- HDassert( (new_size) <= (cache_ptr)->slist_size ); \
- HDassert( ( (cache_ptr)->slist_len > 1 ) || \
- ( (cache_ptr)->slist_size == (new_size) ) ); \
+ HDassert( (new_size) <= (cache_ptr)->slist_size ); \
+ HDassert( ( (cache_ptr)->slist_len > 1 ) || \
+ ( (cache_ptr)->slist_size == (new_size) ) ); \
+ \
+ } else { /* slist disabled */ \
+ \
+ HDassert( (cache_ptr)->slist_len == 0 ); \
+ HDassert( (cache_ptr)->slist_size == 0 ); \
+ } \
} /* H5C__UPDATE_SLIST_FOR_SIZE_CHANGE */
#endif /* H5C_DO_SANITY_CHECKS */
@@ -3723,10 +3852,11 @@ typedef struct H5C_tag_info_t {
* one.
*
* entry_watched_for_removal: Pointer to an instance of H5C_cache_entry_t
- * which contains the 'next' entry for an iteration. Removing
+ * which contains the 'next' entry for an iteration. Removing
* this entry must trigger a rescan of the iteration, so each
* entry removed from the cache is compared against this pointer
- * and the pointer is reset to NULL if the watched entry is removed.
+ * and the pointer is reset to NULL if the watched entry is
+ * removed.
* (This functions similarly to a "dead man's switch")
*
*
@@ -3740,6 +3870,36 @@ typedef struct H5C_tag_info_t {
* are flushed. (this has been changed -- dirty entries are now removed from
* the skip list as they are flushed. JRM - 10/25/05)
*
+ * Update 4/21/20:
+ *
+ * Profiling indicates that the cost of maintaining the skip list is
+ * significant. As it is only used on flush and close, maintaining it
+ * only when needed is an obvious optimization.
+ *
+ * To do this, we add a flag to control maintenance of the skip list.
+ * This flag is initially set to FALSE, which disables all operations
+ * on the skip list.
+ *
+ * At the beginning of either flush or close, we scan the index list,
+ * insert all dirty entries in the skip list, and enable operations
+ * on the skip list by setting the above control flag to TRUE.
+ *
+ * At the end of a complete flush, we verify that the skip list is empty,
+ * and set the control flag back to false, so as to avoid skip list
+ * maintenance overhead until the next flush or close.
+ *
+ * In the case of a partial flush (i.e. flush marked entries), we remove
+ * all remaining entries from the skip list, and then set the control flag
+ * back to FALSE -- again avoiding skip list maintenance overhead until
+ * the next flush or close.
+ *
+ * slist_enabled: Boolean flag used to control operation of the skip
+ * list. If this field is FALSE, operations on the
+ * slist are no-ops, and the slist must be empty. If
+ * it is TRUE, operations on the slist proceed as usual,
+ * and all dirty entries in the metadata cache must be
+ * listed in the slist.
+ *
* slist_changed: Boolean flag used to indicate whether the contents of
* the slist has changed since the last time this flag was
* reset. This is used in the cache flush code to detect
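
The protocol in the Update 4/21/20 note maps onto the new H5C_set_slist_enabled() routine (see its declaration in the H5Cprivate.h hunk below). A hedged sketch of the intended call sequence around a full flush; the reading of the clear_slist parameter -- FALSE when the slist is expected to drain naturally, TRUE to empty it after a partial flush -- is inferred from the note above, and error handling is elided:

    /* Illustrative only -- not part of the patch. */
    static herr_t
    flush_with_slist(H5F_t *f, H5C_t *cache_ptr)
    {
        /* build the slist from the index and enable slist maintenance */
        if(H5C_set_slist_enabled(cache_ptr, TRUE, FALSE) < 0)
            return FAIL;

        /* existing internal flush entry point */
        if(H5C_flush_cache(f, H5C__NO_FLAGS_SET) < 0)
            return FAIL;

        /* a complete flush leaves the slist empty -- just disable it */
        if(H5C_set_slist_enabled(cache_ptr, FALSE, FALSE) < 0)
            return FAIL;

        return SUCCEED;
    }
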
@@ -4649,6 +4809,7 @@ typedef struct H5C_tag_info_t {
* NDEBUG is not #defined.
*
****************************************************************************/
+
struct H5C_t {
uint32_t magic;
hbool_t flush_in_progress;
@@ -4664,7 +4825,7 @@ struct H5C_t {
hbool_t evictions_enabled;
hbool_t close_warning_received;
- /* Fields for maintaining [hash table] index of entries */
+ /* Fields for maintaining the [hash table] index of entries */
uint32_t index_len;
size_t index_size;
uint32_t index_ring_len[H5C_RING_NTYPES];
@@ -4685,6 +4846,7 @@ struct H5C_t {
H5C_cache_entry_t * entry_watched_for_removal;
/* Fields for maintaining list of in-order entries, for flushing */
+ hbool_t slist_enabled;
hbool_t slist_changed;
uint32_t slist_len;
size_t slist_size;
@@ -4882,7 +5044,8 @@ struct H5C_t {
#ifndef NDEBUG
int64_t get_entry_ptr_from_addr_counter;
#endif /* NDEBUG */
-};
+
+}; /* H5C_t */
/* Define typedef for tagged cache entry iteration callbacks */
typedef int (*H5C_tag_iter_cb_t)(H5C_cache_entry_t *entry, void *ctx);
diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h
index 7f79452..3203da6 100644
--- a/src/H5Cprivate.h
+++ b/src/H5Cprivate.h
@@ -2287,7 +2287,10 @@ H5_DLL herr_t H5C_resize_entry(void *thing, size_t new_size);
H5_DLL herr_t H5C_set_cache_auto_resize_config(H5C_t *cache_ptr, H5C_auto_size_ctl_t *config_ptr);
H5_DLL herr_t H5C_set_cache_image_config(const H5F_t *f, H5C_t *cache_ptr,
H5C_cache_image_ctl_t *config_ptr);
-H5_DLL herr_t H5C_set_evictions_enabled(H5C_t *cache_ptr, hbool_t evictions_enabled);
+H5_DLL herr_t H5C_set_evictions_enabled(H5C_t *cache_ptr,
+ hbool_t evictions_enabled);
+H5_DLL herr_t H5C_set_slist_enabled(H5C_t *cache_ptr, hbool_t slist_enabled,
+ hbool_t clear_slist);
H5_DLL herr_t H5C_set_prefix(H5C_t *cache_ptr, char *prefix);
H5_DLL herr_t H5C_stats(H5C_t *cache_ptr, const char *cache_name,
hbool_t display_detailed_stats);
diff --git a/src/H5Fint.c b/src/H5Fint.c
index 29bbd45..9dad1d7 100644
--- a/src/H5Fint.c
+++ b/src/H5Fint.c
@@ -1971,6 +1971,11 @@ H5F__flush_phase2(H5F_t *f, hbool_t closing)
/* Sanity check arguments */
HDassert(f);
+ /* Inform the metadata cache that we are about to flush */
+ if(H5AC_prep_for_file_flush(f) < 0)
+ /* Push error, but keep going*/
+ HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "prep for MDC flush failed")
+
/* Flush the entire metadata cache */
if(H5AC_flush(f) < 0)
/* Push error, but keep going*/
@@ -2003,6 +2008,11 @@ H5F__flush_phase2(H5F_t *f, hbool_t closing)
H5CX_set_mpi_file_flushing(FALSE);
#endif /* H5_HAVE_PARALLEL */
+ /* Inform the metadata cache that we are done with the flush */
+ if(H5AC_secure_from_file_flush(f) < 0)
+ /* Push error, but keep going*/
+ HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "secure from MDC flush failed")
+
/* Flush out the metadata accumulator */
if(H5F__accum_flush(f->shared) < 0)
/* Push error, but keep going*/
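
Taken together, the two H5Fint.c hunks bracket the metadata cache flush with the new prep/secure pair, so the slist only exists for the duration of the flush. A condensed sketch of the resulting sequence (illustrative; the real code pushes errors and keeps going rather than returning early):

    static herr_t
    flush_metadata(H5F_t *f)
    {
        /* set up the MDC slist before flushing ... */
        if(H5AC_prep_for_file_flush(f) < 0)
            return FAIL;

        /* ... flush the metadata cache ... */
        if(H5AC_flush(f) < 0)
            return FAIL;

        /* ... and take the slist down again afterwards */
        if(H5AC_secure_from_file_flush(f) < 0)
            return FAIL;

        return SUCCEED;
    }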