summaryrefslogtreecommitdiffstats
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/CMakeLists.txt8
-rw-r--r--src/H5AC.c240
-rw-r--r--src/H5ACdbg.c6
-rw-r--r--src/H5ACprivate.h12
-rw-r--r--src/H5C.c10061
-rw-r--r--src/H5Cdbg.c9
-rw-r--r--src/H5Cimage.c9
-rw-r--r--src/H5Cmpio.c60
-rw-r--r--src/H5Cpkg.h1273
-rw-r--r--src/H5Cprivate.h1648
-rw-r--r--src/H5Cquery.c10
-rw-r--r--src/H5FDdevelop.h267
-rw-r--r--src/H5FDmulti.c27
-rw-r--r--src/H5FDprivate.h3
-rw-r--r--src/H5FDpublic.h243
-rw-r--r--src/H5Fint.c22
-rw-r--r--src/H5Fpkg.h9
-rw-r--r--src/H5Fvfd_swmr.c40
-rw-r--r--src/H5HG.c2
-rw-r--r--src/H5HGprivate.h2
-rw-r--r--src/H5HGtrap.c48
-rw-r--r--src/H5Idevelop.h139
-rw-r--r--src/H5Iprivate.h3
-rw-r--r--src/H5Ipublic.h90
-rw-r--r--src/H5Ldevelop.h314
-rw-r--r--src/H5Lprivate.h3
-rw-r--r--src/H5Lpublic.h257
-rw-r--r--src/H5MF.c4
-rw-r--r--src/H5MFsection.c2
-rw-r--r--src/H5O.c7
-rw-r--r--src/H5PB.c1196
-rw-r--r--src/H5PBpkg.h766
-rw-r--r--src/H5TSdevelop.h (renamed from src/H5TSpublic.h)14
-rw-r--r--src/H5TSprivate.h4
-rw-r--r--src/H5Tdevelop.h227
-rw-r--r--src/H5Tprivate.h3
-rw-r--r--src/H5Tpublic.h179
-rw-r--r--src/H5VLcallback.c2
-rw-r--r--src/H5VLnative.h29
-rw-r--r--src/H5VLnative_attr.c31
-rw-r--r--src/H5VLnative_dataset.c31
-rw-r--r--src/H5VLnative_datatype.c31
-rw-r--r--src/H5VLnative_file.c39
-rw-r--r--src/H5VLnative_group.c31
-rw-r--r--src/H5VLnative_introspect.c33
-rw-r--r--src/H5VLnative_link.c31
-rw-r--r--src/H5VLnative_object.c33
-rw-r--r--src/H5VLnative_token.c4
-rw-r--r--src/H5Zdevelop.h421
-rw-r--r--src/H5Zprivate.h3
-rw-r--r--src/H5Zpublic.h373
-rw-r--r--src/H5err.txt4
-rw-r--r--src/H5private.h26
-rw-r--r--src/H5public.h38
-rw-r--r--src/H5system.c44
-rw-r--r--src/H5trace.c2
-rw-r--r--src/H5win32defs.h1
-rw-r--r--src/Makefile.am13
-rw-r--r--src/hdf5.h13
59 files changed, 10078 insertions, 8362 deletions
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 5ad2bae..a055271 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -254,6 +254,7 @@ set (H5FD_SOURCES
set (H5FD_HDRS
${HDF5_SRC_DIR}/H5FDcore.h
+ ${HDF5_SRC_DIR}/H5FDdevelop.h
${HDF5_SRC_DIR}/H5FDdirect.h
${HDF5_SRC_DIR}/H5FDfamily.h
${HDF5_SRC_DIR}/H5FDhdfs.h
@@ -358,7 +359,6 @@ set (H5HG_SOURCES
${HDF5_SRC_DIR}/H5HGcache.c
${HDF5_SRC_DIR}/H5HGdbg.c
${HDF5_SRC_DIR}/H5HGquery.c
- ${HDF5_SRC_DIR}/H5HGtrap.c
)
set (H5HG_HDRS
@@ -394,6 +394,7 @@ set (H5I_SOURCES
${HDF5_SRC_DIR}/H5Itest.c
)
set (H5I_HDRS
+ ${HDF5_SRC_DIR}/H5Idevelop.h
${HDF5_SRC_DIR}/H5Ipublic.h
)
IDE_GENERATED_PROPERTIES ("H5I" "${H5I_HDRS}" "${H5I_SOURCES}" )
@@ -406,6 +407,7 @@ set (H5L_SOURCES
${HDF5_SRC_DIR}/H5Lint.c
)
set (H5L_HDRS
+ ${HDF5_SRC_DIR}/H5Ldevelop.h
${HDF5_SRC_DIR}/H5Lpublic.h
)
IDE_GENERATED_PROPERTIES ("H5L" "${H5L_HDRS}" "${H5L_SOURCES}" )
@@ -641,6 +643,7 @@ set (H5T_SOURCES
)
set (H5T_HDRS
+ ${HDF5_SRC_DIR}/H5Tdevelop.h
${HDF5_SRC_DIR}/H5Tpublic.h
)
IDE_GENERATED_PROPERTIES ("H5T" "${H5T_HDRS}" "${H5T_SOURCES}" )
@@ -650,7 +653,7 @@ set (H5TS_SOURCES
${HDF5_SRC_DIR}/H5TS.c
)
set (H5TS_HDRS
- ${HDF5_SRC_DIR}/H5TSpublic.h
+ ${HDF5_SRC_DIR}/H5TSdevelop.h
)
IDE_GENERATED_PROPERTIES ("H5TS" "${H5TS_HDRS}" "${H5TS_SOURCES}" )
@@ -722,6 +725,7 @@ endif ()
set (H5Z_HDRS
+ ${HDF5_SRC_DIR}/H5Zdevelop.h
${HDF5_SRC_DIR}/H5Zpublic.h
)
IDE_GENERATED_PROPERTIES ("H5Z" "${H5Z_HDRS}" "${H5Z_SOURCES}" )
diff --git a/src/H5AC.c b/src/H5AC.c
index 08cedca..c913805 100644
--- a/src/H5AC.c
+++ b/src/H5AC.c
@@ -43,7 +43,6 @@
#include "H5CXprivate.h" /* API Contexts */
#include "H5Eprivate.h" /* Error handling */
#include "H5Fpkg.h" /* Files */
-#include "H5FDprivate.h" /* File drivers */
#include "H5Iprivate.h" /* IDs */
#include "H5Pprivate.h" /* Property lists */
#include "H5SLprivate.h" /* Skip Lists */
@@ -61,8 +60,8 @@
/********************/
static herr_t H5AC__check_if_write_permitted(const H5F_t *f, hbool_t *write_permitted_ptr);
-static herr_t H5AC__ext_config_2_int_config(H5AC_cache_config_t *ext_conf_ptr,
- H5C_auto_size_ctl_t *int_conf_ptr);
+static herr_t H5AC__ext_config_2_int_config(const H5AC_cache_config_t *ext_conf_ptr,
+ H5C_auto_size_ctl_t * int_conf_ptr);
#if H5AC_DO_TAGGING_SANITY_CHECKS
static herr_t H5AC__verify_tag(const H5AC_class_t *type);
#endif /* H5AC_DO_TAGGING_SANITY_CHECKS */
@@ -94,29 +93,27 @@ hbool_t H5_coll_api_sanity_check_g = false;
*/
static const H5AC_class_t *const H5AC_class_s[] = {
- H5AC_BT, /* ( 0) B-tree nodes */
- H5AC_SNODE, /* ( 1) symbol table nodes */
- H5AC_LHEAP_PRFX, /* ( 2) local heap prefix */
- H5AC_LHEAP_DBLK, /* ( 3) local heap data block */
- H5AC_GHEAP, /* ( 4) global heap */
- H5AC_OHDR, /* ( 5) object header */
- H5AC_OHDR_CHK, /* ( 6) object header chunk */
- H5AC_BT2_HDR, /* ( 7) v2 B-tree header */
- H5AC_BT2_INT, /* ( 8) v2 B-tree internal node */
- H5AC_BT2_LEAF, /* ( 9) v2 B-tree leaf node */
- H5AC_FHEAP_HDR, /* (10) fractal heap header */
- H5AC_FHEAP_DBLOCK, /* (11) fractal heap direct block */
- H5AC_FHEAP_IBLOCK, /* (12) fractal heap indirect block */
- H5AC_FSPACE_HDR, /* (13) free space header */
- H5AC_FSPACE_SINFO, /* (14) free space sections */
- H5AC_SOHM_TABLE, /* (15) shared object header message */
- /* master table */
- H5AC_SOHM_LIST, /* (16) shared message index stored as */
- /* a list */
- H5AC_EARRAY_HDR, /* (17) extensible array header */
- H5AC_EARRAY_IBLOCK, /* (18) extensible array index block */
- H5AC_EARRAY_SBLOCK, /* (19) extensible array super block */
- H5AC_EARRAY_DBLOCK, /* (20) extensible array data block */
+ H5AC_BT, /* ( 0) B-tree nodes */
+ H5AC_SNODE, /* ( 1) symbol table nodes */
+ H5AC_LHEAP_PRFX, /* ( 2) local heap prefix */
+ H5AC_LHEAP_DBLK, /* ( 3) local heap data block */
+ H5AC_GHEAP, /* ( 4) global heap */
+ H5AC_OHDR, /* ( 5) object header */
+ H5AC_OHDR_CHK, /* ( 6) object header chunk */
+ H5AC_BT2_HDR, /* ( 7) v2 B-tree header */
+ H5AC_BT2_INT, /* ( 8) v2 B-tree internal node */
+ H5AC_BT2_LEAF, /* ( 9) v2 B-tree leaf node */
+ H5AC_FHEAP_HDR, /* (10) fractal heap header */
+ H5AC_FHEAP_DBLOCK, /* (11) fractal heap direct block */
+ H5AC_FHEAP_IBLOCK, /* (12) fractal heap indirect block */
+ H5AC_FSPACE_HDR, /* (13) free space header */
+ H5AC_FSPACE_SINFO, /* (14) free space sections */
+ H5AC_SOHM_TABLE, /* (15) shared object header message master table */
+ H5AC_SOHM_LIST, /* (16) shared message index stored as a list */
+ H5AC_EARRAY_HDR, /* (17) extensible array header */
+ H5AC_EARRAY_IBLOCK, /* (18) extensible array index block */
+ H5AC_EARRAY_SBLOCK, /* (19) extensible array super block */
+ H5AC_EARRAY_DBLOCK, /* (20) extensible array data block */
H5AC_EARRAY_DBLK_PAGE, /* (21) extensible array data block page */
H5AC_FARRAY_HDR, /* (22) fixed array header */
H5AC_FARRAY_DBLOCK, /* (23) fixed array data block */
@@ -359,19 +356,14 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr, H5AC_cache_image_co
if (NULL == (aux_ptr->candidate_slist_ptr = H5SL_create(H5SL_TYPE_HADDR, NULL)))
HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create candidate entry list")
- if (aux_ptr != NULL)
- if (aux_ptr->mpi_rank == 0)
- f->shared->cache = H5C_create(H5AC__DEFAULT_MAX_CACHE_SIZE, H5AC__DEFAULT_MIN_CLEAN_SIZE,
- (H5AC_NTYPES - 1), H5AC_class_s, H5AC__check_if_write_permitted,
- TRUE, H5AC__log_flushed_entry, (void *)aux_ptr);
- else
- f->shared->cache =
- H5C_create(H5AC__DEFAULT_MAX_CACHE_SIZE, H5AC__DEFAULT_MIN_CLEAN_SIZE, (H5AC_NTYPES - 1),
- H5AC_class_s, H5AC__check_if_write_permitted, TRUE, NULL, (void *)aux_ptr);
+ if (aux_ptr->mpi_rank == 0)
+ f->shared->cache = H5C_create(H5AC__DEFAULT_MAX_CACHE_SIZE, H5AC__DEFAULT_MIN_CLEAN_SIZE,
+ (H5AC_NTYPES - 1), H5AC_class_s, H5AC__check_if_write_permitted,
+ TRUE, H5AC__log_flushed_entry, (void *)aux_ptr);
else
f->shared->cache =
H5C_create(H5AC__DEFAULT_MAX_CACHE_SIZE, H5AC__DEFAULT_MIN_CLEAN_SIZE, (H5AC_NTYPES - 1),
- H5AC_class_s, H5AC__check_if_write_permitted, TRUE, NULL, NULL);
+ H5AC_class_s, H5AC__check_if_write_permitted, TRUE, NULL, (void *)aux_ptr);
} /* end if */
else {
#endif /* H5_HAVE_PARALLEL */
@@ -471,6 +463,14 @@ done:
* Programmer: Robb Matzke
* Jul 9 1997
*
+ * Changes:
+ *
+ * In the parallel case, added code to setup the MDC slist
+ * before the call to H5AC__flush_entries() and take it down
+ * afterwards.
+ *
+ * JRM -- 7/29/20
+ *
*-------------------------------------------------------------------------
*/
herr_t
@@ -522,19 +522,46 @@ H5AC_dest(H5F_t *f)
HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5C_clear_coll_entries() failed")
aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(f->shared->cache);
- if (aux_ptr)
+
+ if (aux_ptr) {
+
/* Sanity check */
HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
- /* If the file was opened R/W, attempt to flush all entries
- * from rank 0 & Bcast clean list to other ranks.
- *
- * Must not flush in the R/O case, as this will trigger the
- * free space manager settle routines.
- */
- if (H5F_ACC_RDWR & H5F_INTENT(f))
- if (H5AC__flush_entries(f) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush")
+ /* If the file was opened R/W, attempt to flush all entries
+ * from rank 0 & Bcast clean list to other ranks.
+ *
+ * Must not flush in the R/O case, as this will trigger the
+ * free space manager settle routines.
+ *
+ * Must also enable the skip list before the call to
+ * H5AC__flush_entries() and disable it afterwards, as the
+ * skip list will be disabled after the previous flush.
+ *
+ * Note that H5C_dest() does slist setup and take down as well.
+ * Unfortunately, we can't do the setup and take down just once,
+ * as H5C_dest() is called directly in the test code.
+ *
+ * Fortunately, the cache should be clean or close to it at this
+ * point, so the overhead should be minimal.
+ */
+ if (H5F_ACC_RDWR & H5F_INTENT(f)) {
+
+ /* enable and load the slist */
+ if (H5C_set_slist_enabled(f->shared->cache, TRUE, FALSE) < 0)
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist enabled failed")
+
+ if (H5AC__flush_entries(f) < 0)
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush")
+
+ /* disable the slist -- should be empty */
+ if (H5C_set_slist_enabled(f->shared->cache, FALSE, FALSE) < 0)
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "disable slist failed")
+ }
+ }
#endif /* H5_HAVE_PARALLEL */
/* Destroy the cache */
@@ -1211,6 +1238,107 @@ done:
} /* H5AC_prep_for_file_close() */
/*-------------------------------------------------------------------------
+ *
+ * Function: H5AC_prep_for_file_flush
+ *
+ * Purpose: This function should be called just prior to the first
+ * call to H5AC_flush() during a file flush.
+ *
+ * Its purpose is to handle any setup required prior to a
+ * metadata cache flush.
+ *
+ * Initially, this means setting up the slist prior to the
+ * flush. We do this in a separate call because
+ * H5F__flush_phase2() makes repeated calls to H5AC_flush().
+ * Handling this detail in separate calls allows us to avoid
+ * the overhead of setting up and taking down the skip list
+ * repeatedly.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 5/5/20
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC_prep_for_file_flush(H5F_t *f)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->cache);
+
+ if (H5C_set_slist_enabled(f->shared->cache, TRUE, FALSE) < 0)
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist enabled failed")
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC_prep_for_file_flush() */
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5AC_secure_from_file_flush
+ *
+ * Purpose: This function should be called just after the last
+ * call to H5AC_flush() during a file flush.
+ *
+ * Its purpose is to perform any necessary cleanup after the
+ * metadata cache flush.
+ *
+ * The objective of the call is to allow the metadata cache
+ * to do any necessary cleanup work after a cache
+ * flush.
+ *
+ * Initially, this means taking down the slist after the
+ * flush. We do this in a separate call because
+ * H5F__flush_phase2() makes repeated calls to H5AC_flush().
+ * Handling this detail in separate calls allows us to avoid
+ * the overhead of setting up and taking down the skip list
+ * repeatedly.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 5/5/20
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC_secure_from_file_flush(H5F_t *f)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->cache);
+
+ if (H5C_set_slist_enabled(f->shared->cache, FALSE, FALSE) < 0)
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist enabled failed")
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC_secure_from_file_flush() */
+
+/*-------------------------------------------------------------------------
+ *
* Function: H5AC_create_flush_dependency()
*
* Purpose: Create a flush dependency between two entries in the metadata
@@ -1688,14 +1816,14 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_get_cache_size(H5AC_t *cache_ptr, size_t *max_size_ptr, size_t *min_clean_size_ptr, size_t *cur_size_ptr,
- uint32_t *cur_num_entries_ptr)
+H5AC_get_cache_size(const H5AC_t *cache_ptr, size_t *max_size_ptr, size_t *min_clean_size_ptr,
+ size_t *cur_size_ptr, uint32_t *cur_num_entries_ptr)
{
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
- if (H5C_get_cache_size((H5C_t *)cache_ptr, max_size_ptr, min_clean_size_ptr, cur_size_ptr,
+ if (H5C_get_cache_size((const H5C_t *)cache_ptr, max_size_ptr, min_clean_size_ptr, cur_size_ptr,
cur_num_entries_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_cache_size() failed")
@@ -1742,13 +1870,13 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_get_cache_hit_rate(H5AC_t *cache_ptr, double *hit_rate_ptr)
+H5AC_get_cache_hit_rate(const H5AC_t *cache_ptr, double *hit_rate_ptr)
{
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
- if (H5C_get_cache_hit_rate((H5C_t *)cache_ptr, hit_rate_ptr) < 0)
+ if (H5C_get_cache_hit_rate((const H5C_t *)cache_ptr, hit_rate_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_cache_hit_rate() failed")
done:
@@ -1794,7 +1922,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_set_cache_auto_resize_config(H5AC_t *cache_ptr, H5AC_cache_config_t *config_ptr)
+H5AC_set_cache_auto_resize_config(H5AC_t *cache_ptr, const H5AC_cache_config_t *config_ptr)
{
H5C_auto_size_ctl_t internal_config;
herr_t ret_value = SUCCEED; /* Return value */
@@ -1895,7 +2023,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_validate_config(H5AC_cache_config_t *config_ptr)
+H5AC_validate_config(const H5AC_cache_config_t *config_ptr)
{
H5C_auto_size_ctl_t internal_config;
herr_t ret_value = SUCCEED; /* Return value */
@@ -2076,7 +2204,7 @@ H5AC__check_if_write_permitted(const H5F_t
*-------------------------------------------------------------------------
*/
static herr_t
-H5AC__ext_config_2_int_config(H5AC_cache_config_t *ext_conf_ptr, H5C_auto_size_ctl_t *int_conf_ptr)
+H5AC__ext_config_2_int_config(const H5AC_cache_config_t *ext_conf_ptr, H5C_auto_size_ctl_t *int_conf_ptr)
{
herr_t ret_value = SUCCEED; /* Return value */
@@ -2637,13 +2765,13 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_get_mdc_image_info(H5AC_t *cache_ptr, haddr_t *image_addr, hsize_t *image_len)
+H5AC_get_mdc_image_info(const H5AC_t *cache_ptr, haddr_t *image_addr, hsize_t *image_len)
{
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
- if (H5C_get_mdc_image_info((H5C_t *)cache_ptr, image_addr, image_len) < 0)
+ if (H5C_get_mdc_image_info((const H5C_t *)cache_ptr, image_addr, image_len) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't retrieve cache image info")
done:
diff --git a/src/H5ACdbg.c b/src/H5ACdbg.c
index 71ee79c..2d24adb 100644
--- a/src/H5ACdbg.c
+++ b/src/H5ACdbg.c
@@ -257,10 +257,6 @@ H5AC_flush_dependency_exists(H5F_t *f, haddr_t parent_addr, haddr_t child_addr,
*
* Programmer: John Mainzer, 5/30/14
*
- * Changes: None.
- *
- * JRM -- 9/17/16
- *
*-------------------------------------------------------------------------
*/
#ifndef NDEBUG
@@ -335,8 +331,6 @@ H5AC_get_serialization_in_progress(H5F_t *f)
*
* Programmer: John Mainzer, 6/18/16
*
- * Changes: None.
- *
*-------------------------------------------------------------------------
*/
#ifndef NDEBUG
diff --git a/src/H5ACprivate.h b/src/H5ACprivate.h
index e85f90c..188f9ee 100644
--- a/src/H5ACprivate.h
+++ b/src/H5ACprivate.h
@@ -385,6 +385,8 @@ H5_DLL herr_t H5AC_insert_entry(H5F_t *f, const H5AC_class_t *type, haddr_t addr
unsigned int flags);
H5_DLL herr_t H5AC_pin_protected_entry(void *thing);
H5_DLL herr_t H5AC_prep_for_file_close(H5F_t *f);
+H5_DLL herr_t H5AC_prep_for_file_flush(H5F_t *f);
+H5_DLL herr_t H5AC_secure_from_file_flush(H5F_t *f);
H5_DLL herr_t H5AC_create_flush_dependency(void *parent_thing, void *child_thing);
H5_DLL void * H5AC_protect(H5F_t *f, const H5AC_class_t *type, haddr_t addr, void *udata, unsigned flags);
H5_DLL herr_t H5AC_resize_entry(void *thing, size_t new_size);
@@ -402,20 +404,20 @@ H5_DLL herr_t H5AC_evict(H5F_t *f);
H5_DLL herr_t H5AC_expunge_entry(H5F_t *f, const H5AC_class_t *type, haddr_t addr, unsigned flags);
H5_DLL herr_t H5AC_remove_entry(void *entry);
H5_DLL herr_t H5AC_get_cache_auto_resize_config(const H5AC_t *cache_ptr, H5AC_cache_config_t *config_ptr);
-H5_DLL herr_t H5AC_get_cache_size(H5AC_t *cache_ptr, size_t *max_size_ptr, size_t *min_clean_size_ptr,
+H5_DLL herr_t H5AC_get_cache_size(const H5AC_t *cache_ptr, size_t *max_size_ptr, size_t *min_clean_size_ptr,
size_t *cur_size_ptr, uint32_t *cur_num_entries_ptr);
H5_DLL herr_t H5AC_get_cache_flush_in_progress(H5AC_t *cache_ptr, hbool_t *flush_in_progress_ptr);
-H5_DLL herr_t H5AC_get_cache_hit_rate(H5AC_t *cache_ptr, double *hit_rate_ptr);
+H5_DLL herr_t H5AC_get_cache_hit_rate(const H5AC_t *cache_ptr, double *hit_rate_ptr);
H5_DLL herr_t H5AC_reset_cache_hit_rate_stats(H5AC_t *cache_ptr);
-H5_DLL herr_t H5AC_set_cache_auto_resize_config(H5AC_t *cache_ptr, H5AC_cache_config_t *config_ptr);
-H5_DLL herr_t H5AC_validate_config(H5AC_cache_config_t *config_ptr);
+H5_DLL herr_t H5AC_set_cache_auto_resize_config(H5AC_t *cache_ptr, const H5AC_cache_config_t *config_ptr);
+H5_DLL herr_t H5AC_validate_config(const H5AC_cache_config_t *config_ptr);
/* Cache image routines */
H5_DLL herr_t H5AC_load_cache_image_on_next_protect(H5F_t *f, haddr_t addr, hsize_t len, hbool_t rw);
H5_DLL herr_t H5AC_validate_cache_image_config(H5AC_cache_image_config_t *config_ptr);
H5_DLL hbool_t H5AC_cache_image_pending(const H5F_t *f);
H5_DLL herr_t H5AC_force_cache_image_load(H5F_t *f);
-H5_DLL herr_t H5AC_get_mdc_image_info(H5AC_t *cache_ptr, haddr_t *image_addr, hsize_t *image_len);
+H5_DLL herr_t H5AC_get_mdc_image_info(const H5AC_t *cache_ptr, haddr_t *image_addr, hsize_t *image_len);
/* Tag & Ring routines */
H5_DLL void H5AC_tag(haddr_t metadata_tag, haddr_t *prev_tag);
diff --git a/src/H5C.c b/src/H5C.c
index 2b9ecd5..8e72767 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -11,6 +11,9 @@
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+/* TEMPORARY (during VFD SWMR sync with develop - reduces churn) */
+/* clang-format off */
+
/*-------------------------------------------------------------------------
*
* Created: H5C.c
@@ -29,23 +32,27 @@
/**************************************************************************
*
- * To Do:
+ * To Do:
*
- * Code Changes:
+ * Code Changes:
*
* - Remove extra functionality in H5C__flush_single_entry()?
*
* - Change protect/unprotect to lock/unlock.
*
- * - Flush entries in increasing address order in
- * H5C__make_space_in_cache().
+ * - Flush entries in increasing address order in
+ * H5C__make_space_in_cache().
+ *
+ * - Also in H5C__make_space_in_cache(), use high and low water marks
+ * to reduce the number of I/O calls.
*
- * - Also in H5C__make_space_in_cache(), use high and low water marks
- * to reduce the number of I/O calls.
+ * - When flushing, attempt to combine contiguous entries to reduce
+ * I/O overhead. Can't do this just yet as some entries are not
+ * contiguous. Do this in parallel only or in serial as well?
*
- * - When flushing, attempt to combine contiguous entries to reduce
- * I/O overhead. Can't do this just yet as some entries are not
- * contiguous. Do this in parallel only or in serial as well?
+ * - Fix nodes in memory to point directly to the skip list node from
+ * the LRU list, eliminating skip list lookups when evicting objects
+ * from the cache.
*
* - Create MPI type for dirty objects when flushing in parallel.
*
@@ -130,15 +137,15 @@ static herr_t H5C__flash_increase_cache_size(H5C_t *cache_ptr, size_t old_entry_
static herr_t H5C__flush_invalidate_cache(H5F_t *f, unsigned flags);
-static herr_t H5C_flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags);
+static herr_t H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags);
static herr_t H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags);
-static void *H5C_load_entry(H5F_t *f,
+static void *H5C__load_entry(H5F_t *f,
#ifdef H5_HAVE_PARALLEL
- hbool_t coll_access,
+ hbool_t coll_access,
#endif /* H5_HAVE_PARALLEL */
- const H5C_class_t *type, haddr_t addr, void *udata);
+ const H5C_class_t *type, haddr_t addr, void *udata);
static herr_t H5C__mark_flush_dep_dirty(H5C_cache_entry_t *entry);
@@ -146,18 +153,17 @@ static herr_t H5C__mark_flush_dep_clean(H5C_cache_entry_t *entry);
static herr_t H5C__serialize_ring(H5F_t *f, H5C_ring_t ring);
static herr_t H5C__serialize_single_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr);
-
static herr_t H5C__verify_len_eoa(H5F_t *f, const H5C_class_t *type, haddr_t addr, size_t *len,
hbool_t actual);
#if H5C_DO_SLIST_SANITY_CHECKS
-static hbool_t H5C_entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr);
+static hbool_t H5C__entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr);
#endif /* H5C_DO_SLIST_SANITY_CHECKS */
#if H5C_DO_EXTREME_SANITY_CHECKS
-static herr_t H5C_validate_lru_list(H5C_t *cache_ptr);
-static herr_t H5C_validate_pinned_entry_list(H5C_t *cache_ptr);
-static herr_t H5C_validate_protected_entry_list(H5C_t *cache_ptr);
+static herr_t H5C__validate_lru_list(H5C_t *cache_ptr);
+static herr_t H5C__validate_pinned_entry_list(H5C_t *cache_ptr);
+static herr_t H5C__validate_protected_entry_list(H5C_t *cache_ptr);
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
#ifndef NDEBUG
@@ -193,16 +199,16 @@ H5FL_SEQ_DEFINE_STATIC(H5C_cache_entry_ptr_t);
* Function: H5C_create
*
* Purpose: Allocate, initialize, and return the address of a new
- * instance of H5C_t.
+ * instance of H5C_t.
*
- * In general, the max_cache_size parameter must be positive,
- * and the min_clean_size parameter must lie in the closed
- * interval [0, max_cache_size].
+ * In general, the max_cache_size parameter must be positive,
+ * and the min_clean_size parameter must lie in the closed
+ * interval [0, max_cache_size].
*
- * The check_write_permitted parameter must either be NULL,
- * or point to a function of type H5C_write_permitted_func_t.
- * If it is NULL, the cache will use the write_permitted
- * flag to determine whether writes are permitted.
+ * The check_write_permitted parameter must either be NULL,
+ * or point to a function of type H5C_write_permitted_func_t.
+ * If it is NULL, the cache will use the write_permitted
+ * flag to determine whether writes are permitted.
*
* Return: Success: Pointer to the new instance.
*
@@ -211,6 +217,82 @@ H5FL_SEQ_DEFINE_STATIC(H5C_cache_entry_ptr_t);
* Programmer: John Mainzer
* 6/2/04
*
+ * Modifications:
+ *
+ * JRM -- 7/20/04
+ * Updated for the addition of the hash table.
+ *
+ * JRM -- 10/5/04
+ * Added call to H5C_reset_cache_hit_rate_stats(). Also
+ * added initialization for cache_is_full flag and for
+ * resize_ctl.
+ *
+ * JRM -- 11/12/04
+ * Added initialization for the new size_decreased field.
+ *
+ * JRM -- 11/17/04
+ * Added/updated initialization for the automatic cache
+ * size control data structures.
+ *
+ * JRM -- 6/24/05
+ * Added support for the new write_permitted field of
+ * the H5C_t structure.
+ *
+ * JRM -- 7/5/05
+ * Added the new log_flush parameter and supporting code.
+ *
+ * JRM -- 9/21/05
+ * Added the new aux_ptr parameter and supporting code.
+ *
+ * JRM -- 1/20/06
+ * Added initialization of the new prefix field in H5C_t.
+ *
+ * JRM -- 3/16/06
+ * Added initialization for the pinned entry related fields.
+ *
+ * JRM -- 5/31/06
+ * Added initialization for the trace_file_ptr field.
+ *
+ * JRM -- 8/19/06
+ * Added initialization for the flush_in_progress field.
+ *
+ * JRM -- 8/25/06
+ * Added initialization for the slist_len_increase and
+ * slist_size_increase fields. These fields are used
+ * for sanity checking in the flush process, and are not
+ * compiled in unless H5C_DO_SANITY_CHECKS is TRUE.
+ *
+ * JRM -- 3/28/07
+ * Added initialization for the new is_read_only and
+ * ro_ref_count fields.
+ *
+ * JRM -- 7/27/07
+ * Added initialization for the new evictions_enabled
+ * field of H5C_t.
+ *
+ * JRM -- 12/31/07
+ * Added initialization for the new flash cache size increase
+ * related fields of H5C_t.
+ *
+ * JRM -- 11/5/08
+ * Added initialization for the new clean_index_size and
+ * dirty_index_size fields of H5C_t.
+ *
+ *
+ * Missing entries?
+ *
+ *
+ * JRM -- 4/20/20
+ * Added initialization for the slist_enabled field. Recall
+ * that the slist is used to flush metadata cache entries
+ * in (roughly) increasing address order. While this is
+ * needed at flush and close, it is not used elsewhere.
+ * The slist_enabled field exists to allow us to construct
+ * the slist when needed, and leave it empty otherwise -- thus
+ * avoiding the overhead of maintaining it.
+ *
+ * JRM -- 4/29/20
+ *
*-------------------------------------------------------------------------
*/
H5C_t *
@@ -289,9 +371,8 @@ H5C_create(size_t max_cache_size, size_t min_clean_size, int max_type_id,
cache_ptr->slist_ring_size[i] = (size_t)0;
} /* end for */
- for (i = 0; i < H5C__HASH_TABLE_LEN; i++) {
+ for (i = 0; i < H5C__HASH_TABLE_LEN; i++)
(cache_ptr->index)[i] = NULL;
- }
cache_ptr->il_len = 0;
cache_ptr->il_size = (size_t)0;
@@ -309,10 +390,16 @@ H5C_create(size_t max_cache_size, size_t min_clean_size, int max_type_id,
cache_ptr->ignore_tags = FALSE;
cache_ptr->num_objs_corked = 0;
+ /* slist field initializations */
+ cache_ptr->slist_enabled = !H5C__SLIST_OPT_ENABLED;
cache_ptr->slist_changed = FALSE;
cache_ptr->slist_len = 0;
cache_ptr->slist_size = (size_t)0;
+ /* slist_ring_len, slist_ring_size, and
+ * slist_ptr initialized above.
+ */
+
#if H5C_DO_SANITY_CHECKS
cache_ptr->slist_len_increase = 0;
cache_ptr->slist_size_increase = 0;
@@ -383,8 +470,8 @@ H5C_create(size_t max_cache_size, size_t min_clean_size, int max_type_id,
(cache_ptr->resize_ctl).max_increment = H5C__DEF_AR_MAX_INCREMENT;
(cache_ptr->resize_ctl).flash_incr_mode = H5C_flash_incr__off;
- (cache_ptr->resize_ctl).flash_multiple = 1.0f;
- (cache_ptr->resize_ctl).flash_threshold = 0.25f;
+ (cache_ptr->resize_ctl).flash_multiple = 1.0;
+ (cache_ptr->resize_ctl).flash_threshold = 0.25;
(cache_ptr->resize_ctl).decr_mode = H5C_decr__off;
(cache_ptr->resize_ctl).upper_hr_threshold = H5C__DEF_AR_UPPER_THRESHHOLD;
@@ -618,23 +705,23 @@ H5C_def_auto_resize_rpt_fcn(H5C_t *cache_ptr,
} /* H5C_def_auto_resize_rpt_fcn() */
/*-------------------------------------------------------------------------
- * Function: H5C_free_tag_list_cb
+ * Function: H5C__free_tag_list_cb
*
* Purpose: Callback function to free tag nodes from the skip list.
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: Vailin Choi
- * January 2014
+ * January 2014
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5C_free_tag_list_cb(void *_item, void H5_ATTR_UNUSED *key, void H5_ATTR_UNUSED *op_data)
+H5C__free_tag_list_cb(void *_item, void H5_ATTR_UNUSED *key, void H5_ATTR_UNUSED *op_data)
{
H5C_tag_info_t *tag_info = (H5C_tag_info_t *)_item;
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ FUNC_ENTER_STATIC_NOERR
HDassert(tag_info);
@@ -642,15 +729,15 @@ H5C_free_tag_list_cb(void *_item, void H5_ATTR_UNUSED *key, void H5_ATTR_UNUSED
tag_info = H5FL_FREE(H5C_tag_info_t, tag_info);
FUNC_LEAVE_NOAPI(0)
-} /* H5C_free_tag_list_cb() */
+} /* H5C__free_tag_list_cb() */
/*-------------------------------------------------------------------------
*
* Function: H5C_prep_for_file_close
*
* Purpose: This function should be called just prior to the cache
- * flushes at file close. There should be no protected
- * entries in the cache at this point.
+ * flushes at file close. There should be no protected
+ * entries in the cache at this point.
*
* Return: Non-negative on success/Negative on failure
*
@@ -663,9 +750,8 @@ herr_t
H5C_prep_for_file_close(H5F_t *f)
{
H5C_t * cache_ptr;
- hbool_t image_generated = FALSE; /* Whether a cache image was */
- /* generated */
- herr_t ret_value = SUCCEED; /* Return value */
+ hbool_t image_generated = FALSE; /* Whether a cache image was generated */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -745,12 +831,26 @@ done:
* This function fails if any object are protected since the
* resulting file might not be consistent.
*
- * Note that *cache_ptr has been freed upon successful return.
+ * Note that *cache_ptr has been freed upon successful return.
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: John Mainzer
- * 6/2/04
+ * 6/2/04
+ *
+ * Modifications:
+ *
+ * JRM -- 5/15/20
+ *
+ * Updated the function to enable the slist prior to the
+ * call to H5C__flush_invalidate_cache().
+ *
+ * Arguably, it shouldn't be necessary to re-enable the
+ * slist after the call to H5C__flush_invalidate_cache(), as
+ * the metadata cache should be discarded. However, in the
+ * test code, we make multiple calls to H5C_dest(). Thus
+ * we re-enable the slist on failure if it and the cache
+ * still exist.
*
*-------------------------------------------------------------------------
*/
@@ -772,27 +872,47 @@ H5C_dest(H5F_t *f)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't display cache image stats")
#endif /* H5AC_DUMP_IMAGE_STATS_ON_CLOSE */
+ /* Enable the slist, as it is needed in the flush */
+ if (H5C_set_slist_enabled(f->shared->cache, TRUE, FALSE) < 0)
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist enabled failed")
+
/* Flush and invalidate all cache entries */
if (H5C__flush_invalidate_cache(f, H5C__NO_FLAGS_SET) < 0)
+
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache")
/* Generate & write cache image if requested */
- if (cache_ptr->image_ctl.generate_image)
+ if (cache_ptr->image_ctl.generate_image) {
+
if (H5C__generate_cache_image(f, cache_ptr) < 0)
+
HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "Can't generate metadata cache image")
+ }
if (cache_ptr->slist_ptr != NULL) {
+
+ HDassert(cache_ptr->slist_len == 0);
+ HDassert(cache_ptr->slist_size == 0);
+
H5SL_close(cache_ptr->slist_ptr);
+
cache_ptr->slist_ptr = NULL;
+
} /* end if */
if (cache_ptr->tag_list != NULL) {
- H5SL_destroy(cache_ptr->tag_list, H5C_free_tag_list_cb, NULL);
+
+ H5SL_destroy(cache_ptr->tag_list, H5C__free_tag_list_cb, NULL);
+
cache_ptr->tag_list = NULL;
+
} /* end if */
- if (cache_ptr->log_info != NULL)
+ if (cache_ptr->log_info != NULL) {
+
H5MM_xfree(cache_ptr->log_info);
+ }
#ifndef NDEBUG
#if H5C_DO_SANITY_CHECKS
@@ -810,7 +930,18 @@ H5C_dest(H5F_t *f)
cache_ptr = H5FL_FREE(H5C_t, cache_ptr);
done:
+
+ if ((ret_value < 0) && (cache_ptr) && (cache_ptr->slist_ptr)) {
+
+ /* need this for test code -- see change note for details */
+
+ if (H5C_set_slist_enabled(f->shared->cache, FALSE, FALSE) < 0)
+
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "disable slist on flush dest failure failed")
+ }
+
FUNC_LEAVE_NOAPI(ret_value)
+
} /* H5C_dest() */
/*-------------------------------------------------------------------------
@@ -821,7 +952,15 @@ done:
* Return: Non-negative on success/Negative on failure
*
* Programmer: Vailin Choi
- * Dec 2013
+ * Dec 2013
+ *
+ * Modifications:
+ *
+ * JRM -- 5/5/20
+ *
+ * Added code to enable the skip list prior to the call
+ * to H5C__flush_invalidate_cache(), and disable it
+ * afterwards.
*
*-------------------------------------------------------------------------
*/
@@ -835,10 +974,21 @@ H5C_evict(H5F_t *f)
/* Sanity check */
HDassert(f);
+ /* Enable the slist, as it is needed in the flush */
+ if (H5C_set_slist_enabled(f->shared->cache, TRUE, FALSE) < 0)
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist enabled failed")
+
/* Flush and invalidate all cache entries except the pinned entries */
if (H5C__flush_invalidate_cache(f, H5C__EVICT_ALLOW_LAST_PINS_FLAG) < 0)
+
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to evict entries in the cache")
+ /* Disable the slist */
+ if (H5C_set_slist_enabled(f->shared->cache, FALSE, TRUE) < 0)
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist disabled failed")
+
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_evict() */
@@ -910,7 +1060,6 @@ H5C_evict_or_refresh_all_entries_in_page(H5F_t *f, uint64_t page, uint32_t lengt
H5C_cache_entry_t *entry_ptr;
H5C_cache_entry_t *follow_ptr = NULL;
herr_t ret_value = SUCCEED; /* Return value */
- hbool_t found = FALSE;
FUNC_ENTER_NOAPI(FAIL)
@@ -950,8 +1099,6 @@ H5C_evict_or_refresh_all_entries_in_page(H5F_t *f, uint64_t page, uint32_t lengt
HDassert(length == cache_ptr->page_size ||
page * cache_ptr->page_size + length <= entry_ptr->addr + entry_ptr->size);
- found = TRUE;
-
/* since end of tick occurs only on API call entry in
* the VFD SWMR reader case, the entry must not be protected.
*
@@ -1226,8 +1373,8 @@ done:
* Function: H5C_expunge_entry
*
* Purpose: Use this function to tell the cache to expunge an entry
- * from the cache without writing it to disk even if it is
- * dirty. The entry may not be either pinned or protected.
+ * from the cache without writing it to disk even if it is
+ * dirty. The entry may not be either pinned or protected.
*
* Return: Non-negative on success/Negative on failure
*
@@ -1255,7 +1402,7 @@ H5C_expunge_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, unsigned flag
HDassert(H5F_addr_defined(addr));
#if H5C_DO_EXTREME_SANITY_CHECKS
- if (H5C_validate_lru_list(cache_ptr) < 0)
+ if (H5C__validate_lru_list(cache_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -1290,7 +1437,7 @@ H5C_expunge_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, unsigned flag
done:
#if H5C_DO_EXTREME_SANITY_CHECKS
- if (H5C_validate_lru_list(cache_ptr) < 0)
+ if (H5C__validate_lru_list(cache_ptr) < 0)
HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -1397,8 +1544,8 @@ H5C_flush_cache(H5F_t *f, unsigned flags)
#endif /* H5C_DO_SANITY_CHECKS */
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
+ if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -1577,8 +1724,8 @@ H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, u
#if H5C_DO_EXTREME_SANITY_CHECKS
/* no need to verify that entry is not already in the index as */
/* we already make that check below. */
- if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
+ if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -1713,50 +1860,34 @@ H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, u
H5C__RESET_CACHE_ENTRY_STATS(entry_ptr)
if (cache_ptr->flash_size_increase_possible &&
- (entry_ptr->size > cache_ptr->flash_size_increase_threshold)) {
-
+ (entry_ptr->size > cache_ptr->flash_size_increase_threshold))
if (H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size) < 0)
-
HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C__flash_increase_cache_size failed")
- }
-
- if (cache_ptr->index_size >= cache_ptr->max_cache_size) {
+ if (cache_ptr->index_size >= cache_ptr->max_cache_size)
empty_space = 0;
- }
- else {
-
+ else
empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
- }
- if ((cache_ptr->evictions_enabled) &&
+ if (cache_ptr->evictions_enabled &&
(((cache_ptr->index_size + entry_ptr->size) > cache_ptr->max_cache_size) ||
- ((empty_space + cache_ptr->clean_index_size) < cache_ptr->min_clean_size))) {
+ (((empty_space + cache_ptr->clean_index_size) < cache_ptr->min_clean_size)))) {
size_t space_needed;
- if (empty_space <= entry_ptr->size) {
-
+ if (empty_space <= entry_ptr->size)
cache_ptr->cache_full = TRUE;
- }
if (cache_ptr->check_write_permitted != NULL) {
-
if ((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
-
HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "Can't get write_permitted")
- }
- else {
-
+ } /* end if */
+ else
write_permitted = cache_ptr->write_permitted;
- }
HDassert(entry_ptr->size <= H5C_MAX_ENTRY_SIZE);
space_needed = entry_ptr->size;
-
- if (space_needed > cache_ptr->max_cache_size) {
-
+ if (space_needed > cache_ptr->max_cache_size)
space_needed = cache_ptr->max_cache_size;
- }
/* Note that space_needed is just the amount of space that
* needed to insert the new entry without exceeding the cache
@@ -1796,17 +1927,15 @@ H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, u
H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, FAIL)
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
+ if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed just before done")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
/* If the entry's type has a 'notify' callback send a 'after insertion'
* notice now that the entry is fully integrated into the cache.
*/
- if ((entry_ptr->type->notify) &&
- ((entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_INSERT, entry_ptr) < 0))
-
+ if (entry_ptr->type->notify && (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_INSERT, entry_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry inserted into cache")
H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr)
@@ -1828,8 +1957,8 @@ H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, u
done:
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
+ if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -1843,27 +1972,27 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C_mark_entry_dirty
*
- * Purpose: Mark a pinned or protected entry as dirty. The target entry
- * MUST be either pinned or protected, and MAY be both.
+ * Purpose: Mark a pinned or protected entry as dirty. The target entry
+ * MUST be either pinned or protected, and MAY be both.
*
- * In the protected case, this call is the functional
- * equivalent of setting the H5C__DIRTIED_FLAG on an unprotect
- * call.
+ * In the protected case, this call is the functional
+ * equivalent of setting the H5C__DIRTIED_FLAG on an unprotect
+ * call.
*
- * In the pinned but not protected case, if the entry is not
- * already dirty, the function places function marks the entry
- * dirty and places it on the skip list.
+ * In the pinned but not protected case, if the entry is not
+ * already dirty, the function marks the entry
+ * dirty and places it on the skip list.
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: John Mainzer
* 5/15/06
*
- * JRM -- 11/5/08
- * Added call to H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY() to
- * update the new clean_index_size and dirty_index_size
- * fields of H5C_t in the case that the entry was clean
- * prior to this call, and is pinned and not protected.
+ * JRM -- 11/5/08
+ * Added call to H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY() to
+ * update the new clean_index_size and dirty_index_size
+ * fields of H5C_t in the case that the entry was clean
+ * prior to this call, and is pinned and not protected.
*
*-------------------------------------------------------------------------
*/
@@ -1952,11 +2081,11 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C_mark_entry_clean
*
- * Purpose: Mark a pinned entry as clean. The target entry MUST be pinned.
+ * Purpose: Mark a pinned entry as clean. The target entry MUST be pinned.
*
- * If the entry is not
+ * already clean, the function marks the entry
- * clean and removes it from the skip list.
+ * If the entry is not
+ * already clean, the function places function marks the entry
+ * clean and removes it from the skip list.
*
* Return: Non-negative on success/Negative on failure
*
@@ -2031,8 +2160,8 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C_mark_entry_unserialized
*
- * Purpose: Mark a pinned or protected entry as unserialized. The target
- * entry MUST be either pinned or protected, and MAY be both.
+ * Purpose: Mark a pinned or protected entry as unserialized. The target
+ * entry MUST be either pinned or protected, and MAY be both.
*
* Return: Non-negative on success/Negative on failure
*
@@ -2077,8 +2206,8 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C_mark_entry_serialized
*
- * Purpose: Mark a pinned entry as serialized. The target entry MUST be
- * pinned.
+ * Purpose: Mark a pinned entry as serialized. The target entry MUST be
+ * pinned.
*
* Return: Non-negative on success/Negative on failure
*
@@ -2161,19 +2290,16 @@ H5C_move_entry(H5C_t *cache_ptr, const H5C_class_t *type, haddr_t old_addr, hadd
HDassert(H5F_addr_ne(old_addr, new_addr));
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
-
+ if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
H5C__SEARCH_INDEX(cache_ptr, old_addr, entry_ptr, FAIL)
- if ((entry_ptr == NULL) || (entry_ptr->type != type)) {
-
+ if (entry_ptr == NULL || entry_ptr->type != type)
/* the old item doesn't exist in the cache, so we are done. */
HGOTO_DONE(SUCCEED)
- }
HDassert(entry_ptr->addr == old_addr);
HDassert(entry_ptr->type == type);
@@ -2188,12 +2314,9 @@ H5C_move_entry(H5C_t *cache_ptr, const H5C_class_t *type, haddr_t old_addr, hadd
H5C__SEARCH_INDEX(cache_ptr, new_addr, test_entry_ptr, FAIL)
if (test_entry_ptr != NULL) { /* we are hosed */
-
if (test_entry_ptr->type == type)
-
HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "target already moved & reinserted???")
else
-
HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "new address already in use?")
} /* end if */
@@ -2213,11 +2336,9 @@ H5C_move_entry(H5C_t *cache_ptr, const H5C_class_t *type, haddr_t old_addr, hadd
* don't mark it as dirty either, lest we confuse the flush call back.
*/
if (!entry_ptr->destroy_in_progress) {
-
H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL)
if (entry_ptr->in_slist) {
-
HDassert(cache_ptr->slist_ptr);
H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE)
} /* end if */
@@ -2234,7 +2355,6 @@ H5C_move_entry(H5C_t *cache_ptr, const H5C_class_t *type, haddr_t old_addr, hadd
}
if (!entry_ptr->destroy_in_progress) {
-
hbool_t was_dirty; /* Whether the entry was previously dirty */
/* Remember previous dirty status */
@@ -2245,16 +2365,11 @@ H5C_move_entry(H5C_t *cache_ptr, const H5C_class_t *type, haddr_t old_addr, hadd
/* This shouldn't be needed, but it keeps the test code happy */
if (entry_ptr->image_up_to_date) {
-
entry_ptr->image_up_to_date = FALSE;
-
- if (entry_ptr->flush_dep_nparents > 0) {
-
+ if (entry_ptr->flush_dep_nparents > 0)
if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
-
HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
"Can't propagate serialization status to fd parents")
- }
} /* end if */
/* Modify cache data structures */
@@ -2263,33 +2378,24 @@ H5C_move_entry(H5C_t *cache_ptr, const H5C_class_t *type, haddr_t old_addr, hadd
/* Skip some actions if we're in the middle of flushing the entry */
if (!entry_ptr->flush_in_progress) {
-
/* Update the replacement policy for the entry */
H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, FAIL)
/* Check for entry changing status and do notifications, etc. */
if (!was_dirty) {
-
- /* If the entry's type has a 'notify' callback send a 'entry
- * dirtied' notice now that the entry is fully integrated
- * into the cache.
+ /* If the entry's type has a 'notify' callback send a 'entry dirtied'
+ * notice now that the entry is fully integrated into the cache.
*/
- if ((entry_ptr->type->notify) &&
- ((entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0))
-
+ if (entry_ptr->type->notify &&
+ (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
"can't notify client about entry dirty flag set")
- /* Propagate the dirty flag up the flush dependency chain
- * if appropriate
- */
- if (entry_ptr->flush_dep_nparents > 0) {
-
+ /* Propagate the dirty flag up the flush dependency chain if appropriate */
+ if (entry_ptr->flush_dep_nparents > 0)
if (H5C__mark_flush_dep_dirty(entry_ptr) < 0)
-
HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL,
"Can't propagate flush dep dirty flag")
- }
} /* end if */
} /* end if */
} /* end if */
@@ -2297,26 +2403,23 @@ H5C_move_entry(H5C_t *cache_ptr, const H5C_class_t *type, haddr_t old_addr, hadd
H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr)
done:
-
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
-
+ if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5C_move_entry() */
/*-------------------------------------------------------------------------
* Function: H5C_resize_entry
*
- * Purpose: Resize a pinned or protected entry.
+ * Purpose: Resize a pinned or protected entry.
*
- * Resizing an entry dirties it, so if the entry is not
- * already dirty, the function places the entry on the
- * skip list.
+ * Resizing an entry dirties it, so if the entry is not
+ * already dirty, the function places the entry on the
+ * skip list.
*
* Return: Non-negative on success/Negative on failure
*
@@ -2348,7 +2451,8 @@ H5C_resize_entry(void *thing, size_t new_size)
HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, FAIL, "Entry isn't pinned or protected??")
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C_validate_protected_entry_list(cache_ptr) < 0) || (H5C_validate_pinned_entry_list(cache_ptr) < 0))
+ if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C__validate_pinned_entry_list(cache_ptr) < 0))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -2444,7 +2548,8 @@ H5C_resize_entry(void *thing, size_t new_size)
done:
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C_validate_protected_entry_list(cache_ptr) < 0) || (H5C_validate_pinned_entry_list(cache_ptr) < 0))
+ if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C__validate_pinned_entry_list(cache_ptr) < 0))
HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -2454,15 +2559,15 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C_pin_protected_entry()
*
- * Purpose: Pin a protected cache entry. The entry must be protected
- * at the time of call, and must be unpinned.
+ * Purpose: Pin a protected cache entry. The entry must be protected
+ * at the time of call, and must be unpinned.
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: John Mainzer
* 4/26/06
*
- * Changes: Added extreme sanity checks on entry and exit.
+ * Changes: Added extreme sanity checks on entry and exit.
* JRM -- 4/26/14
*
*-------------------------------------------------------------------------
@@ -2484,8 +2589,8 @@ H5C_pin_protected_entry(void *thing)
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
+ if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -2499,8 +2604,8 @@ H5C_pin_protected_entry(void *thing)
done:
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
+ if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -2541,8 +2646,8 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign
#ifdef H5_HAVE_PARALLEL
hbool_t coll_access = FALSE; /* whether access to the cache entry is done collectively */
#endif /* H5_HAVE_PARALLEL */
- hbool_t write_permitted;
- hbool_t was_loaded = FALSE; /* Whether the entry was loaded as a result of the protect */
+ hbool_t write_permitted = FALSE;
+ hbool_t was_loaded = FALSE; /* Whether the entry was loaded as a result of the protect */
size_t empty_space;
void * thing;
H5C_cache_entry_t *entry_ptr;
@@ -2563,8 +2668,8 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign
HDassert(H5F_addr_defined(addr));
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
+ if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -2692,11 +2797,11 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign
hit = FALSE;
- if (NULL == (thing = H5C_load_entry(f,
+ if (NULL == (thing = H5C__load_entry(f,
#ifdef H5_HAVE_PARALLEL
- coll_access,
+ coll_access,
#endif /* H5_HAVE_PARALLEL */
- type, addr, udata)))
+ type, addr, udata)))
HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't load entry")
entry_ptr = (H5C_cache_entry_t *)thing;
@@ -2938,8 +3043,8 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign
done:
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
+ if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -3213,6 +3318,174 @@ done:
} /* H5C_set_evictions_enabled() */
/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_set_slist_enabled()
+ *
+ * Purpose: Enable or disable the slist as directed.
+ *
+ * The slist (skip list) is an address ordered list of
+ * dirty entries in the metadata cache. However, this
+ * list is only needed during flush and close, where we
+ * use it to write entries in more or less increasing
+ * address order.
+ *
+ * This function sets up and enables further operations
+ * on the slist, or disables the slist. This in turn
+ * allows us to avoid the overhead of maintaining the
+ * slist when it is not needed.
+ *
+ *
+ * If the slist_enabled parameter is TRUE, the function
+ *
+ * 1) Verifies that the slist is empty.
+ *
+ * 2) Scans the index list, and inserts all dirty entries
+ * into the slist.
+ *
+ * 3) Sets cache_ptr->slist_enabled = TRUE.
+ *
+ * Note that the clear_slist parameter is ignored if
+ * the slist_enabled parameter is TRUE.
+ *
+ *
+ * If the slist_enabled parameter is FALSE, the function
+ * shuts down the slist.
+ *
+ * Normally the slist will be empty at this point, however
+ * that need not be the case if H5C_flush_cache() has been
+ * called with the H5C__FLUSH_MARKED_ENTRIES_FLAG.
+ *
+ * Thus shutdown proceeds as follows:
+ *
+ * 1) Test to see if the slist is empty. If it is, proceed
+ * to step 3.
+ *
+ * 2) Test to see if the clear_slist parameter is TRUE.
+ *
+ * If it is, remove all entries from the slist.
+ *
+ * If it isn't, throw an error.
+ *
+ * 3) set cache_ptr->slist_enabled = FALSE.
+ *
+ * Return: SUCCEED on success, and FAIL on failure.
+ *
+ * Programmer: John Mainzer
+ * 5/1/20
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_set_slist_enabled(H5C_t *cache_ptr, hbool_t slist_enabled, hbool_t clear_slist)
+{
+ H5C_cache_entry_t *entry_ptr;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ if ((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC))
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry")
+
+#if H5C__SLIST_OPT_ENABLED
+
+ if (slist_enabled) {
+
+ if (cache_ptr->slist_enabled) {
+
+ HDassert(FALSE);
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist already enabled?")
+ }
+
+ if ((cache_ptr->slist_len != 0) || (cache_ptr->slist_size != 0)) {
+
+ HDassert(FALSE);
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist not empty (1)?")
+ }
+
+ /* set cache_ptr->slist_enabled to TRUE so that the slist
+ * maintenance macros will be enabled.
+ */
+ cache_ptr->slist_enabled = TRUE;
+
+ /* scan the index list and insert all dirty entries in the slist */
+ entry_ptr = cache_ptr->il_head;
+
+ while (entry_ptr != NULL) {
+
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+
+ if (entry_ptr->is_dirty) {
+
+ H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
+ }
+
+ entry_ptr = entry_ptr->il_next;
+ }
+
+ /* we don't maintain a dirty index len, so we can't do a cross
+ * check against it. Note that there is no point in cross checking
+ * against the dirty LRU size, as the dirty LRU may not be maintained,
+ * and in any case, there is no requirement that all dirty entries
+ * will reside on the dirty LRU.
+ */
+ HDassert(cache_ptr->dirty_index_size == cache_ptr->slist_size);
+ }
+ else { /* take down the skip list */
+
+ if (!cache_ptr->slist_enabled) {
+
+ HDassert(FALSE);
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist already disabled?")
+ }
+
+ if ((cache_ptr->slist_len != 0) || (cache_ptr->slist_size != 0)) {
+
+ if (clear_slist) {
+
+ H5SL_node_t *node_ptr;
+
+ node_ptr = H5SL_first(cache_ptr->slist_ptr);
+
+ while (node_ptr != NULL) {
+
+ entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+
+ H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE);
+
+ node_ptr = H5SL_first(cache_ptr->slist_ptr);
+ }
+ }
+ else {
+
+ HDassert(FALSE);
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "slist not empty (2)?")
+ }
+ }
+
+ cache_ptr->slist_enabled = FALSE;
+
+ HDassert(0 == cache_ptr->slist_len);
+ HDassert(0 == cache_ptr->slist_size);
+ }
+
+#else /* H5C__SLIST_OPT_ENABLED is FALSE */
+
+ HDassert(cache_ptr->slist_enabled);
+
+#endif /* H5C__SLIST_OPT_ENABLED is FALSE */
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C_set_slist_enabled() */
+
+/*-------------------------------------------------------------------------
* Function: H5C_set_vfd_swmr_reader()
*
* Purpose: Set cache_ptr->vfd_swmr_reader and cache_ptr->page_size to
@@ -3250,16 +3523,16 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C_unpin_entry()
*
- * Purpose: Unpin a cache entry. The entry can be either protected or
- * unprotected at the time of call, but must be pinned.
+ * Purpose: Unpin a cache entry. The entry can be either protected or
+ * unprotected at the time of call, but must be pinned.
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: John Mainzer
* 3/22/06
*
- * Changes: Added extreme sanity checks on entry and exit.
- JRM -- 4/26/14
+ * Changes: Added extreme sanity checks on entry and exit.
+ * JRM -- 4/26/14
*
*-------------------------------------------------------------------------
*/
@@ -3279,8 +3552,8 @@ H5C_unpin_entry(void *_entry_ptr)
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
+ if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -3290,8 +3563,8 @@ H5C_unpin_entry(void *_entry_ptr)
done:
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
+ if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -3321,6 +3594,81 @@ done:
* Programmer: John Mainzer
* 6/2/04
*
+ * Modifications:
+ *
+ * JRM -- 7/21/04
+ * Updated for the addition of the hash table.
+ *
+ * JRM -- 10/28/04
+ * Added code to set cache_full to TRUE whenever we try to
+ * make space in the cache.
+ *
+ * JRM -- 11/12/04
+ * Added code to call to H5C_make_space_in_cache() after the
+ * call to H5C__auto_adjust_cache_size() if that function
+ * sets the size_decreased flag to TRUE.
+ *
+ * JRM -- 4/25/05
+ * The size_decreased flag can also be set to TRUE in
+ * H5C_set_cache_auto_resize_config() if a new configuration
+ * forces an immediate reduction in cache size. Modified
+ * the code to deal with this eventuality.
+ *
+ * JRM -- 6/24/05
+ * Added support for the new write_permitted field of H5C_t.
+ *
+ * JRM -- 10/22/05
+ * Hand optimizations.
+ *
+ * JRM -- 5/3/06
+ * Added code to set the new dirtied field in
+ * H5C_cache_entry_t to FALSE prior to return.
+ *
+ * JRM -- 6/23/06
+ * Modified code to allow dirty entries to be loaded from
+ * disk. This is necessary as a bug fix in the object
+ * header code requires us to modify a header as it is read.
+ *
+ * JRM -- 3/28/07
+ * Added the flags parameter and supporting code. At least
+ * for now, this parameter is used to allow the entry to
+ * be protected read only, thus allowing multiple protects.
+ *
+ * Also added code to allow multiple read only protects
+ * of cache entries.
+ *
+ * JRM -- 7/27/07
+ * Added code supporting the new evictions_enabled field
+ * in H5C_t.
+ *
+ * JRM -- 1/3/08
+ * Added to do a flash cache size increase if appropriate
+ * when a large entry is loaded.
+ *
+ * JRM -- 11/13/08
+ * Modified function to call H5C_make_space_in_cache() when
+ * the min_clean_size is violated, not just when there isn't
+ * enough space for an entry that has just been loaded.
+ *
+ * The purpose of this modification is to avoid "metadata
+ * blizzards" in the write only case. In such instances,
+ * the cache was allowed to fill with dirty metadata. When
+ * we finally needed to evict an entry to make space, we had
+ * to flush out a whole cache full of metadata -- which has
+ * interesting performance effects. We hope to avoid (or
+ * perhaps more accurately hide) this effect by maintaining
+ * the min_clean_size, which should force us to start flushing
+ * entries long before we actually have to evict something
+ * to make space.
+ *
+ *
+ * Missing entries?
+ *
+ *
+ * JRM -- 5/8/20
+ * Updated for the possibility that the slist will be
+ * disabled.
+ *
*-------------------------------------------------------------------------
*/
herr_t
@@ -3362,9 +3710,15 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags)
HDassert(H5F_addr_defined(addr));
HDassert(thing);
HDassert(!(pin_entry && unpin_entry));
- HDassert((!free_file_space) || (deleted)); /* deleted flag must accompany free_file_space */
- HDassert((!take_ownership) || (deleted)); /* deleted flag must accompany take_ownership */
- HDassert(!(free_file_space && take_ownership)); /* can't have both free_file_space & take_ownership */
+
+ /* deleted flag must accompany free_file_space */
+ HDassert((!free_file_space) || (deleted));
+
+ /* deleted flag must accompany take_ownership */
+ HDassert((!take_ownership) || (deleted));
+
+ /* can't have both free_file_space & take_ownership */
+ HDassert(!(free_file_space && take_ownership));
entry_ptr = (H5C_cache_entry_t *)thing;
@@ -3377,8 +3731,9 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags)
was_clean = !(entry_ptr->is_dirty);
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
+ if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
+
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -3465,16 +3820,22 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags)
/* Mark the entry as dirty if appropriate */
entry_ptr->is_dirty = (entry_ptr->is_dirty || dirtied);
- if (dirtied)
+ if (dirtied) {
+
if (entry_ptr->image_up_to_date) {
entry_ptr->image_up_to_date = FALSE;
- if (entry_ptr->flush_dep_nparents > 0)
+
+ if (entry_ptr->flush_dep_nparents > 0) {
+
if (H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
"Can't propagate serialization status to fd parents")
- } /* end if */
+
+ } /* end if */
+ } /* end if */
+ } /* end if */
/* Check for newly dirtied entry */
if (was_clean && entry_ptr->is_dirty) {
@@ -3482,36 +3843,47 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags)
/* Update index for newly dirtied entry */
H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr)
- /* If the entry's type has a 'notify' callback send a 'entry dirtied'
- * notice now that the entry is fully integrated into the cache.
+ /* If the entry's type has a 'notify' callback send a
+ * 'entry dirtied' notice now that the entry is fully
+ * integrated into the cache.
*/
- if (entry_ptr->type->notify &&
- (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0)
+ if ((entry_ptr->type->notify) &&
+ ((entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_DIRTIED, entry_ptr) < 0))
+
HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag set")
/* Propagate the flush dep dirty flag up the flush dependency chain
- * if appropriate */
- if (entry_ptr->flush_dep_nparents > 0)
+ * if appropriate
+ */
+ if (entry_ptr->flush_dep_nparents > 0) {
+
if (H5C__mark_flush_dep_dirty(entry_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
+ }
} /* end if */
/* Check for newly clean entry */
else if (!was_clean && !entry_ptr->is_dirty) {
- /* If the entry's type has a 'notify' callback send a 'entry cleaned'
- * notice now that the entry is fully integrated into the cache.
+
+ /* If the entry's type has a 'notify' callback send a
+ * 'entry cleaned' notice now that the entry is fully
+ * integrated into the cache.
*/
- if (entry_ptr->type->notify &&
- (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0)
+ if ((entry_ptr->type->notify) &&
+ ((entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0))
+
HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
"can't notify client about entry dirty flag cleared")
/* Propagate the flush dep clean flag up the flush dependency chain
- * if appropriate */
- if (entry_ptr->flush_dep_nparents > 0)
+ * if appropriate
+ */
+ if (entry_ptr->flush_dep_nparents > 0) {
+
if (H5C__mark_flush_dep_clean(entry_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
+ }
} /* end else-if */
/* Pin or unpin the entry as requested. */
@@ -3543,8 +3915,12 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags)
if (entry_ptr->is_dirty) {
entry_ptr->flush_marker |= set_flush_marker;
- if (!entry_ptr->in_slist)
+
+ if (!entry_ptr->in_slist) {
+
+ /* this is a no-op if cache_ptr->slist_enabled is FALSE */
H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
+ }
} /* end if */
/* this implementation of the "deleted" option is a bit inefficient, as
@@ -3557,45 +3933,62 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags)
* JRM - 5/19/04
*/
if (deleted) {
+
unsigned flush_flags = (H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__FLUSH_INVALIDATE_FLAG);
/* verify that the target entry is in the cache. */
H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL)
+
if (test_entry_ptr == NULL)
+
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?")
+
else if (test_entry_ptr != entry_ptr)
+
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL,
"hash table contains multiple entries for addr?!?")
/* Set the 'free file space' flag for the flush, if needed */
- if (free_file_space)
+ if (free_file_space) {
+
flush_flags |= H5C__FREE_FILE_SPACE_FLAG;
+ }
/* Set the "take ownership" flag for the flush, if needed */
- if (take_ownership)
+ if (take_ownership) {
+
flush_flags |= H5C__TAKE_OWNERSHIP_FLAG;
+ }
/* Delete the entry from the skip list on destroy */
flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG;
- HDassert(((!was_clean) || dirtied) == entry_ptr->in_slist);
+ HDassert((!cache_ptr->slist_enabled) || (((!was_clean) || dirtied) == (entry_ptr->in_slist)));
+
if (H5C__flush_single_entry(f, entry_ptr, flush_flags) < 0)
+
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't flush entry")
+
} /* end if */
#ifdef H5_HAVE_PARALLEL
else if (clear_entry) {
/* verify that the target entry is in the cache. */
H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL)
+
if (test_entry_ptr == NULL)
+
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?")
+
else if (test_entry_ptr != entry_ptr)
+
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL,
"hash table contains multiple entries for addr?!?")
if (H5C__flush_single_entry(f, entry_ptr,
H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't clear entry")
+
} /* end else if */
#endif /* H5_HAVE_PARALLEL */
}
@@ -3603,575 +3996,578 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags)
H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr)
done:
+
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0)) {
+ if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
+
HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C_unprotect() */
+ FUNC_LEAVE_NOAPI(ret_value)
- /*-------------------------------------------------------------------------
- *
- * Function: H5C_unsettle_entry_ring
- *
- * Purpose: Advise the metadata cache that the specified entry's free space
- * manager ring is no longer settled (if it was on entry).
- *
- * If the target free space manager ring is already
- * unsettled, do nothing, and return SUCCEED.
- *
- * If the target free space manager ring is settled, and
- * we are not in the process of a file shutdown, mark
- * the ring as unsettled, and return SUCCEED.
- *
- * If the target free space manager is settled, and we
- * are in the process of a file shutdown, post an error
- * message, and return FAIL.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * January 3, 2017
- *
- *-------------------------------------------------------------------------
- */
- herr_t H5C_unsettle_entry_ring(void *_entry)
- {
- H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_entry; /* Entry whose ring to unsettle */
- H5C_t * cache; /* Cache for file */
- herr_t ret_value = SUCCEED; /* Return value */
+} /* H5C_unprotect() */
- FUNC_ENTER_NOAPI(FAIL)
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_unsettle_entry_ring
+ *
+ * Purpose: Advise the metadata cache that the specified entry's free space
+ * manager ring is no longer settled (if it was on entry).
+ *
+ * If the target free space manager ring is already
+ * unsettled, do nothing, and return SUCCEED.
+ *
+ * If the target free space manager ring is settled, and
+ * we are not in the process of a file shutdown, mark
+ * the ring as unsettled, and return SUCCEED.
+ *
+ * If the target free space manager ring is settled, and we
+ * are in the process of a file shutdown, post an error
+ * message, and return FAIL.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * January 3, 2017
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_unsettle_entry_ring(void *_entry)
+{
+ H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_entry; /* Entry whose ring to unsettle */
+ H5C_t * cache; /* Cache for file */
+ herr_t ret_value = SUCCEED; /* Return value */
- /* Sanity checks */
- HDassert(entry);
- HDassert(entry->ring != H5C_RING_UNDEFINED);
- HDassert((H5C_RING_USER == entry->ring) || (H5C_RING_RDFSM == entry->ring) ||
- (H5C_RING_MDFSM == entry->ring));
- cache = entry->cache_ptr;
- HDassert(cache);
- HDassert(cache->magic == H5C__H5C_T_MAGIC);
-
- switch (entry->ring) {
- case H5C_RING_USER:
- /* Do nothing */
- break;
+ FUNC_ENTER_NOAPI(FAIL)
- case H5C_RING_RDFSM:
- if (cache->rdfsm_settled) {
- if (cache->flush_in_progress || cache->close_warning_received)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected rdfsm ring unsettle")
- cache->rdfsm_settled = FALSE;
- } /* end if */
- break;
+ /* Sanity checks */
+ HDassert(entry);
+ HDassert(entry->ring != H5C_RING_UNDEFINED);
+ HDassert((H5C_RING_USER == entry->ring) || (H5C_RING_RDFSM == entry->ring) ||
+ (H5C_RING_MDFSM == entry->ring));
+ cache = entry->cache_ptr;
+ HDassert(cache);
+ HDassert(cache->magic == H5C__H5C_T_MAGIC);
+
+ switch (entry->ring) {
+ case H5C_RING_USER:
+ /* Do nothing */
+ break;
- case H5C_RING_MDFSM:
- if (cache->mdfsm_settled) {
- if (cache->flush_in_progress || cache->close_warning_received)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected mdfsm ring unsettle")
- cache->mdfsm_settled = FALSE;
- } /* end if */
- break;
+ case H5C_RING_RDFSM:
+ if (cache->rdfsm_settled) {
+ if (cache->flush_in_progress || cache->close_warning_received)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected rdfsm ring unsettle")
+ cache->rdfsm_settled = FALSE;
+ } /* end if */
+ break;
- default:
- HDassert(FALSE); /* this should be un-reachable */
- break;
- } /* end switch */
+ case H5C_RING_MDFSM:
+ if (cache->mdfsm_settled) {
+ if (cache->flush_in_progress || cache->close_warning_received)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected mdfsm ring unsettle")
+ cache->mdfsm_settled = FALSE;
+ } /* end if */
+ break;
-done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C_unsettle_entry_ring() */
+ default:
+ HDassert(FALSE); /* this should be un-reachable */
+ break;
+ } /* end switch */
- /*-------------------------------------------------------------------------
- * Function: H5C_unsettle_ring()
- *
- * Purpose: Advise the metadata cache that the specified free space
- * manager ring is no longer settled (if it was on entry).
- *
- * If the target free space manager ring is already
- * unsettled, do nothing, and return SUCCEED.
- *
- * If the target free space manager ring is settled, and
- * we are not in the process of a file shutdown, mark
- * the ring as unsettled, and return SUCCEED.
- *
- * If the target free space manager is settled, and we
- * are in the process of a file shutdown, post an error
- * message, and return FAIL.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer
- * 10/15/16
- *
- *-------------------------------------------------------------------------
- */
- herr_t H5C_unsettle_ring(H5F_t * f, H5C_ring_t ring)
- {
- H5C_t *cache_ptr;
- herr_t ret_value = SUCCEED; /* Return value */
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_unsettle_entry_ring() */
- FUNC_ENTER_NOAPI(FAIL)
+/*-------------------------------------------------------------------------
+ * Function: H5C_unsettle_ring()
+ *
+ * Purpose: Advise the metadata cache that the specified free space
+ * manager ring is no longer settled (if it was on entry).
+ *
+ * If the target free space manager ring is already
+ * unsettled, do nothing, and return SUCCEED.
+ *
+ * If the target free space manager ring is settled, and
+ * we are not in the process of a file shutdown, mark
+ * the ring as unsettled, and return SUCCEED.
+ *
+ * If the target free space manager ring is settled, and we
+ * are in the process of a file shutdown, post an error
+ * message, and return FAIL.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 10/15/16
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_unsettle_ring(H5F_t *f, H5C_ring_t ring)
+{
+ H5C_t *cache_ptr;
+ herr_t ret_value = SUCCEED; /* Return value */
- /* Sanity checks */
- HDassert(f);
- HDassert(f->shared);
- HDassert(f->shared->cache);
- HDassert((H5C_RING_RDFSM == ring) || (H5C_RING_MDFSM == ring));
- cache_ptr = f->shared->cache;
- HDassert(H5C__H5C_T_MAGIC == cache_ptr->magic);
+ FUNC_ENTER_NOAPI(FAIL)
- switch (ring) {
- case H5C_RING_RDFSM:
- if (cache_ptr->rdfsm_settled) {
- if (cache_ptr->close_warning_received)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected rdfsm ring unsettle")
- cache_ptr->rdfsm_settled = FALSE;
- } /* end if */
- break;
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->cache);
+ HDassert((H5C_RING_RDFSM == ring) || (H5C_RING_MDFSM == ring));
+ cache_ptr = f->shared->cache;
+ HDassert(H5C__H5C_T_MAGIC == cache_ptr->magic);
+
+ switch (ring) {
+ case H5C_RING_RDFSM:
+ if (cache_ptr->rdfsm_settled) {
+ if (cache_ptr->close_warning_received)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected rdfsm ring unsettle")
+ cache_ptr->rdfsm_settled = FALSE;
+ } /* end if */
+ break;
- case H5C_RING_MDFSM:
- if (cache_ptr->mdfsm_settled) {
- if (cache_ptr->close_warning_received)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected mdfsm ring unsettle")
- cache_ptr->mdfsm_settled = FALSE;
- } /* end if */
- break;
+ case H5C_RING_MDFSM:
+ if (cache_ptr->mdfsm_settled) {
+ if (cache_ptr->close_warning_received)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected mdfsm ring unsettle")
+ cache_ptr->mdfsm_settled = FALSE;
+ } /* end if */
+ break;
- default:
- HDassert(FALSE); /* this should be un-reachable */
- break;
- } /* end switch */
+ default:
+ HDassert(FALSE); /* this should be un-reachable */
+ break;
+ } /* end switch */
done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C_unsettle_ring() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_unsettle_ring() */
- /*-------------------------------------------------------------------------
- * Function: H5C_validate_resize_config()
- *
- * Purpose: Run a sanity check on the specified sections of the
- * provided instance of struct H5C_auto_size_ctl_t.
- *
- * Do nothing and return SUCCEED if no errors are detected,
- * and flag an error and return FAIL otherwise.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer
- * 3/23/05
- *
- *-------------------------------------------------------------------------
- */
- herr_t H5C_validate_resize_config(H5C_auto_size_ctl_t * config_ptr, unsigned int tests)
- {
- herr_t ret_value = SUCCEED; /* Return value */
+/*-------------------------------------------------------------------------
+ * Function: H5C_validate_resize_config()
+ *
+ * Purpose: Run a sanity check on the specified sections of the
+ * provided instance of struct H5C_auto_size_ctl_t.
+ *
+ * Do nothing and return SUCCEED if no errors are detected,
+ * and flag an error and return FAIL otherwise.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 3/23/05
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_validate_resize_config(H5C_auto_size_ctl_t *config_ptr, unsigned int tests)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_NOAPI(FAIL)
- if (config_ptr == NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry")
+ if (config_ptr == NULL)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry")
- if (config_ptr->version != H5C__CURR_AUTO_SIZE_CTL_VER)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown config version")
+ if (config_ptr->version != H5C__CURR_AUTO_SIZE_CTL_VER)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown config version")
- if ((tests & H5C_RESIZE_CFG__VALIDATE_GENERAL) != 0) {
+ if ((tests & H5C_RESIZE_CFG__VALIDATE_GENERAL) != 0) {
- if (config_ptr->max_size > H5C__MAX_MAX_CACHE_SIZE)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "max_size too big")
+ if (config_ptr->max_size > H5C__MAX_MAX_CACHE_SIZE)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "max_size too big")
- if (config_ptr->min_size < H5C__MIN_MAX_CACHE_SIZE)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_size too small")
+ if (config_ptr->min_size < H5C__MIN_MAX_CACHE_SIZE)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_size too small")
- if (config_ptr->min_size > config_ptr->max_size)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_size > max_size")
+ if (config_ptr->min_size > config_ptr->max_size)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_size > max_size")
- if (config_ptr->set_initial_size && ((config_ptr->initial_size < config_ptr->min_size) ||
- (config_ptr->initial_size > config_ptr->max_size)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
- "initial_size must be in the interval [min_size, max_size]")
+ if (config_ptr->set_initial_size && ((config_ptr->initial_size < config_ptr->min_size) ||
+ (config_ptr->initial_size > config_ptr->max_size)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+ "initial_size must be in the interval [min_size, max_size]")
- if ((config_ptr->min_clean_fraction < 0.0) || (config_ptr->min_clean_fraction > 1.0))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
- "min_clean_fraction must be in the interval [0.0, 1.0]")
+ if ((config_ptr->min_clean_fraction < 0.0) || (config_ptr->min_clean_fraction > 1.0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_clean_fraction must be in the interval [0.0, 1.0]")
- if (config_ptr->epoch_length < H5C__MIN_AR_EPOCH_LENGTH)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too small")
+ if (config_ptr->epoch_length < H5C__MIN_AR_EPOCH_LENGTH)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too small")
- if (config_ptr->epoch_length > H5C__MAX_AR_EPOCH_LENGTH)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too big")
- } /* H5C_RESIZE_CFG__VALIDATE_GENERAL */
+ if (config_ptr->epoch_length > H5C__MAX_AR_EPOCH_LENGTH)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too big")
+ } /* H5C_RESIZE_CFG__VALIDATE_GENERAL */
- if ((tests & H5C_RESIZE_CFG__VALIDATE_INCREMENT) != 0) {
- if ((config_ptr->incr_mode != H5C_incr__off) && (config_ptr->incr_mode != H5C_incr__threshold))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid incr_mode")
+ if ((tests & H5C_RESIZE_CFG__VALIDATE_INCREMENT) != 0) {
+ if ((config_ptr->incr_mode != H5C_incr__off) && (config_ptr->incr_mode != H5C_incr__threshold))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid incr_mode")
- if (config_ptr->incr_mode == H5C_incr__threshold) {
- if ((config_ptr->lower_hr_threshold < 0.0) || (config_ptr->lower_hr_threshold > 1.0))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
- "lower_hr_threshold must be in the range [0.0, 1.0]")
+ if (config_ptr->incr_mode == H5C_incr__threshold) {
+ if ((config_ptr->lower_hr_threshold < 0.0) || (config_ptr->lower_hr_threshold > 1.0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+ "lower_hr_threshold must be in the range [0.0, 1.0]")
- if (config_ptr->increment < 1.0)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
- "increment must be greater than or equal to 1.0")
+ if (config_ptr->increment < 1.0)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "increment must be greater than or equal to 1.0")
- /* no need to check max_increment, as it is a size_t,
- * and thus must be non-negative.
- */
- } /* H5C_incr__threshold */
+ /* no need to check max_increment, as it is a size_t,
+ * and thus must be non-negative.
+ */
+ } /* H5C_incr__threshold */
- switch (config_ptr->flash_incr_mode) {
- case H5C_flash_incr__off:
- /* nothing to do here */
- break;
+ switch (config_ptr->flash_incr_mode) {
+ case H5C_flash_incr__off:
+ /* nothing to do here */
+ break;
- case H5C_flash_incr__add_space:
- if ((config_ptr->flash_multiple < 0.1) || (config_ptr->flash_multiple > 10.0))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
- "flash_multiple must be in the range [0.1, 10.0]")
- if ((config_ptr->flash_threshold < 0.1) || (config_ptr->flash_threshold > 1.0))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
- "flash_threshold must be in the range [0.1, 1.0]")
- break;
+ case H5C_flash_incr__add_space:
+ if ((config_ptr->flash_multiple < 0.1) || (config_ptr->flash_multiple > 10.0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+ "flash_multiple must be in the range [0.1, 10.0]")
+ if ((config_ptr->flash_threshold < 0.1) || (config_ptr->flash_threshold > 1.0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+ "flash_threshold must be in the range [0.1, 1.0]")
+ break;
- default:
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid flash_incr_mode")
- break;
- } /* end switch */
- } /* H5C_RESIZE_CFG__VALIDATE_INCREMENT */
+ default:
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid flash_incr_mode")
+ break;
+ } /* end switch */
+ } /* H5C_RESIZE_CFG__VALIDATE_INCREMENT */
- if ((tests & H5C_RESIZE_CFG__VALIDATE_DECREMENT) != 0) {
+ if ((tests & H5C_RESIZE_CFG__VALIDATE_DECREMENT) != 0) {
- if ((config_ptr->decr_mode != H5C_decr__off) && (config_ptr->decr_mode != H5C_decr__threshold) &&
- (config_ptr->decr_mode != H5C_decr__age_out) &&
- (config_ptr->decr_mode != H5C_decr__age_out_with_threshold)) {
+ if ((config_ptr->decr_mode != H5C_decr__off) && (config_ptr->decr_mode != H5C_decr__threshold) &&
+ (config_ptr->decr_mode != H5C_decr__age_out) &&
+ (config_ptr->decr_mode != H5C_decr__age_out_with_threshold)) {
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid decr_mode")
- }
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid decr_mode")
+ }
- if (config_ptr->decr_mode == H5C_decr__threshold) {
- if (config_ptr->upper_hr_threshold > 1.0)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "upper_hr_threshold must be <= 1.0")
+ if (config_ptr->decr_mode == H5C_decr__threshold) {
+ if (config_ptr->upper_hr_threshold > 1.0)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "upper_hr_threshold must be <= 1.0")
- if ((config_ptr->decrement > 1.0) || (config_ptr->decrement < 0.0))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "decrement must be in the interval [0.0, 1.0]")
+ if ((config_ptr->decrement > 1.0) || (config_ptr->decrement < 0.0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "decrement must be in the interval [0.0, 1.0]")
- /* no need to check max_decrement as it is a size_t
- * and thus must be non-negative.
- */
- } /* H5C_decr__threshold */
+ /* no need to check max_decrement as it is a size_t
+ * and thus must be non-negative.
+ */
+ } /* H5C_decr__threshold */
- if ((config_ptr->decr_mode == H5C_decr__age_out) ||
- (config_ptr->decr_mode == H5C_decr__age_out_with_threshold)) {
+ if ((config_ptr->decr_mode == H5C_decr__age_out) ||
+ (config_ptr->decr_mode == H5C_decr__age_out_with_threshold)) {
- if (config_ptr->epochs_before_eviction < 1)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epochs_before_eviction must be positive")
- if (config_ptr->epochs_before_eviction > H5C__MAX_EPOCH_MARKERS)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epochs_before_eviction too big")
+ if (config_ptr->epochs_before_eviction < 1)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epochs_before_eviction must be positive")
+ if (config_ptr->epochs_before_eviction > H5C__MAX_EPOCH_MARKERS)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epochs_before_eviction too big")
- if ((config_ptr->apply_empty_reserve) &&
- ((config_ptr->empty_reserve > 1.0) || (config_ptr->empty_reserve < 0.0)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
- "empty_reserve must be in the interval [0.0, 1.0]")
+ if ((config_ptr->apply_empty_reserve) &&
+ ((config_ptr->empty_reserve > 1.0) || (config_ptr->empty_reserve < 0.0)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "empty_reserve must be in the interval [0.0, 1.0]")
- /* no need to check max_decrement as it is a size_t
- * and thus must be non-negative.
- */
- } /* H5C_decr__age_out || H5C_decr__age_out_with_threshold */
+ /* no need to check max_decrement as it is a size_t
+ * and thus must be non-negative.
+ */
+ } /* H5C_decr__age_out || H5C_decr__age_out_with_threshold */
- if (config_ptr->decr_mode == H5C_decr__age_out_with_threshold) {
- if ((config_ptr->upper_hr_threshold > 1.0) || (config_ptr->upper_hr_threshold < 0.0))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
- "upper_hr_threshold must be in the interval [0.0, 1.0]")
- } /* H5C_decr__age_out_with_threshold */
- } /* H5C_RESIZE_CFG__VALIDATE_DECREMENT */
-
- if ((tests & H5C_RESIZE_CFG__VALIDATE_INTERACTIONS) != 0) {
- if ((config_ptr->incr_mode == H5C_incr__threshold) &&
- ((config_ptr->decr_mode == H5C_decr__threshold) ||
- (config_ptr->decr_mode == H5C_decr__age_out_with_threshold)) &&
- (config_ptr->lower_hr_threshold >= config_ptr->upper_hr_threshold))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "conflicting threshold fields in config")
- } /* H5C_RESIZE_CFG__VALIDATE_INTERACTIONS */
+ if (config_ptr->decr_mode == H5C_decr__age_out_with_threshold) {
+ if ((config_ptr->upper_hr_threshold > 1.0) || (config_ptr->upper_hr_threshold < 0.0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+ "upper_hr_threshold must be in the interval [0.0, 1.0]")
+ } /* H5C_decr__age_out_with_threshold */
+ } /* H5C_RESIZE_CFG__VALIDATE_DECREMENT */
+
+ if ((tests & H5C_RESIZE_CFG__VALIDATE_INTERACTIONS) != 0) {
+ if ((config_ptr->incr_mode == H5C_incr__threshold) &&
+ ((config_ptr->decr_mode == H5C_decr__threshold) ||
+ (config_ptr->decr_mode == H5C_decr__age_out_with_threshold)) &&
+ (config_ptr->lower_hr_threshold >= config_ptr->upper_hr_threshold))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "conflicting threshold fields in config")
+ } /* H5C_RESIZE_CFG__VALIDATE_INTERACTIONS */
done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C_validate_resize_config() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_validate_resize_config() */
- /*-------------------------------------------------------------------------
- * Function: H5C_create_flush_dependency()
- *
- * Purpose: Initiates a parent<->child entry flush dependency. The parent
- * entry must be pinned or protected at the time of call, and must
- * have all dependencies removed before the cache can shut down.
- *
- * Note: Flush dependencies in the cache indicate that a child entry
- * must be flushed to the file before its parent. (This is
- * currently used to implement Single-Writer/Multiple-Reader (SWMR)
- * I/O access for data structures in the file).
- *
- * Creating a flush dependency between two entries will also pin
- * the parent entry.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * 3/05/09
- *
- *-------------------------------------------------------------------------
- */
- herr_t H5C_create_flush_dependency(void *parent_thing, void *child_thing)
- {
- H5C_t * cache_ptr;
- H5C_cache_entry_t *parent_entry = (H5C_cache_entry_t *)parent_thing; /* Ptr to parent thing's entry */
- H5C_cache_entry_t *child_entry = (H5C_cache_entry_t *)child_thing; /* Ptr to child thing's entry */
- herr_t ret_value = SUCCEED; /* Return value */
+/*-------------------------------------------------------------------------
+ * Function: H5C_create_flush_dependency()
+ *
+ * Purpose: Initiates a parent<->child entry flush dependency. The parent
+ * entry must be pinned or protected at the time of call, and must
+ * have all dependencies removed before the cache can shut down.
+ *
+ * Note: Flush dependencies in the cache indicate that a child entry
+ * must be flushed to the file before its parent. (This is
+ * currently used to implement Single-Writer/Multiple-Reader (SWMR)
+ * I/O access for data structures in the file).
+ *
+ * Creating a flush dependency between two entries will also pin
+ * the parent entry.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * 3/05/09
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_create_flush_dependency(void *parent_thing, void *child_thing)
+{
+ H5C_t * cache_ptr;
+ H5C_cache_entry_t *parent_entry = (H5C_cache_entry_t *)parent_thing; /* Ptr to parent thing's entry */
+ H5C_cache_entry_t *child_entry = (H5C_cache_entry_t *)child_thing; /* Ptr to child thing's entry */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_NOAPI(FAIL)
- /* Sanity checks */
- HDassert(parent_entry);
- HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(H5F_addr_defined(parent_entry->addr));
- HDassert(child_entry);
- HDassert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(H5F_addr_defined(child_entry->addr));
- cache_ptr = parent_entry->cache_ptr;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr == child_entry->cache_ptr);
+ /* Sanity checks */
+ HDassert(parent_entry);
+ HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(H5F_addr_defined(parent_entry->addr));
+ HDassert(child_entry);
+ HDassert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(H5F_addr_defined(child_entry->addr));
+ cache_ptr = parent_entry->cache_ptr;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr == child_entry->cache_ptr);
#ifndef NDEBUG
- /* Make sure the parent is not already a parent */
- {
- unsigned u;
+ /* Make sure the parent is not already a parent */
+ {
+ unsigned u;
- for (u = 0; u < child_entry->flush_dep_nparents; u++)
- HDassert(child_entry->flush_dep_parent[u] != parent_entry);
- } /* end block */
-#endif /* NDEBUG */
+ for (u = 0; u < child_entry->flush_dep_nparents; u++)
+ HDassert(child_entry->flush_dep_parent[u] != parent_entry);
+ } /* end block */
+#endif /* NDEBUG */
- /* More sanity checks */
- if (child_entry == parent_entry)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL,
- "Child entry flush dependency parent can't be itself")
- if (!(parent_entry->is_protected || parent_entry->is_pinned))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Parent entry isn't pinned or protected")
+ /* More sanity checks */
+ if (child_entry == parent_entry)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Child entry flush dependency parent can't be itself")
+ if (!(parent_entry->is_protected || parent_entry->is_pinned))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Parent entry isn't pinned or protected")
- /* Check for parent not pinned */
- if (!parent_entry->is_pinned) {
- /* Sanity check */
- HDassert(parent_entry->flush_dep_nchildren == 0);
- HDassert(!parent_entry->pinned_from_client);
- HDassert(!parent_entry->pinned_from_cache);
+ /* Check for parent not pinned */
+ if (!parent_entry->is_pinned) {
+ /* Sanity check */
+ HDassert(parent_entry->flush_dep_nchildren == 0);
+ HDassert(!parent_entry->pinned_from_client);
+ HDassert(!parent_entry->pinned_from_cache);
- /* Pin the parent entry */
- parent_entry->is_pinned = TRUE;
- H5C__UPDATE_STATS_FOR_PIN(cache_ptr, parent_entry)
- } /* end else */
+ /* Pin the parent entry */
+ parent_entry->is_pinned = TRUE;
+ H5C__UPDATE_STATS_FOR_PIN(cache_ptr, parent_entry)
+ } /* end else */
- /* Mark the entry as pinned from the cache's action (possibly redundantly) */
- parent_entry->pinned_from_cache = TRUE;
+ /* Mark the entry as pinned from the cache's action (possibly redundantly) */
+ parent_entry->pinned_from_cache = TRUE;
- /* Check if we need to resize the child's parent array */
- if (child_entry->flush_dep_nparents >= child_entry->flush_dep_parent_nalloc) {
- if (child_entry->flush_dep_parent_nalloc == 0) {
- /* Array does not exist yet, allocate it */
- HDassert(!child_entry->flush_dep_parent);
+ /* Check if we need to resize the child's parent array */
+ if (child_entry->flush_dep_nparents >= child_entry->flush_dep_parent_nalloc) {
+ if (child_entry->flush_dep_parent_nalloc == 0) {
+ /* Array does not exist yet, allocate it */
+ HDassert(!child_entry->flush_dep_parent);
- if (NULL == (child_entry->flush_dep_parent =
- H5FL_SEQ_MALLOC(H5C_cache_entry_ptr_t, H5C_FLUSH_DEP_PARENT_INIT)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
- "memory allocation failed for flush dependency parent list")
- child_entry->flush_dep_parent_nalloc = H5C_FLUSH_DEP_PARENT_INIT;
- } /* end if */
- else {
- /* Resize existing array */
- HDassert(child_entry->flush_dep_parent);
-
- if (NULL == (child_entry->flush_dep_parent =
- H5FL_SEQ_REALLOC(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent,
- 2 * child_entry->flush_dep_parent_nalloc)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
- "memory allocation failed for flush dependency parent list")
- child_entry->flush_dep_parent_nalloc *= 2;
- } /* end else */
- cache_ptr->entry_fd_height_change_counter++;
+ if (NULL == (child_entry->flush_dep_parent =
+ H5FL_SEQ_MALLOC(H5C_cache_entry_ptr_t, H5C_FLUSH_DEP_PARENT_INIT)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
+ "memory allocation failed for flush dependency parent list")
+ child_entry->flush_dep_parent_nalloc = H5C_FLUSH_DEP_PARENT_INIT;
} /* end if */
+ else {
+ /* Resize existing array */
+ HDassert(child_entry->flush_dep_parent);
+
+ if (NULL == (child_entry->flush_dep_parent =
+ H5FL_SEQ_REALLOC(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent,
+ 2 * child_entry->flush_dep_parent_nalloc)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
+ "memory allocation failed for flush dependency parent list")
+ child_entry->flush_dep_parent_nalloc *= 2;
+ } /* end else */
+ cache_ptr->entry_fd_height_change_counter++;
+ } /* end if */
- /* Add the dependency to the child's parent array */
- child_entry->flush_dep_parent[child_entry->flush_dep_nparents] = parent_entry;
- child_entry->flush_dep_nparents++;
+ /* Add the dependency to the child's parent array */
+ child_entry->flush_dep_parent[child_entry->flush_dep_nparents] = parent_entry;
+ child_entry->flush_dep_nparents++;
- /* Increment parent's number of children */
- parent_entry->flush_dep_nchildren++;
+ /* Increment parent's number of children */
+ parent_entry->flush_dep_nchildren++;
- /* Adjust the number of dirty children */
- if (child_entry->is_dirty) {
- /* Sanity check */
- HDassert(parent_entry->flush_dep_ndirty_children < parent_entry->flush_dep_nchildren);
+ /* Adjust the number of dirty children */
+ if (child_entry->is_dirty) {
+ /* Sanity check */
+ HDassert(parent_entry->flush_dep_ndirty_children < parent_entry->flush_dep_nchildren);
- parent_entry->flush_dep_ndirty_children++;
+ parent_entry->flush_dep_ndirty_children++;
- /* If the parent has a 'notify' callback, send a 'child entry dirtied' notice */
- if (parent_entry->type->notify &&
- (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_DIRTIED, parent_entry) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify parent about child entry dirty flag set")
- } /* end if */
+ /* If the parent has a 'notify' callback, send a 'child entry dirtied' notice */
+ if (parent_entry->type->notify &&
+ (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_DIRTIED, parent_entry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify parent about child entry dirty flag set")
+ } /* end if */
- /* adjust the parent's number of unserialized children. Note
- * that it is possible for and entry to be clean and unserialized.
- */
- if (!child_entry->image_up_to_date) {
- HDassert(parent_entry->flush_dep_nunser_children < parent_entry->flush_dep_nchildren);
+ /* adjust the parent's number of unserialized children. Note
+ * that it is possible for an entry to be clean and unserialized.
+ */
+ if (!child_entry->image_up_to_date) {
+ HDassert(parent_entry->flush_dep_nunser_children < parent_entry->flush_dep_nchildren);
- parent_entry->flush_dep_nunser_children++;
+ parent_entry->flush_dep_nunser_children++;
- /* If the parent has a 'notify' callback, send a 'child entry unserialized' notice */
- if (parent_entry->type->notify &&
- (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED, parent_entry) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify parent about child entry serialized flag reset")
- } /* end if */
+ /* If the parent has a 'notify' callback, send a 'child entry unserialized' notice */
+ if (parent_entry->type->notify &&
+ (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED, parent_entry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify parent about child entry serialized flag reset")
+ } /* end if */
- /* Post-conditions, for successful operation */
- HDassert(parent_entry->is_pinned);
- HDassert(parent_entry->flush_dep_nchildren > 0);
- HDassert(child_entry->flush_dep_parent);
- HDassert(child_entry->flush_dep_nparents > 0);
- HDassert(child_entry->flush_dep_parent_nalloc > 0);
+ /* Post-conditions, for successful operation */
+ HDassert(parent_entry->is_pinned);
+ HDassert(parent_entry->flush_dep_nchildren > 0);
+ HDassert(child_entry->flush_dep_parent);
+ HDassert(child_entry->flush_dep_nparents > 0);
+ HDassert(child_entry->flush_dep_parent_nalloc > 0);
#ifndef NDEBUG
- H5C__assert_flush_dep_nocycle(parent_entry, child_entry);
+ H5C__assert_flush_dep_nocycle(parent_entry, child_entry);
#endif /* NDEBUG */
done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C_create_flush_dependency() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_create_flush_dependency() */
- /*-------------------------------------------------------------------------
- * Function: H5C_destroy_flush_dependency()
- *
- * Purpose: Terminates a parent<-> child entry flush dependency. The
- * parent entry must be pinned.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * 3/05/09
- *
- *-------------------------------------------------------------------------
- */
- herr_t H5C_destroy_flush_dependency(void *parent_thing, void *child_thing)
- {
- H5C_t * cache_ptr;
- H5C_cache_entry_t *parent_entry = (H5C_cache_entry_t *)parent_thing; /* Ptr to parent entry */
- H5C_cache_entry_t *child_entry = (H5C_cache_entry_t *)child_thing; /* Ptr to child entry */
- unsigned u; /* Local index variable */
- herr_t ret_value = SUCCEED; /* Return value */
+/*-------------------------------------------------------------------------
+ * Function: H5C_destroy_flush_dependency()
+ *
+ * Purpose: Terminates a parent <-> child entry flush dependency. The
+ * parent entry must be pinned.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * 3/05/09
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_destroy_flush_dependency(void *parent_thing, void *child_thing)
+{
+ H5C_t * cache_ptr;
+ H5C_cache_entry_t *parent_entry = (H5C_cache_entry_t *)parent_thing; /* Ptr to parent entry */
+ H5C_cache_entry_t *child_entry = (H5C_cache_entry_t *)child_thing; /* Ptr to child entry */
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_NOAPI(FAIL)
- /* Sanity checks */
- HDassert(parent_entry);
- HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(H5F_addr_defined(parent_entry->addr));
- HDassert(child_entry);
- HDassert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(H5F_addr_defined(child_entry->addr));
- cache_ptr = parent_entry->cache_ptr;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr == child_entry->cache_ptr);
-
- /* Usage checks */
- if (!parent_entry->is_pinned)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry isn't pinned")
- if (NULL == child_entry->flush_dep_parent)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL,
- "Child entry doesn't have a flush dependency parent array")
- if (0 == parent_entry->flush_dep_nchildren)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL,
- "Parent entry flush dependency ref. count has no child dependencies")
-
- /* Search for parent in child's parent array. This is a linear search
- * because we do not expect large numbers of parents. If this changes, we
- * may wish to change the parent array to a skip list */
- for (u = 0; u < child_entry->flush_dep_nparents; u++)
- if (child_entry->flush_dep_parent[u] == parent_entry)
- break;
- if (u == child_entry->flush_dep_nparents)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL,
- "Parent entry isn't a flush dependency parent for child entry")
-
- /* Remove parent entry from child's parent array */
- if (u < (child_entry->flush_dep_nparents - 1))
- HDmemmove(&child_entry->flush_dep_parent[u], &child_entry->flush_dep_parent[u + 1],
- (child_entry->flush_dep_nparents - u - 1) * sizeof(child_entry->flush_dep_parent[0]));
- child_entry->flush_dep_nparents--;
-
- /* Adjust parent entry's nchildren and unpin parent if it goes to zero */
- parent_entry->flush_dep_nchildren--;
- if (0 == parent_entry->flush_dep_nchildren) {
- /* Sanity check */
- HDassert(parent_entry->pinned_from_cache);
+ /* Sanity checks */
+ HDassert(parent_entry);
+ HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(H5F_addr_defined(parent_entry->addr));
+ HDassert(child_entry);
+ HDassert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(H5F_addr_defined(child_entry->addr));
+ cache_ptr = parent_entry->cache_ptr;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr == child_entry->cache_ptr);
+
+ /* Usage checks */
+ if (!parent_entry->is_pinned)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry isn't pinned")
+ if (NULL == child_entry->flush_dep_parent)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL,
+ "Child entry doesn't have a flush dependency parent array")
+ if (0 == parent_entry->flush_dep_nchildren)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL,
+ "Parent entry flush dependency ref. count has no child dependencies")
+
+ /* Search for parent in child's parent array. This is a linear search
+ * because we do not expect large numbers of parents. If this changes, we
+ * may wish to change the parent array to a skip list */
+ for (u = 0; u < child_entry->flush_dep_nparents; u++)
+ if (child_entry->flush_dep_parent[u] == parent_entry)
+ break;
+ if (u == child_entry->flush_dep_nparents)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL,
+ "Parent entry isn't a flush dependency parent for child entry")
+
+ /* Remove parent entry from child's parent array */
+ if (u < (child_entry->flush_dep_nparents - 1))
+ HDmemmove(&child_entry->flush_dep_parent[u], &child_entry->flush_dep_parent[u + 1],
+ (child_entry->flush_dep_nparents - u - 1) * sizeof(child_entry->flush_dep_parent[0]));
+ child_entry->flush_dep_nparents--;
+
+ /* Adjust parent entry's nchildren and unpin parent if it goes to zero */
+ parent_entry->flush_dep_nchildren--;
+ if (0 == parent_entry->flush_dep_nchildren) {
+ /* Sanity check */
+ HDassert(parent_entry->pinned_from_cache);
- /* Check if we should unpin parent entry now */
- if (!parent_entry->pinned_from_client)
- if (H5C__unpin_entry_real(cache_ptr, parent_entry, TRUE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry")
+ /* Check if we should unpin parent entry now */
+ if (!parent_entry->pinned_from_client)
+ if (H5C__unpin_entry_real(cache_ptr, parent_entry, TRUE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry")
- /* Mark the entry as unpinned from the cache's action */
- parent_entry->pinned_from_cache = FALSE;
- } /* end if */
+ /* Mark the entry as unpinned from the cache's action */
+ parent_entry->pinned_from_cache = FALSE;
+ } /* end if */
- /* Adjust parent entry's ndirty_children */
- if (child_entry->is_dirty) {
- /* Sanity check */
- HDassert(parent_entry->flush_dep_ndirty_children > 0);
+ /* Adjust parent entry's ndirty_children */
+ if (child_entry->is_dirty) {
+ /* Sanity check */
+ HDassert(parent_entry->flush_dep_ndirty_children > 0);
- parent_entry->flush_dep_ndirty_children--;
+ parent_entry->flush_dep_ndirty_children--;
- /* If the parent has a 'notify' callback, send a 'child entry cleaned' notice */
- if (parent_entry->type->notify &&
- (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_CLEANED, parent_entry) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify parent about child entry dirty flag reset")
- } /* end if */
+ /* If the parent has a 'notify' callback, send a 'child entry cleaned' notice */
+ if (parent_entry->type->notify &&
+ (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_CLEANED, parent_entry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify parent about child entry dirty flag reset")
+ } /* end if */
- /* adjust parent entry's number of unserialized children */
- if (!child_entry->image_up_to_date) {
- HDassert(parent_entry->flush_dep_nunser_children > 0);
+ /* adjust parent entry's number of unserialized children */
+ if (!child_entry->image_up_to_date) {
+ HDassert(parent_entry->flush_dep_nunser_children > 0);
- parent_entry->flush_dep_nunser_children--;
+ parent_entry->flush_dep_nunser_children--;
- /* If the parent has a 'notify' callback, send a 'child entry serialized' notice */
- if (parent_entry->type->notify &&
- (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_SERIALIZED, parent_entry) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify parent about child entry serialized flag set")
- } /* end if */
+ /* If the parent has a 'notify' callback, send a 'child entry serialized' notice */
+ if (parent_entry->type->notify &&
+ (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_SERIALIZED, parent_entry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify parent about child entry serialized flag set")
+ } /* end if */
- /* Shrink or free the parent array if apporpriate */
- if (child_entry->flush_dep_nparents == 0) {
- child_entry->flush_dep_parent =
- H5FL_SEQ_FREE(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent);
- child_entry->flush_dep_parent_nalloc = 0;
- } /* end if */
- else if (child_entry->flush_dep_parent_nalloc > H5C_FLUSH_DEP_PARENT_INIT &&
- child_entry->flush_dep_nparents <= (child_entry->flush_dep_parent_nalloc / 4)) {
- if (NULL == (child_entry->flush_dep_parent =
- H5FL_SEQ_REALLOC(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent,
- child_entry->flush_dep_parent_nalloc / 4)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
- "memory allocation failed for flush dependency parent list")
- child_entry->flush_dep_parent_nalloc /= 4;
- } /* end if */
+ /* Shrink or free the parent array if appropriate */
+ if (child_entry->flush_dep_nparents == 0) {
+ child_entry->flush_dep_parent = H5FL_SEQ_FREE(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent);
+ child_entry->flush_dep_parent_nalloc = 0;
+ } /* end if */
+ else if (child_entry->flush_dep_parent_nalloc > H5C_FLUSH_DEP_PARENT_INIT &&
+ child_entry->flush_dep_nparents <= (child_entry->flush_dep_parent_nalloc / 4)) {
+ if (NULL == (child_entry->flush_dep_parent =
+ H5FL_SEQ_REALLOC(H5C_cache_entry_ptr_t, child_entry->flush_dep_parent,
+ child_entry->flush_dep_parent_nalloc / 4)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
+ "memory allocation failed for flush dependency parent list")
+ child_entry->flush_dep_parent_nalloc /= 4;
+ } /* end if */
done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C_destroy_flush_dependency() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_destroy_flush_dependency() */
/*************************************************************************/
/**************************** Private Functions: *************************/
@@ -4180,7 +4576,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5C__pin_entry_from_client()
*
- * Purpose: Internal routine to pin a cache entry from a client action.
+ * Purpose: Internal routine to pin a cache entry from a client action.
*
* Return: Non-negative on success/Negative on failure
*
@@ -4189,544 +4585,900 @@ done:
*
*-------------------------------------------------------------------------
*/
-#if H5C_COLLECT_CACHE_STATS
- static herr_t H5C__pin_entry_from_client(H5C_t * cache_ptr, H5C_cache_entry_t * entry_ptr)
-#else
static herr_t
-H5C__pin_entry_from_client(H5C_t H5_ATTR_UNUSED *cache_ptr, H5C_cache_entry_t *entry_ptr)
+H5C__pin_entry_from_client(H5C_t
+#if !H5C_COLLECT_CACHE_STATS
+ H5_ATTR_UNUSED
#endif
- {
- herr_t ret_value = SUCCEED; /* Return value */
+ * cache_ptr,
+ H5C_cache_entry_t *entry_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_STATIC
+ FUNC_ENTER_STATIC
- /* Sanity checks */
- HDassert(cache_ptr);
- HDassert(entry_ptr);
- HDassert(entry_ptr->is_protected);
+ /* Sanity checks */
+ HDassert(cache_ptr);
+ HDassert(entry_ptr);
+ HDassert(entry_ptr->is_protected);
- /* Check if the entry is already pinned */
- if (entry_ptr->is_pinned) {
- /* Check if the entry was pinned through an explicit pin from a client */
- if (entry_ptr->pinned_from_client)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "entry is already pinned")
- } /* end if */
- else {
- entry_ptr->is_pinned = TRUE;
+ /* Check if the entry is already pinned */
+ if (entry_ptr->is_pinned) {
+ /* Check if the entry was pinned through an explicit pin from a client */
+ if (entry_ptr->pinned_from_client)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "entry is already pinned")
+ } /* end if */
+ else {
+ entry_ptr->is_pinned = TRUE;
- H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr)
- } /* end else */
+ H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr)
+ } /* end else */
- /* Mark that the entry was pinned through an explicit pin from a client */
- entry_ptr->pinned_from_client = TRUE;
+ /* Mark that the entry was pinned through an explicit pin from a client */
+ entry_ptr->pinned_from_client = TRUE;
done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__pin_entry_from_client() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__pin_entry_from_client() */
- /*-------------------------------------------------------------------------
- * Function: H5C__unpin_entry_real()
- *
- * Purpose: Internal routine to unpin a cache entry.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * 1/6/18
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__unpin_entry_real(H5C_t * cache_ptr, H5C_cache_entry_t * entry_ptr, hbool_t update_rp)
- {
- herr_t ret_value = SUCCEED; /* Return value */
+/*-------------------------------------------------------------------------
+ * Function: H5C__unpin_entry_real()
+ *
+ * Purpose: Internal routine to unpin a cache entry.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * 1/6/18
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__unpin_entry_real(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
#if H5C_DO_SANITY_CHECKS
- FUNC_ENTER_STATIC
+ FUNC_ENTER_STATIC
#else
FUNC_ENTER_STATIC_NOERR
#endif
- /* Sanity checking */
- HDassert(cache_ptr);
- HDassert(entry_ptr);
- HDassert(entry_ptr->is_pinned);
+ /* Sanity checking */
+ HDassert(cache_ptr);
+ HDassert(entry_ptr);
+ HDassert(entry_ptr->is_pinned);
- /* If requested, update the replacement policy if the entry is not protected */
- if (update_rp && !entry_ptr->is_protected)
- H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, FAIL)
+ /* If requested, update the replacement policy if the entry is not protected */
+ if (update_rp && !entry_ptr->is_protected)
+ H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, FAIL)
- /* Unpin the entry now */
- entry_ptr->is_pinned = FALSE;
+ /* Unpin the entry now */
+ entry_ptr->is_pinned = FALSE;
- /* Update the stats for an unpin operation */
- H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr)
+ /* Update the stats for an unpin operation */
+ H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr)
#if H5C_DO_SANITY_CHECKS
done:
#endif
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__unpin_entry_real() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__unpin_entry_real() */
- /*-------------------------------------------------------------------------
- * Function: H5C__unpin_entry_from_client()
- *
- * Purpose: Internal routine to unpin a cache entry from a client action.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * 3/24/09
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__unpin_entry_from_client(H5C_t * cache_ptr, H5C_cache_entry_t * entry_ptr,
- hbool_t update_rp)
- {
- herr_t ret_value = SUCCEED; /* Return value */
+/*-------------------------------------------------------------------------
+ * Function: H5C__unpin_entry_from_client()
+ *
+ * Purpose: Internal routine to unpin a cache entry from a client action.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * 3/24/09
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__unpin_entry_from_client(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr, hbool_t update_rp)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_STATIC
+ FUNC_ENTER_STATIC
- /* Sanity checking */
- HDassert(cache_ptr);
- HDassert(entry_ptr);
+ /* Sanity checking */
+ HDassert(cache_ptr);
+ HDassert(entry_ptr);
- /* Error checking (should be sanity checks?) */
- if (!entry_ptr->is_pinned)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "entry isn't pinned")
- if (!entry_ptr->pinned_from_client)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "entry wasn't pinned by cache client")
+ /* Error checking (should be sanity checks?) */
+ if (!entry_ptr->is_pinned)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "entry isn't pinned")
+ if (!entry_ptr->pinned_from_client)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "entry wasn't pinned by cache client")
- /* Check if the entry is not pinned from a flush dependency */
- if (!entry_ptr->pinned_from_cache)
- if (H5C__unpin_entry_real(cache_ptr, entry_ptr, update_rp) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "can't unpin entry")
+ /* Check if the entry is not pinned from a flush dependency */
+ if (!entry_ptr->pinned_from_cache)
+ if (H5C__unpin_entry_real(cache_ptr, entry_ptr, update_rp) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "can't unpin entry")
- /* Mark the entry as explicitly unpinned by the client */
- entry_ptr->pinned_from_client = FALSE;
+ /* Mark the entry as explicitly unpinned by the client */
+ entry_ptr->pinned_from_client = FALSE;
done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__unpin_entry_from_client() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__unpin_entry_from_client() */
- /*-------------------------------------------------------------------------
- *
- * Function: H5C__auto_adjust_cache_size
- *
- * Purpose: Obtain the current full cache hit rate, and compare it
- * with the hit rate thresholds for modifying cache size.
- * If one of the thresholds has been crossed, adjusts the
- * size of the cache accordingly.
- *
- * The function then resets the full cache hit rate
- * statistics, and exits.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * an attempt to flush a protected item.
- *
- *
- * Programmer: John Mainzer, 10/7/04
- *
- *-------------------------------------------------------------------------
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__auto_adjust_cache_size
+ *
+ * Purpose: Obtain the current full cache hit rate, and compare it
+ * with the hit rate thresholds for modifying cache size.
+ * If one of the thresholds has been crossed, adjusts the
+ * size of the cache accordingly.
+ *
+ * The function then resets the full cache hit rate
+ * statistics, and exits.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * an attempt to flush a protected item.
+ *
+ *
+ * Programmer: John Mainzer, 10/7/04
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__auto_adjust_cache_size(H5F_t *f, hbool_t write_permitted)
+{
+ H5C_t * cache_ptr = f->shared->cache;
+ hbool_t reentrant_call = FALSE;
+ hbool_t inserted_epoch_marker = FALSE;
+ size_t new_max_cache_size = 0;
+ size_t old_max_cache_size = 0;
+ size_t new_min_clean_size = 0;
+ size_t old_min_clean_size = 0;
+ double hit_rate;
+ enum H5C_resize_status status = in_spec; /* will change if needed */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ HDassert(f);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->cache_accesses >= (cache_ptr->resize_ctl).epoch_length);
+ HDassert(0.0 <= (cache_ptr->resize_ctl).min_clean_fraction);
+ HDassert((cache_ptr->resize_ctl).min_clean_fraction <= 100.0);
+
+ /* check to see if cache_ptr->resize_in_progress is TRUE. If it is, this
+ * is a re-entrant call via a client callback called in the resize
+ * process. To avoid an infinite recursion, set reentrant_call to
+ * TRUE, and goto done.
*/
- static herr_t H5C__auto_adjust_cache_size(H5F_t * f, hbool_t write_permitted)
- {
- H5C_t * cache_ptr = f->shared->cache;
- hbool_t reentrant_call = FALSE;
- hbool_t inserted_epoch_marker = FALSE;
- size_t new_max_cache_size = 0;
- size_t old_max_cache_size = 0;
- size_t new_min_clean_size = 0;
- size_t old_min_clean_size = 0;
- double hit_rate;
- enum H5C_resize_status status = in_spec; /* will change if needed */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI_NOINIT
-
- HDassert(f);
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->cache_accesses >= (cache_ptr->resize_ctl).epoch_length);
- HDassert(0.0 <= (cache_ptr->resize_ctl).min_clean_fraction);
- HDassert((cache_ptr->resize_ctl).min_clean_fraction <= 100.0);
-
- /* check to see if cache_ptr->resize_in_progress is TRUE. If it, this
- * is a re-entrant call via a client callback called in the resize
- * process. To avoid an infinite recursion, set reentrant_call to
- * TRUE, and goto done.
- */
- if (cache_ptr->resize_in_progress) {
- reentrant_call = TRUE;
- HGOTO_DONE(SUCCEED)
- } /* end if */
+ if (cache_ptr->resize_in_progress) {
+ reentrant_call = TRUE;
+ HGOTO_DONE(SUCCEED)
+ } /* end if */
- cache_ptr->resize_in_progress = TRUE;
+ cache_ptr->resize_in_progress = TRUE;
- if (!cache_ptr->resize_enabled)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Auto cache resize disabled")
+ if (!cache_ptr->resize_enabled)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Auto cache resize disabled")
- HDassert(((cache_ptr->resize_ctl).incr_mode != H5C_incr__off) ||
- ((cache_ptr->resize_ctl).decr_mode != H5C_decr__off));
+ HDassert(((cache_ptr->resize_ctl).incr_mode != H5C_incr__off) ||
+ ((cache_ptr->resize_ctl).decr_mode != H5C_decr__off));
- if (H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate")
+ if (H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate")
- HDassert((0.0 <= hit_rate) && (hit_rate <= 1.0));
+ HDassert((0.0 <= hit_rate) && (hit_rate <= 1.0));
- switch ((cache_ptr->resize_ctl).incr_mode) {
- case H5C_incr__off:
- if (cache_ptr->size_increase_possible)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "size_increase_possible but H5C_incr__off?!?!?")
- break;
+ switch ((cache_ptr->resize_ctl).incr_mode) {
+ case H5C_incr__off:
+ if (cache_ptr->size_increase_possible)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "size_increase_possible but H5C_incr__off?!?!?")
+ break;
- case H5C_incr__threshold:
- if (hit_rate < (cache_ptr->resize_ctl).lower_hr_threshold) {
+ case H5C_incr__threshold:
+ if (hit_rate < (cache_ptr->resize_ctl).lower_hr_threshold) {
- if (!cache_ptr->size_increase_possible) {
+ if (!cache_ptr->size_increase_possible) {
- status = increase_disabled;
+ status = increase_disabled;
+ }
+ else if (cache_ptr->max_cache_size >= (cache_ptr->resize_ctl).max_size) {
+
+ HDassert(cache_ptr->max_cache_size == (cache_ptr->resize_ctl).max_size);
+ status = at_max_size;
+ }
+ else if (!cache_ptr->cache_full) {
+
+ status = not_full;
+ }
+ else {
+
+ new_max_cache_size =
+ (size_t)(((double)(cache_ptr->max_cache_size)) * (cache_ptr->resize_ctl).increment);
+
+ /* clip to max size if necessary */
+ if (new_max_cache_size > (cache_ptr->resize_ctl).max_size) {
+
+ new_max_cache_size = (cache_ptr->resize_ctl).max_size;
}
- else if (cache_ptr->max_cache_size >= (cache_ptr->resize_ctl).max_size) {
- HDassert(cache_ptr->max_cache_size == (cache_ptr->resize_ctl).max_size);
- status = at_max_size;
+ /* clip to max increment if necessary */
+ if (((cache_ptr->resize_ctl).apply_max_increment) &&
+ ((cache_ptr->max_cache_size + (cache_ptr->resize_ctl).max_increment) <
+ new_max_cache_size)) {
+
+ new_max_cache_size =
+ cache_ptr->max_cache_size + (cache_ptr->resize_ctl).max_increment;
}
- else if (!cache_ptr->cache_full) {
- status = not_full;
+ status = increase;
+ }
+ }
+ break;
+
+ default:
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode")
+ }
+
+ /* If the decr_mode is either age out or age out with threshold, we
+ * must run the marker maintenance code, whether we run the size
+ * reduction code or not. We do this in two places -- here we
+ * insert a new marker if the number of active epoch markers is
+ * less than the current epochs before eviction, and after
+ * the ageout call, we cycle the markers.
+ *
+ * However, we can't call the ageout code or cycle the markers
+ * unless there was a full complement of markers in place on
+ * entry. The inserted_epoch_marker flag is used to track this.
+ */
+
+ if ((((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out) ||
+ ((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out_with_threshold)) &&
+ (cache_ptr->epoch_markers_active < (cache_ptr->resize_ctl).epochs_before_eviction)) {
+
+ if (H5C__autoadjust__ageout__insert_new_marker(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't insert new epoch marker")
+
+ inserted_epoch_marker = TRUE;
+ }
+
+ /* don't run the cache size decrease code unless the cache size
+ * increase code is disabled, or the size increase code sees no need
+ * for action. In either case, status == in_spec at this point.
+ */
+
+ if (status == in_spec) {
+
+ switch ((cache_ptr->resize_ctl).decr_mode) {
+ case H5C_decr__off:
+ break;
+
+ case H5C_decr__threshold:
+ if (hit_rate > (cache_ptr->resize_ctl).upper_hr_threshold) {
+
+ if (!cache_ptr->size_decrease_possible) {
+
+ status = decrease_disabled;
+ }
+ else if (cache_ptr->max_cache_size <= (cache_ptr->resize_ctl).min_size) {
+
+ HDassert(cache_ptr->max_cache_size == (cache_ptr->resize_ctl).min_size);
+ status = at_min_size;
}
else {
new_max_cache_size = (size_t)(((double)(cache_ptr->max_cache_size)) *
- (cache_ptr->resize_ctl).increment);
+ (cache_ptr->resize_ctl).decrement);
- /* clip to max size if necessary */
- if (new_max_cache_size > (cache_ptr->resize_ctl).max_size) {
+ /* clip to min size if necessary */
+ if (new_max_cache_size < (cache_ptr->resize_ctl).min_size) {
- new_max_cache_size = (cache_ptr->resize_ctl).max_size;
+ new_max_cache_size = (cache_ptr->resize_ctl).min_size;
}
- /* clip to max increment if necessary */
- if (((cache_ptr->resize_ctl).apply_max_increment) &&
- ((cache_ptr->max_cache_size + (cache_ptr->resize_ctl).max_increment) <
- new_max_cache_size)) {
+ /* clip to max decrement if necessary */
+ if (((cache_ptr->resize_ctl).apply_max_decrement) &&
+ (((cache_ptr->resize_ctl).max_decrement + new_max_cache_size) <
+ cache_ptr->max_cache_size)) {
new_max_cache_size =
- cache_ptr->max_cache_size + (cache_ptr->resize_ctl).max_increment;
+ cache_ptr->max_cache_size - (cache_ptr->resize_ctl).max_decrement;
}
- status = increase;
+ status = decrease;
}
}
break;
+ case H5C_decr__age_out_with_threshold:
+ case H5C_decr__age_out:
+ if (!inserted_epoch_marker) {
+ if (!cache_ptr->size_decrease_possible)
+ status = decrease_disabled;
+ else {
+ if (H5C__autoadjust__ageout(f, hit_rate, &status, &new_max_cache_size,
+ write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ageout code failed")
+ } /* end else */
+ } /* end if */
+ break;
+
default:
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode")
}
+ }
+
+ /* cycle the epoch markers here if appropriate */
+ if ((((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out) ||
+ ((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out_with_threshold)) &&
+ (!inserted_epoch_marker)) {
+
+ /* move last epoch marker to the head of the LRU list */
+ if (H5C__autoadjust__ageout__cycle_epoch_marker(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error cycling epoch marker")
+ }
+
+ if ((status == increase) || (status == decrease)) {
- /* If the decr_mode is either age out or age out with threshold, we
- * must run the marker maintenance code, whether we run the size
- * reduction code or not. We do this in two places -- here we
- * insert a new marker if the number of active epoch markers is
- * is less than the the current epochs before eviction, and after
- * the ageout call, we cycle the markers.
+ old_max_cache_size = cache_ptr->max_cache_size;
+ old_min_clean_size = cache_ptr->min_clean_size;
+
+ new_min_clean_size =
+ (size_t)((double)new_max_cache_size * ((cache_ptr->resize_ctl).min_clean_fraction));
+
+ /* new_min_clean_size is of size_t, and thus must be non-negative.
+ * Hence we have
+ *
+ * ( 0 <= new_min_clean_size ).
*
- * However, we can't call the ageout code or cycle the markers
- * unless there was a full complement of markers in place on
- * entry. The inserted_epoch_marker flag is used to track this.
+ * by definition.
*/
+ HDassert(new_min_clean_size <= new_max_cache_size);
+ HDassert((cache_ptr->resize_ctl).min_size <= new_max_cache_size);
+ HDassert(new_max_cache_size <= (cache_ptr->resize_ctl).max_size);
- if ((((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out) ||
- ((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out_with_threshold)) &&
- (cache_ptr->epoch_markers_active < (cache_ptr->resize_ctl).epochs_before_eviction)) {
+ cache_ptr->max_cache_size = new_max_cache_size;
+ cache_ptr->min_clean_size = new_min_clean_size;
- if (H5C__autoadjust__ageout__insert_new_marker(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't insert new epoch marker")
+ if (status == increase) {
- inserted_epoch_marker = TRUE;
+ cache_ptr->cache_full = FALSE;
}
+ else if (status == decrease) {
- /* don't run the cache size decrease code unless the cache size
- * increase code is disabled, or the size increase code sees no need
- * for action. In either case, status == in_spec at this point.
- */
+ cache_ptr->size_decreased = TRUE;
+ }
- if (status == in_spec) {
+ /* update flash cache size increase fields as appropriate */
+ if (cache_ptr->flash_size_increase_possible) {
- switch ((cache_ptr->resize_ctl).decr_mode) {
- case H5C_decr__off:
+ switch ((cache_ptr->resize_ctl).flash_incr_mode) {
+ case H5C_flash_incr__off:
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
+ "flash_size_increase_possible but H5C_flash_incr__off?!")
break;
- case H5C_decr__threshold:
- if (hit_rate > (cache_ptr->resize_ctl).upper_hr_threshold) {
+ case H5C_flash_incr__add_space:
+ cache_ptr->flash_size_increase_threshold = (size_t)(
+ ((double)(cache_ptr->max_cache_size)) * ((cache_ptr->resize_ctl).flash_threshold));
+ break;
- if (!cache_ptr->size_decrease_possible) {
+ default: /* should be unreachable */
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
+ break;
+ }
+ }
+ }
- status = decrease_disabled;
- }
- else if (cache_ptr->max_cache_size <= (cache_ptr->resize_ctl).min_size) {
+ if ((cache_ptr->resize_ctl).rpt_fcn != NULL) {
+ (*((cache_ptr->resize_ctl).rpt_fcn))(cache_ptr, H5C__CURR_AUTO_RESIZE_RPT_FCN_VER, hit_rate, status,
+ old_max_cache_size, new_max_cache_size, old_min_clean_size,
+ new_min_clean_size);
+ }
- HDassert(cache_ptr->max_cache_size == (cache_ptr->resize_ctl).min_size);
- status = at_min_size;
- }
- else {
+ if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
+ /* this should be impossible... */
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed")
- new_max_cache_size = (size_t)(((double)(cache_ptr->max_cache_size)) *
- (cache_ptr->resize_ctl).decrement);
+done:
+ /* Sanity checks */
+ HDassert(cache_ptr->resize_in_progress);
+ if (!reentrant_call)
+ cache_ptr->resize_in_progress = FALSE;
+ HDassert((!reentrant_call) || (cache_ptr->resize_in_progress));
- /* clip to min size if necessary */
- if (new_max_cache_size < (cache_ptr->resize_ctl).min_size) {
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__auto_adjust_cache_size() */
- new_max_cache_size = (cache_ptr->resize_ctl).min_size;
- }
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__autoadjust__ageout
+ *
+ * Purpose: Implement the ageout automatic cache size decrement
+ * algorithm. Note that while this code evicts aged out
+ * entries, the code does not change the maximum cache size.
+ * Instead, the function simply computes the new value (if
+ * any change is indicated) and reports this value in
+ * *new_max_cache_size_ptr.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * an attempt to flush a protected item.
+ *
+ *
+ * Programmer: John Mainzer, 11/18/04
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__autoadjust__ageout(H5F_t *f, double hit_rate, enum H5C_resize_status *status_ptr,
+ size_t *new_max_cache_size_ptr, hbool_t write_permitted)
+{
+ H5C_t *cache_ptr = f->shared->cache;
+ size_t test_size;
+ herr_t ret_value = SUCCEED; /* Return value */
- /* clip to max decrement if necessary */
- if (((cache_ptr->resize_ctl).apply_max_decrement) &&
- (((cache_ptr->resize_ctl).max_decrement + new_max_cache_size) <
- cache_ptr->max_cache_size)) {
+ FUNC_ENTER_STATIC
- new_max_cache_size =
- cache_ptr->max_cache_size - (cache_ptr->resize_ctl).max_decrement;
- }
+ HDassert(f);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert((status_ptr) && (*status_ptr == in_spec));
+ HDassert((new_max_cache_size_ptr) && (*new_max_cache_size_ptr == 0));
- status = decrease;
- }
+ /* remove excess epoch markers if any */
+ if (cache_ptr->epoch_markers_active > (cache_ptr->resize_ctl).epochs_before_eviction)
+ if (H5C__autoadjust__ageout__remove_excess_markers(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't remove excess epoch markers")
+
+ if (((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out) ||
+ (((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out_with_threshold) &&
+ (hit_rate >= (cache_ptr->resize_ctl).upper_hr_threshold))) {
+
+ if (cache_ptr->max_cache_size > (cache_ptr->resize_ctl).min_size) {
+
+ /* evict aged out cache entries if appropriate... */
+ if (H5C__autoadjust__ageout__evict_aged_out_entries(f, write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error flushing aged out entries")
+
+ /* ... and then reduce cache size if appropriate */
+ if (cache_ptr->index_size < cache_ptr->max_cache_size) {
+
+ if ((cache_ptr->resize_ctl).apply_empty_reserve) {
+
+ test_size = (size_t)(((double)cache_ptr->index_size) /
+ (1 - (cache_ptr->resize_ctl).empty_reserve));
+
+ if (test_size < cache_ptr->max_cache_size) {
+
+ *status_ptr = decrease;
+ *new_max_cache_size_ptr = test_size;
}
- break;
+ }
+ else {
- case H5C_decr__age_out_with_threshold:
- case H5C_decr__age_out:
- if (!inserted_epoch_marker) {
- if (!cache_ptr->size_decrease_possible)
- status = decrease_disabled;
- else {
- if (H5C__autoadjust__ageout(f, hit_rate, &status, &new_max_cache_size,
- write_permitted) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ageout code failed")
- } /* end else */
- } /* end if */
- break;
+ *status_ptr = decrease;
+ *new_max_cache_size_ptr = cache_ptr->index_size;
+ }
- default:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode")
+ if (*status_ptr == decrease) {
+
+ /* clip to min size if necessary */
+ if (*new_max_cache_size_ptr < (cache_ptr->resize_ctl).min_size) {
+
+ *new_max_cache_size_ptr = (cache_ptr->resize_ctl).min_size;
+ }
+
+ /* clip to max decrement if necessary */
+ if (((cache_ptr->resize_ctl).apply_max_decrement) &&
+ (((cache_ptr->resize_ctl).max_decrement + *new_max_cache_size_ptr) <
+ cache_ptr->max_cache_size)) {
+
+ *new_max_cache_size_ptr =
+ cache_ptr->max_cache_size - (cache_ptr->resize_ctl).max_decrement;
+ }
+ }
}
}
+ else {
- /* cycle the epoch markers here if appropriate */
- if ((((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out) ||
- ((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out_with_threshold)) &&
- (!inserted_epoch_marker)) {
-
- /* move last epoch marker to the head of the LRU list */
- if (H5C__autoadjust__ageout__cycle_epoch_marker(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error cycling epoch marker")
+ *status_ptr = at_min_size;
}
+ }
- if ((status == increase) || (status == decrease)) {
+done:
- old_max_cache_size = cache_ptr->max_cache_size;
- old_min_clean_size = cache_ptr->min_clean_size;
+ FUNC_LEAVE_NOAPI(ret_value)
- new_min_clean_size =
- (size_t)((double)new_max_cache_size * ((cache_ptr->resize_ctl).min_clean_fraction));
+} /* H5C__autoadjust__ageout() */
- /* new_min_clean_size is of size_t, and thus must be non-negative.
- * Hence we have
- *
- * ( 0 <= new_min_clean_size ).
- *
- * by definition.
- */
- HDassert(new_min_clean_size <= new_max_cache_size);
- HDassert((cache_ptr->resize_ctl).min_size <= new_max_cache_size);
- HDassert(new_max_cache_size <= (cache_ptr->resize_ctl).max_size);
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__autoadjust__ageout__cycle_epoch_marker
+ *
+ * Purpose: Remove the oldest epoch marker from the LRU list,
+ * and reinsert it at the head of the LRU list. Also
+ * remove the epoch marker's index from the head of the
+ * ring buffer, and re-insert it at the tail of the ring
+ * buffer.
+ *
+ * Return: SUCCEED on success/FAIL on failure.
+ *
+ * Programmer: John Mainzer, 11/22/04
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t *cache_ptr)
+{
+ int i;
+ herr_t ret_value = SUCCEED; /* Return value */
- cache_ptr->max_cache_size = new_max_cache_size;
- cache_ptr->min_clean_size = new_min_clean_size;
+ FUNC_ENTER_STATIC
- if (status == increase) {
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- cache_ptr->cache_full = FALSE;
- }
- else if (status == decrease) {
+ if (cache_ptr->epoch_markers_active <= 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "No active epoch markers on entry?!?!?")
- cache_ptr->size_decreased = TRUE;
- }
+ /* remove the last marker from both the ring buffer and the LRU list */
- /* update flash cache size increase fields as appropriate */
- if (cache_ptr->flash_size_increase_possible) {
+ i = cache_ptr->epoch_marker_ringbuf[cache_ptr->epoch_marker_ringbuf_first];
- switch ((cache_ptr->resize_ctl).flash_incr_mode) {
- case H5C_flash_incr__off:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
- "flash_size_increase_possible but H5C_flash_incr__off?!")
- break;
+ cache_ptr->epoch_marker_ringbuf_first =
+ (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
- case H5C_flash_incr__add_space:
- cache_ptr->flash_size_increase_threshold =
- (size_t)(((double)(cache_ptr->max_cache_size)) *
- ((cache_ptr->resize_ctl).flash_threshold));
- break;
+ cache_ptr->epoch_marker_ringbuf_size -= 1;
- default: /* should be unreachable */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
- break;
- }
- }
- }
+ if (cache_ptr->epoch_marker_ringbuf_size < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow")
+ if ((cache_ptr->epoch_marker_active)[i] != TRUE)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
- if ((cache_ptr->resize_ctl).rpt_fcn != NULL) {
- (*((cache_ptr->resize_ctl).rpt_fcn))(cache_ptr, H5C__CURR_AUTO_RESIZE_RPT_FCN_VER, hit_rate,
- status, old_max_cache_size, new_max_cache_size,
- old_min_clean_size, new_min_clean_size);
- }
+ H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr, (cache_ptr)->LRU_tail_ptr,
+ (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size, (FAIL))
- if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
- /* this should be impossible... */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed")
+ /* now, re-insert it at the head of the LRU list, and at the tail of
+ * the ring buffer.
+ */
+
+ HDassert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i);
+ HDassert(((cache_ptr->epoch_markers)[i]).next == NULL);
+ HDassert(((cache_ptr->epoch_markers)[i]).prev == NULL);
+ cache_ptr->epoch_marker_ringbuf_last =
+ (cache_ptr->epoch_marker_ringbuf_last + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
+
+ (cache_ptr->epoch_marker_ringbuf)[cache_ptr->epoch_marker_ringbuf_last] = i;
+
+ cache_ptr->epoch_marker_ringbuf_size += 1;
+
+ if (cache_ptr->epoch_marker_ringbuf_size > H5C__MAX_EPOCH_MARKERS)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow")
+
+ H5C__DLL_PREPEND((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr, (cache_ptr)->LRU_tail_ptr,
+ (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size, (FAIL))
done:
- /* Sanity checks */
- HDassert(cache_ptr->resize_in_progress);
- if (!reentrant_call)
- cache_ptr->resize_in_progress = FALSE;
- HDassert((!reentrant_call) || (cache_ptr->resize_in_progress));
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__auto_adjust_cache_size() */
+ FUNC_LEAVE_NOAPI(ret_value)
- /*-------------------------------------------------------------------------
- *
- * Function: H5C__autoadjust__ageout
- *
- * Purpose: Implement the ageout automatic cache size decrement
- * algorithm. Note that while this code evicts aged out
- * entries, the code does not change the maximum cache size.
- * Instead, the function simply computes the new value (if
- * any change is indicated) and reports this value in
- * *new_max_cache_size_ptr.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * an attempt to flush a protected item.
- *
- *
- * Programmer: John Mainzer, 11/18/04
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__autoadjust__ageout(H5F_t * f, double hit_rate, enum H5C_resize_status *status_ptr,
- size_t *new_max_cache_size_ptr, hbool_t write_permitted)
- {
- H5C_t *cache_ptr = f->shared->cache;
- size_t test_size;
- herr_t ret_value = SUCCEED; /* Return value */
+} /* H5C__autoadjust__ageout__cycle_epoch_marker() */
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__autoadjust__ageout__evict_aged_out_entries
+ *
+ * Purpose: Evict clean entries in the cache that haven't
+ * been accessed for at least
+ * (cache_ptr->resize_ctl).epochs_before_eviction epochs,
+ * and flush dirty entries that haven't been accessed for
+ * that amount of time.
+ *
+ * Depending on configuration, the function will either
+ * flush or evict all such entries, or all such entries it
+ * encounters until it has freed the maximum amount of space
+ * allowed under the maximum decrement.
+ *
+ * If we are running in parallel mode, writes may not be
+ * permitted. If so, the function simply skips any dirty
+ * entries it may encounter.
+ *
+ * The function makes no attempt to maintain the minimum
+ * clean size, as there is no guarantee that the cache size
+ * will be changed.
+ *
+ * If there is no cache size change, the minimum clean size
+ * constraint will be met through a combination of clean
+ * entries and free space in the cache.
+ *
+ * If there is a cache size reduction, the minimum clean size
+ * will be re-calculated, and will be enforced the next time
+ * we have to make space in the cache.
+ *
+ * Observe that this function cannot occasion a read.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 11/22/04
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t *f, hbool_t write_permitted)
+{
+ H5C_t * cache_ptr = f->shared->cache;
+ size_t eviction_size_limit;
+ size_t bytes_evicted = 0;
+ hbool_t prev_is_dirty = FALSE;
+ hbool_t restart_scan;
+ H5C_cache_entry_t *entry_ptr;
+ H5C_cache_entry_t *next_ptr;
+ H5C_cache_entry_t *prev_ptr;
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
- HDassert(f);
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert((status_ptr) && (*status_ptr == in_spec));
- HDassert((new_max_cache_size_ptr) && (*new_max_cache_size_ptr == 0));
+ HDassert(f);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- /* remove excess epoch markers if any */
- if (cache_ptr->epoch_markers_active > (cache_ptr->resize_ctl).epochs_before_eviction)
- if (H5C__autoadjust__ageout__remove_excess_markers(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't remove excess epoch markers")
+ /* if there is a limit on the amount that the cache size can be decreased
+ * in any one round of the cache size reduction algorithm, load that
+ * limit into eviction_size_limit. Otherwise, set eviction_size_limit
+ * to the equivalent of infinity. The current size of the index will
+ * do nicely.
+ */
+ if ((cache_ptr->resize_ctl).apply_max_decrement) {
- if (((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out) ||
- (((cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out_with_threshold) &&
- (hit_rate >= (cache_ptr->resize_ctl).upper_hr_threshold))) {
+ eviction_size_limit = (cache_ptr->resize_ctl).max_decrement;
+ }
+ else {
- if (cache_ptr->max_cache_size > (cache_ptr->resize_ctl).min_size) {
+ eviction_size_limit = cache_ptr->index_size; /* i.e. infinity */
+ }
- /* evict aged out cache entries if appropriate... */
- if (H5C__autoadjust__ageout__evict_aged_out_entries(f, write_permitted) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error flushing aged out entries")
+ if (write_permitted) {
- /* ... and then reduce cache size if appropriate */
- if (cache_ptr->index_size < cache_ptr->max_cache_size) {
+ restart_scan = FALSE;
+ entry_ptr = cache_ptr->LRU_tail_ptr;
- if ((cache_ptr->resize_ctl).apply_empty_reserve) {
+ while ((entry_ptr != NULL) && ((entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID) &&
+ (bytes_evicted < eviction_size_limit)) {
+ hbool_t skipping_entry = FALSE;
- test_size = (size_t)(((double)cache_ptr->index_size) /
- (1 - (cache_ptr->resize_ctl).empty_reserve));
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(!(entry_ptr->is_protected));
+ HDassert(!(entry_ptr->is_read_only));
+ HDassert((entry_ptr->ro_ref_count) == 0);
- if (test_size < cache_ptr->max_cache_size) {
+ next_ptr = entry_ptr->next;
+ prev_ptr = entry_ptr->prev;
- *status_ptr = decrease;
- *new_max_cache_size_ptr = test_size;
- }
- }
- else {
+ if (prev_ptr != NULL)
+ prev_is_dirty = prev_ptr->is_dirty;
- *status_ptr = decrease;
- *new_max_cache_size_ptr = cache_ptr->index_size;
- }
+ if (entry_ptr->is_dirty) {
+ HDassert(!entry_ptr->prefetched_dirty);
- if (*status_ptr == decrease) {
+ /* dirty corked entry is skipped */
+ if (entry_ptr->tag_info && entry_ptr->tag_info->corked)
+ skipping_entry = TRUE;
+ else {
+ /* reset entries_removed_counter and
+ * last_entry_removed_ptr prior to the call to
+ * H5C__flush_single_entry() so that we can spot
+ * unexpected removals of entries from the cache,
+ * and set the restart_scan flag if proceeding
+ * would be likely to cause us to scan an entry
+ * that is no longer in the cache.
+ */
+ cache_ptr->entries_removed_counter = 0;
+ cache_ptr->last_entry_removed_ptr = NULL;
- /* clip to min size if necessary */
- if (*new_max_cache_size_ptr < (cache_ptr->resize_ctl).min_size) {
+ if (H5C__flush_single_entry(f, entry_ptr, H5C__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
- *new_max_cache_size_ptr = (cache_ptr->resize_ctl).min_size;
- }
+ if (cache_ptr->entries_removed_counter > 1 ||
+ cache_ptr->last_entry_removed_ptr == prev_ptr)
+ restart_scan = TRUE;
+ } /* end else */
+ } /* end if */
+ else if (!entry_ptr->prefetched_dirty) {
- /* clip to max decrement if necessary */
- if (((cache_ptr->resize_ctl).apply_max_decrement) &&
- (((cache_ptr->resize_ctl).max_decrement + *new_max_cache_size_ptr) <
- cache_ptr->max_cache_size)) {
+ bytes_evicted += entry_ptr->size;
- *new_max_cache_size_ptr =
- cache_ptr->max_cache_size - (cache_ptr->resize_ctl).max_decrement;
- }
- }
- }
- }
+ if (H5C__flush_single_entry(
+ f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
+ } /* end else-if */
else {
+ HDassert(!entry_ptr->is_dirty);
+ HDassert(entry_ptr->prefetched_dirty);
- *status_ptr = at_min_size;
- }
- }
+ skipping_entry = TRUE;
+ } /* end else */
+
+ if (prev_ptr != NULL) {
+ if (skipping_entry)
+ entry_ptr = prev_ptr;
+ else if (restart_scan || (prev_ptr->is_dirty != prev_is_dirty) ||
+ (prev_ptr->next != next_ptr) || (prev_ptr->is_protected) || (prev_ptr->is_pinned)) {
+ /* Something has happened to the LRU -- start over
+ * from the tail.
+ */
+ restart_scan = FALSE;
+ entry_ptr = cache_ptr->LRU_tail_ptr;
+
+ H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
+ } /* end else-if */
+ else
+ entry_ptr = prev_ptr;
+ } /* end if */
+ else
+ entry_ptr = NULL;
+ } /* end while */
+
+ /* for now at least, don't bother to maintain the minimum clean size,
+ * as the cache should now be less than its maximum size. Due to
+ * the vagaries of the cache size reduction algorithm, we may not
+ * reduce the size of the cache.
+ *
+ * If we do, we will calculate a new minimum clean size, which will
+ * be enforced the next time we try to make space in the cache.
+ *
+ * If we don't, no action is necessary, as we have just evicted and/or
+ * flushed a bunch of entries and therefore the sum of the clean
+ * and free space in the cache must be greater than or equal to the
+ * min clean space requirement (assuming that requirement was met on
+ * entry).
+ */
+
+ } /* end if */
+ else /* ! write_permitted */ {
+ /* Since we are not allowed to write, all we can do is evict
+ * any clean entries that we may encounter before we either
+ * hit the eviction size limit, or encounter the epoch marker.
+ *
+ * If we are operating read only, this isn't an issue, as there
+ * will not be any dirty entries.
+ *
+ * If we are operating in R/W mode, all the dirty entries we
+ * skip will be flushed the next time we attempt to make space
+ * when writes are permitted. This may have some local
+ * performance implications, but it shouldn't cause any net
+ * slowdown.
+ */
+ HDassert(H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS);
+ entry_ptr = cache_ptr->LRU_tail_ptr;
+ while (entry_ptr != NULL && ((entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID) &&
+ (bytes_evicted < eviction_size_limit)) {
+ HDassert(!(entry_ptr->is_protected));
+
+ prev_ptr = entry_ptr->prev;
+
+ if (!(entry_ptr->is_dirty) && !(entry_ptr->prefetched_dirty))
+ if (H5C__flush_single_entry(
+ f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush clean entry")
+
+ /* just skip the entry if it is dirty, as we can't do
+ * anything with it now since we can't write.
+ *
+ * Since all entries are clean, serialize() will not be called,
+ * and thus we needn't test to see if the LRU has been changed
+ * out from under us.
+ */
+ entry_ptr = prev_ptr;
+ } /* end while */
+ } /* end else */
+
+ if (cache_ptr->index_size < cache_ptr->max_cache_size)
+ cache_ptr->cache_full = FALSE;
done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__autoadjust__ageout__evict_aged_out_entries() */
- FUNC_LEAVE_NOAPI(ret_value)
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__autoadjust__ageout__insert_new_marker
+ *
+ * Purpose: Find an unused marker cache entry, mark it as used, and
+ * insert it at the head of the LRU list. Also add the
+ * marker's index in the epoch_markers array.
+ *
+ * Return: SUCCEED on success/FAIL on failure.
+ *
+ * Programmer: John Mainzer, 11/19/04
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__autoadjust__ageout__insert_new_marker(H5C_t *cache_ptr)
+{
+ int i;
+ herr_t ret_value = SUCCEED; /* Return value */
- } /* H5C__autoadjust__ageout() */
+ FUNC_ENTER_STATIC
- /*-------------------------------------------------------------------------
- *
- * Function: H5C__autoadjust__ageout__cycle_epoch_marker
- *
- * Purpose: Remove the oldest epoch marker from the LRU list,
- * and reinsert it at the head of the LRU list. Also
- * remove the epoch marker's index from the head of the
- * ring buffer, and re-insert it at the tail of the ring
- * buffer.
- *
- * Return: SUCCEED on success/FAIL on failure.
- *
- * Programmer: John Mainzer, 11/22/04
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t * cache_ptr)
- {
- herr_t ret_value = SUCCEED; /* Return value */
- int i;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ if (cache_ptr->epoch_markers_active >= (cache_ptr->resize_ctl).epochs_before_eviction)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Already have a full complement of markers")
+
+ /* find an unused marker */
+ i = 0;
+ while ((cache_ptr->epoch_marker_active)[i] && i < H5C__MAX_EPOCH_MARKERS)
+ i++;
+
+ if (i >= H5C__MAX_EPOCH_MARKERS)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't find unused marker")
+
+ HDassert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i);
+ HDassert(((cache_ptr->epoch_markers)[i]).next == NULL);
+ HDassert(((cache_ptr->epoch_markers)[i]).prev == NULL);
- FUNC_ENTER_NOAPI_NOINIT
+ (cache_ptr->epoch_marker_active)[i] = TRUE;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ cache_ptr->epoch_marker_ringbuf_last =
+ (cache_ptr->epoch_marker_ringbuf_last + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
- if (cache_ptr->epoch_markers_active <= 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "No active epoch markers on entry?!?!?")
+ (cache_ptr->epoch_marker_ringbuf)[cache_ptr->epoch_marker_ringbuf_last] = i;
- /* remove the last marker from both the ring buffer and the LRU list */
+ cache_ptr->epoch_marker_ringbuf_size += 1;
- i = cache_ptr->epoch_marker_ringbuf[cache_ptr->epoch_marker_ringbuf_first];
+ if (cache_ptr->epoch_marker_ringbuf_size > H5C__MAX_EPOCH_MARKERS) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow")
+ }
+
+ H5C__DLL_PREPEND((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr, (cache_ptr)->LRU_tail_ptr,
+ (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size, (FAIL))
+
+ cache_ptr->epoch_markers_active += 1;
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C__autoadjust__ageout__insert_new_marker() */
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__autoadjust__ageout__remove_all_markers
+ *
+ * Purpose: Remove all epoch markers from the LRU list and mark them
+ * as inactive.
+ *
+ * Return: SUCCEED on success/FAIL on failure.
+ *
+ * Programmer: John Mainzer, 11/22/04
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__autoadjust__ageout__remove_all_markers(H5C_t *cache_ptr)
+{
+ int ring_buf_index;
+ int i;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ while (cache_ptr->epoch_markers_active > 0) {
+ /* get the index of the last epoch marker in the LRU list
+ * and remove it from the ring buffer.
+ */
+
+ ring_buf_index = cache_ptr->epoch_marker_ringbuf_first;
+ i = (cache_ptr->epoch_marker_ringbuf)[ring_buf_index];
cache_ptr->epoch_marker_ringbuf_first =
(cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
@@ -4735,2875 +5487,2851 @@ done:
if (cache_ptr->epoch_marker_ringbuf_size < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow")
+
if ((cache_ptr->epoch_marker_active)[i] != TRUE)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
+ /* remove the epoch marker from the LRU list */
H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr,
(cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size,
(FAIL))
- /* now, re-insert it at the head of the LRU list, and at the tail of
- * the ring buffer.
- */
+ /* mark the epoch marker as unused. */
+ (cache_ptr->epoch_marker_active)[i] = FALSE;
HDassert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i);
HDassert(((cache_ptr->epoch_markers)[i]).next == NULL);
HDassert(((cache_ptr->epoch_markers)[i]).prev == NULL);
- cache_ptr->epoch_marker_ringbuf_last =
- (cache_ptr->epoch_marker_ringbuf_last + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
-
- (cache_ptr->epoch_marker_ringbuf)[cache_ptr->epoch_marker_ringbuf_last] = i;
-
- cache_ptr->epoch_marker_ringbuf_size += 1;
+ /* decrement the number of active epoch markers */
+ cache_ptr->epoch_markers_active -= 1;
- if (cache_ptr->epoch_marker_ringbuf_size > H5C__MAX_EPOCH_MARKERS)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow")
+ HDassert(cache_ptr->epoch_markers_active == cache_ptr->epoch_marker_ringbuf_size);
+ }
- H5C__DLL_PREPEND((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr,
- (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size,
- (FAIL))
done:
- FUNC_LEAVE_NOAPI(ret_value)
+ FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__autoadjust__ageout__cycle_epoch_marker() */
+} /* H5C__autoadjust__ageout__remove_all_markers() */
- /*-------------------------------------------------------------------------
- *
- * Function: H5C__autoadjust__ageout__evict_aged_out_entries
- *
- * Purpose: Evict clean entries in the cache that haven't
- * been accessed for at least
- * (cache_ptr->resize_ctl).epochs_before_eviction epochs,
- * and flush dirty entries that haven't been accessed for
- * that amount of time.
- *
- * Depending on configuration, the function will either
- * flush or evict all such entries, or all such entries it
- * encounters until it has freed the maximum amount of space
- * allowed under the maximum decrement.
- *
- * If we are running in parallel mode, writes may not be
- * permitted. If so, the function simply skips any dirty
- * entries it may encounter.
- *
- * The function makes no attempt to maintain the minimum
- * clean size, as there is no guarantee that the cache size
- * will be changed.
- *
- * If there is no cache size change, the minimum clean size
- * constraint will be met through a combination of clean
- * entries and free space in the cache.
- *
- * If there is a cache size reduction, the minimum clean size
- * will be re-calculated, and will be enforced the next time
- * we have to make space in the cache.
- *
- * Observe that this function cannot occasion a read.
- *
- * Return: Non-negative on success/Negative on failure.
- *
- * Programmer: John Mainzer, 11/22/04
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f, hbool_t write_permitted)
- {
- H5C_t * cache_ptr = f->shared->cache;
- size_t eviction_size_limit;
- size_t bytes_evicted = 0;
- hbool_t prev_is_dirty = FALSE;
- hbool_t restart_scan;
- H5C_cache_entry_t *entry_ptr;
- H5C_cache_entry_t *next_ptr;
- H5C_cache_entry_t *prev_ptr;
- herr_t ret_value = SUCCEED; /* Return value */
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__autoadjust__ageout__remove_excess_markers
+ *
+ * Purpose: Remove epoch markers from the end of the LRU list and
+ * mark them as inactive until the number of active markers
+ * equals the current value of
+ * (cache_ptr->resize_ctl).epochs_before_eviction.
+ *
+ * Return: SUCCEED on success/FAIL on failure.
+ *
+ * Programmer: John Mainzer, 11/19/04
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__autoadjust__ageout__remove_excess_markers(H5C_t *cache_ptr)
+{
+ int ring_buf_index;
+ int i;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
- FUNC_ENTER_NOAPI_NOINIT
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(f);
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ if (cache_ptr->epoch_markers_active <= (cache_ptr->resize_ctl).epochs_before_eviction)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "no excess markers on entry")
- /* if there is a limit on the amount that the cache size can be decrease
- * in any one round of the cache size reduction algorithm, load that
- * limit into eviction_size_limit. Otherwise, set eviction_size_limit
- * to the equivalent of infinity. The current size of the index will
- * do nicely.
+ while (cache_ptr->epoch_markers_active > (cache_ptr->resize_ctl).epochs_before_eviction) {
+ /* get the index of the last epoch marker in the LRU list
+ * and remove it from the ring buffer.
*/
- if ((cache_ptr->resize_ctl).apply_max_decrement) {
- eviction_size_limit = (cache_ptr->resize_ctl).max_decrement;
- }
- else {
+ ring_buf_index = cache_ptr->epoch_marker_ringbuf_first;
+ i = (cache_ptr->epoch_marker_ringbuf)[ring_buf_index];
- eviction_size_limit = cache_ptr->index_size; /* i.e. infinity */
- }
+ cache_ptr->epoch_marker_ringbuf_first =
+ (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
- if (write_permitted) {
+ cache_ptr->epoch_marker_ringbuf_size -= 1;
- restart_scan = FALSE;
- entry_ptr = cache_ptr->LRU_tail_ptr;
+ if (cache_ptr->epoch_marker_ringbuf_size < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow")
+ if ((cache_ptr->epoch_marker_active)[i] != TRUE)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
- while ((entry_ptr != NULL) && ((entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID) &&
- (bytes_evicted < eviction_size_limit)) {
- hbool_t skipping_entry = FALSE;
+ /* remove the epoch marker from the LRU list */
+ H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr,
+ (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size,
+ (FAIL))
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(!(entry_ptr->is_protected));
- HDassert(!(entry_ptr->is_read_only));
- HDassert((entry_ptr->ro_ref_count) == 0);
+ /* mark the epoch marker as unused. */
+ (cache_ptr->epoch_marker_active)[i] = FALSE;
- next_ptr = entry_ptr->next;
- prev_ptr = entry_ptr->prev;
+ HDassert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i);
+ HDassert(((cache_ptr->epoch_markers)[i]).next == NULL);
+ HDassert(((cache_ptr->epoch_markers)[i]).prev == NULL);
- if (prev_ptr != NULL)
- prev_is_dirty = prev_ptr->is_dirty;
+ /* decrement the number of active epoch markers */
+ cache_ptr->epoch_markers_active -= 1;
- if (entry_ptr->is_dirty) {
- HDassert(!entry_ptr->prefetched_dirty);
+ HDassert(cache_ptr->epoch_markers_active == cache_ptr->epoch_marker_ringbuf_size);
+ }
- /* dirty corked entry is skipped */
- if (entry_ptr->tag_info && entry_ptr->tag_info->corked)
- skipping_entry = TRUE;
- else {
- /* reset entries_removed_counter and
- * last_entry_removed_ptr prior to the call to
- * H5C__flush_single_entry() so that we can spot
- * unexpected removals of entries from the cache,
- * and set the restart_scan flag if proceeding
- * would be likely to cause us to scan an entry
- * that is no longer in the cache.
- */
- cache_ptr->entries_removed_counter = 0;
- cache_ptr->last_entry_removed_ptr = NULL;
+done:
- if (H5C__flush_single_entry(f, entry_ptr, H5C__NO_FLAGS_SET) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
+ FUNC_LEAVE_NOAPI(ret_value)
- if (cache_ptr->entries_removed_counter > 1 ||
- cache_ptr->last_entry_removed_ptr == prev_ptr)
- restart_scan = TRUE;
- } /* end else */
- } /* end if */
- else if (!entry_ptr->prefetched_dirty) {
+} /* H5C__autoadjust__ageout__remove_excess_markers() */
- bytes_evicted += entry_ptr->size;
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__flash_increase_cache_size
+ *
+ * Purpose: If there is not at least new_entry_size - old_entry_size
+ * bytes of free space in the cache and the current
+ * max_cache_size is less than (cache_ptr->resize_ctl).max_size,
+ * perform a flash increase in the cache size and then reset
+ * the full cache hit rate statistics, and exit.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 12/31/07
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__flash_increase_cache_size(H5C_t *cache_ptr, size_t old_entry_size, size_t new_entry_size)
+{
+ size_t new_max_cache_size = 0;
+ size_t old_max_cache_size = 0;
+ size_t new_min_clean_size = 0;
+ size_t old_min_clean_size = 0;
+ size_t space_needed;
+ enum H5C_resize_status status = flash_increase; /* may change */
+ double hit_rate;
+ herr_t ret_value = SUCCEED; /* Return value */
- if (H5C__flush_single_entry(f, entry_ptr,
- H5C__FLUSH_INVALIDATE_FLAG |
- H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
- } /* end else-if */
- else {
- HDassert(!entry_ptr->is_dirty);
- HDassert(entry_ptr->prefetched_dirty);
+ FUNC_ENTER_STATIC
- skipping_entry = TRUE;
- } /* end else */
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->flash_size_increase_possible);
+ HDassert(new_entry_size > cache_ptr->flash_size_increase_threshold);
+ HDassert(old_entry_size < new_entry_size);
- if (prev_ptr != NULL) {
- if (skipping_entry)
- entry_ptr = prev_ptr;
- else if (restart_scan || (prev_ptr->is_dirty != prev_is_dirty) ||
- (prev_ptr->next != next_ptr) || (prev_ptr->is_protected) ||
- (prev_ptr->is_pinned)) {
- /* Something has happened to the LRU -- start over
- * from the tail.
- */
- restart_scan = FALSE;
- entry_ptr = cache_ptr->LRU_tail_ptr;
+ if (old_entry_size >= new_entry_size)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "old_entry_size >= new_entry_size")
- H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
- } /* end else-if */
- else
- entry_ptr = prev_ptr;
- } /* end if */
- else
- entry_ptr = NULL;
- } /* end while */
+ space_needed = new_entry_size - old_entry_size;
- /* for now at least, don't bother to maintain the minimum clean size,
- * as the cache should now be less than its maximum size. Due to
- * the vaguries of the cache size reduction algorthim, we may not
- * reduce the size of the cache.
- *
- * If we do, we will calculate a new minimum clean size, which will
- * be enforced the next time we try to make space in the cache.
- *
- * If we don't, no action is necessary, as we have just evicted and/or
- * or flushed a bunch of entries and therefore the sum of the clean
- * and free space in the cache must be greater than or equal to the
- * min clean space requirement (assuming that requirement was met on
- * entry).
- */
+ if (((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) &&
+ (cache_ptr->max_cache_size < (cache_ptr->resize_ctl).max_size)) {
- } /* end if */
- else /* ! write_permitted */ {
- /* Since we are not allowed to write, all we can do is evict
- * any clean entries that we may encounter before we either
- * hit the eviction size limit, or encounter the epoch marker.
- *
- * If we are operating read only, this isn't an issue, as there
- * will not be any dirty entries.
- *
- * If we are operating in R/W mode, all the dirty entries we
- * skip will be flushed the next time we attempt to make space
- * when writes are permitted. This may have some local
- * performance implications, but it shouldn't cause any net
- * slowdown.
- */
- HDassert(H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS);
- entry_ptr = cache_ptr->LRU_tail_ptr;
- while (entry_ptr != NULL && ((entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID) &&
- (bytes_evicted < eviction_size_limit)) {
- HDassert(!(entry_ptr->is_protected));
+ /* we have work to do */
- prev_ptr = entry_ptr->prev;
+ switch ((cache_ptr->resize_ctl).flash_incr_mode) {
+ case H5C_flash_incr__off:
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
+ "flash_size_increase_possible but H5C_flash_incr__off?!")
+ break;
- if (!(entry_ptr->is_dirty) && !(entry_ptr->prefetched_dirty))
- if (H5C__flush_single_entry(f, entry_ptr,
- H5C__FLUSH_INVALIDATE_FLAG |
- H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush clean entry")
-
- /* just skip the entry if it is dirty, as we can't do
- * anything with it now since we can't write.
- *
- * Since all entries are clean, serialize() will not be called,
- * and thus we needn't test to see if the LRU has been changed
- * out from under us.
- */
- entry_ptr = prev_ptr;
- } /* end while */
- } /* end else */
+ case H5C_flash_incr__add_space:
+ if (cache_ptr->index_size < cache_ptr->max_cache_size) {
- if (cache_ptr->index_size < cache_ptr->max_cache_size)
- cache_ptr->cache_full = FALSE;
+ HDassert((cache_ptr->max_cache_size - cache_ptr->index_size) < space_needed);
+ space_needed -= cache_ptr->max_cache_size - cache_ptr->index_size;
+ }
+ space_needed = (size_t)(((double)space_needed) * (cache_ptr->resize_ctl).flash_multiple);
-done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__autoadjust__ageout__evict_aged_out_entries() */
+ new_max_cache_size = cache_ptr->max_cache_size + space_needed;
- /*-------------------------------------------------------------------------
- *
- * Function: H5C__autoadjust__ageout__insert_new_marker
- *
- * Purpose: Find an unused marker cache entry, mark it as used, and
- * insert it at the head of the LRU list. Also add the
- * marker's index in the epoch_markers array.
- *
- * Return: SUCCEED on success/FAIL on failure.
- *
- * Programmer: John Mainzer, 11/19/04
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__autoadjust__ageout__insert_new_marker(H5C_t * cache_ptr)
- {
- herr_t ret_value = SUCCEED; /* Return value */
- int i;
+ break;
- FUNC_ENTER_NOAPI_NOINIT
+ default: /* should be unreachable */
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
+ break;
+ }
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ if (new_max_cache_size > (cache_ptr->resize_ctl).max_size) {
- if (cache_ptr->epoch_markers_active >= (cache_ptr->resize_ctl).epochs_before_eviction)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Already have a full complement of markers")
+ new_max_cache_size = (cache_ptr->resize_ctl).max_size;
+ }
- /* find an unused marker */
- i = 0;
- while ((cache_ptr->epoch_marker_active)[i] && i < H5C__MAX_EPOCH_MARKERS)
- i++;
+ HDassert(new_max_cache_size > cache_ptr->max_cache_size);
- if (i >= H5C__MAX_EPOCH_MARKERS)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't find unused marker")
+ new_min_clean_size =
+ (size_t)((double)new_max_cache_size * ((cache_ptr->resize_ctl).min_clean_fraction));
- HDassert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i);
- HDassert(((cache_ptr->epoch_markers)[i]).next == NULL);
- HDassert(((cache_ptr->epoch_markers)[i]).prev == NULL);
+ HDassert(new_min_clean_size <= new_max_cache_size);
- (cache_ptr->epoch_marker_active)[i] = TRUE;
+ old_max_cache_size = cache_ptr->max_cache_size;
+ old_min_clean_size = cache_ptr->min_clean_size;
- cache_ptr->epoch_marker_ringbuf_last =
- (cache_ptr->epoch_marker_ringbuf_last + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
+ cache_ptr->max_cache_size = new_max_cache_size;
+ cache_ptr->min_clean_size = new_min_clean_size;
- (cache_ptr->epoch_marker_ringbuf)[cache_ptr->epoch_marker_ringbuf_last] = i;
+ /* update flash cache size increase fields as appropriate */
+ HDassert(cache_ptr->flash_size_increase_possible);
- cache_ptr->epoch_marker_ringbuf_size += 1;
+ switch ((cache_ptr->resize_ctl).flash_incr_mode) {
+ case H5C_flash_incr__off:
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
+ "flash_size_increase_possible but H5C_flash_incr__off?!")
+ break;
- if (cache_ptr->epoch_marker_ringbuf_size > H5C__MAX_EPOCH_MARKERS) {
+ case H5C_flash_incr__add_space:
+ cache_ptr->flash_size_increase_threshold = (size_t)(
+ ((double)(cache_ptr->max_cache_size)) * ((cache_ptr->resize_ctl).flash_threshold));
+ break;
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow")
+ default: /* should be unreachable */
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
+ break;
}
- H5C__DLL_PREPEND((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr,
- (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size,
- (FAIL))
+ /* note that we don't cycle the epoch markers. We can
+ * argue either way as to whether we should, but for now
+ * we don't.
+ */
+
+ if ((cache_ptr->resize_ctl).rpt_fcn != NULL) {
+
+ /* get the hit rate for the reporting function. Should still
+ * be good as we haven't reset the hit rate statistics.
+ */
+ if (H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate")
- cache_ptr->epoch_markers_active += 1;
+ (*((cache_ptr->resize_ctl).rpt_fcn))(cache_ptr, H5C__CURR_AUTO_RESIZE_RPT_FCN_VER, hit_rate,
+ status, old_max_cache_size, new_max_cache_size,
+ old_min_clean_size, new_min_clean_size);
+ }
+
+ if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
+ /* this should be impossible... */
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed")
+ }
done:
- FUNC_LEAVE_NOAPI(ret_value)
+ FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__autoadjust__ageout__insert_new_marker() */
+} /* H5C__flash_increase_cache_size() */
- /*-------------------------------------------------------------------------
- *
- * Function: H5C__autoadjust__ageout__remove_all_markers
- *
- * Purpose: Remove all epoch markers from the LRU list and mark them
- * as inactive.
- *
- * Return: SUCCEED on success/FAIL on failure.
- *
- * Programmer: John Mainzer, 11/22/04
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__autoadjust__ageout__remove_all_markers(H5C_t * cache_ptr)
- {
- herr_t ret_value = SUCCEED; /* Return value */
- int i;
- int ring_buf_index;
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__flush_invalidate_cache
+ *
+ * Purpose: Flush and destroy the entries contained in the target
+ * cache.
+ *
+ * If the cache contains protected entries, the function will
+ * fail, as protected entries cannot be either flushed or
+ * destroyed. However all unprotected entries should be
+ * flushed and destroyed before the function returns failure.
+ *
+ * While pinned entries can usually be flushed, they cannot
+ * be destroyed. However, they should be unpinned when all
+ * the entries that reference them have been destroyed (thus
+ * reducing the pinned entry's reference count to 0, allowing
+ * it to be unpinned).
+ *
+ * If pinned entries are present, the function makes repeated
+ * passes through the cache, flushing all dirty entries
+ * (including the pinned dirty entries where permitted) and
+ * destroying all unpinned entries. This process is repeated
+ * until either the cache is empty, or the number of pinned
+ * entries stops decreasing on each pass.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 3/24/05
+ *
+ * Modifications:
+ *
+ * To support the fractal heap, the cache must now deal with
+ * entries being dirtied, resized, and/or renamed inside
+ * flush callbacks. Updated function to support this.
+ *
+ * -- JRM 8/27/06
+ *
+ * Added code to detect and manage the case in which a
+ * flush callback changes the s-list out from under
+ * the function. The only way I can think of in which this
+ * can happen is if a flush function loads an entry
+ * into the cache that isn't there already. Quincey tells
+ * me that this will never happen, but I'm not sure I
+ * believe him.
+ *
+ * Note that this is a pretty bad scenario if it ever
+ * happens. The code I have added should allow us to
+ * handle the situation under all but the worst conditions,
+ * but one can argue that we should just scream and die if
+ * we ever detect the condition.
+ *
+ * -- JRM 10/13/07
+ *
+ * Missing entries?
+ *
+ *
+ * Added support for the H5C__EVICT_ALLOW_LAST_PINS_FLAG.
+ * This flag is used to flush and evict all entries in
+ * the metadata cache that are not pinned -- typically,
+ * everything other than the superblock.
+ *
+ * ??? -- ??/??/??
+ *
+ * Added sanity checks to verify that the skip list is
+ * enabled on entry. On the face of it, it would make
+ * sense to enable the slist on entry, and disable it
+ * on exit, as this function is not called repeatedly.
+ * However, since this function can be called from
+ * H5C_flush_cache(), this would create cases in the test
+ * code where we would have to check the flags to determine
+ * whether we must setup and take down the slist.
+ *
+ * JRM -- 5/5/20
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__flush_invalidate_cache(H5F_t *f, unsigned flags)
+{
+ H5C_t * cache_ptr;
+ H5C_ring_t ring;
+ herr_t ret_value = SUCCEED;
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_STATIC
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_ptr);
+ HDassert(cache_ptr->slist_enabled);
- while (cache_ptr->epoch_markers_active > 0) {
- /* get the index of the last epoch marker in the LRU list
- * and remove it from the ring buffer.
- */
+#if H5C_DO_SANITY_CHECKS
+ {
+ int32_t i;
+ uint32_t index_len = 0;
+ uint32_t slist_len = 0;
+ size_t index_size = (size_t)0;
+ size_t clean_index_size = (size_t)0;
+ size_t dirty_index_size = (size_t)0;
+ size_t slist_size = (size_t)0;
- ring_buf_index = cache_ptr->epoch_marker_ringbuf_first;
- i = (cache_ptr->epoch_marker_ringbuf)[ring_buf_index];
+ HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- cache_ptr->epoch_marker_ringbuf_first =
- (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
+ for (i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
- cache_ptr->epoch_marker_ringbuf_size -= 1;
+ index_len += cache_ptr->index_ring_len[i];
+ index_size += cache_ptr->index_ring_size[i];
+ clean_index_size += cache_ptr->clean_index_ring_size[i];
+ dirty_index_size += cache_ptr->dirty_index_ring_size[i];
- if (cache_ptr->epoch_marker_ringbuf_size < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow")
+ slist_len += cache_ptr->slist_ring_len[i];
+ slist_size += cache_ptr->slist_ring_size[i];
- if ((cache_ptr->epoch_marker_active)[i] != TRUE)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
+ } /* end for */
- /* remove the epoch marker from the LRU list */
- H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr,
- (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size,
- (FAIL))
+ HDassert(cache_ptr->index_len == index_len);
+ HDassert(cache_ptr->index_size == index_size);
+ HDassert(cache_ptr->clean_index_size == clean_index_size);
+ HDassert(cache_ptr->dirty_index_size == dirty_index_size);
+ HDassert(cache_ptr->slist_len == slist_len);
+ HDassert(cache_ptr->slist_size == slist_size);
+ }
+#endif /* H5C_DO_SANITY_CHECKS */
- /* mark the epoch marker as unused. */
- (cache_ptr->epoch_marker_active)[i] = FALSE;
+ /* remove ageout markers if present */
+ if (cache_ptr->epoch_markers_active > 0) {
- HDassert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i);
- HDassert(((cache_ptr->epoch_markers)[i]).next == NULL);
- HDassert(((cache_ptr->epoch_markers)[i]).prev == NULL);
+ if (H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0)
- /* decrement the number of active epoch markers */
- cache_ptr->epoch_markers_active -= 1;
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers")
+ }
- HDassert(cache_ptr->epoch_markers_active == cache_ptr->epoch_marker_ringbuf_size);
- }
+ /* flush invalidate each ring, starting from the outermost ring and
+ * working inward.
+ */
+ ring = H5C_RING_USER;
-done:
+ while (ring < H5C_RING_NTYPES) {
- FUNC_LEAVE_NOAPI(ret_value)
+ if (H5C__flush_invalidate_ring(f, ring, flags) < 0)
- } /* H5C__autoadjust__ageout__remove_all_markers() */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate ring failed")
+ ring++;
- /*-------------------------------------------------------------------------
- *
- * Function: H5C__autoadjust__ageout__remove_excess_markers
- *
- * Purpose: Remove epoch markers from the end of the LRU list and
- * mark them as inactive until the number of active markers
- * equals the the current value of
- * (cache_ptr->resize_ctl).epochs_before_eviction.
- *
- * Return: SUCCEED on success/FAIL on failure.
- *
- * Programmer: John Mainzer, 11/19/04
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__autoadjust__ageout__remove_excess_markers(H5C_t * cache_ptr)
- {
- herr_t ret_value = SUCCEED; /* Return value */
- int i;
- int ring_buf_index;
+ } /* end while */
- FUNC_ENTER_NOAPI_NOINIT
+ /* Invariants, after destroying all entries in the hash table */
+ if (!(flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG)) {
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->index_size == 0);
+ HDassert(cache_ptr->clean_index_size == 0);
+ HDassert(cache_ptr->pel_len == 0);
+ HDassert(cache_ptr->pel_size == 0);
- if (cache_ptr->epoch_markers_active <= (cache_ptr->resize_ctl).epochs_before_eviction)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "no excess markers on entry")
+ } /* end if */
+ else {
- while (cache_ptr->epoch_markers_active > (cache_ptr->resize_ctl).epochs_before_eviction) {
- /* get the index of the last epoch marker in the LRU list
- * and remove it from the ring buffer.
- */
+ H5C_cache_entry_t *entry_ptr; /* Cache entry */
+ unsigned u; /* Local index variable */
- ring_buf_index = cache_ptr->epoch_marker_ringbuf_first;
- i = (cache_ptr->epoch_marker_ringbuf)[ring_buf_index];
+ /* All rings except ring 4 should be empty now */
+ /* (Ring 4 has the superblock) */
+ for (u = H5C_RING_USER; u < H5C_RING_SB; u++) {
- cache_ptr->epoch_marker_ringbuf_first =
- (cache_ptr->epoch_marker_ringbuf_first + 1) % (H5C__MAX_EPOCH_MARKERS + 1);
+ HDassert(cache_ptr->index_ring_len[u] == 0);
+ HDassert(cache_ptr->index_ring_size[u] == 0);
+ HDassert(cache_ptr->clean_index_ring_size[u] == 0);
- cache_ptr->epoch_marker_ringbuf_size -= 1;
+ } /* end for */
- if (cache_ptr->epoch_marker_ringbuf_size < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow")
- if ((cache_ptr->epoch_marker_active)[i] != TRUE)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
+ /* Check that any remaining pinned entries are in the superblock ring */
- /* remove the epoch marker from the LRU list */
- H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), (cache_ptr)->LRU_head_ptr,
- (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len, (cache_ptr)->LRU_list_size,
- (FAIL))
+ entry_ptr = cache_ptr->pel_head_ptr;
- /* mark the epoch marker as unused. */
- (cache_ptr->epoch_marker_active)[i] = FALSE;
+ while (entry_ptr) {
- HDassert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i);
- HDassert(((cache_ptr->epoch_markers)[i]).next == NULL);
- HDassert(((cache_ptr->epoch_markers)[i]).prev == NULL);
+ /* Check ring */
+ HDassert(entry_ptr->ring == H5C_RING_SB);
- /* decrement the number of active epoch markers */
- cache_ptr->epoch_markers_active -= 1;
+ /* Advance to next entry in pinned entry list */
+ entry_ptr = entry_ptr->next;
- HDassert(cache_ptr->epoch_markers_active == cache_ptr->epoch_marker_ringbuf_size);
- }
+ } /* end while */
+ } /* end else */
+
+ HDassert(cache_ptr->dirty_index_size == 0);
+ HDassert(cache_ptr->slist_len == 0);
+ HDassert(cache_ptr->slist_size == 0);
+ HDassert(cache_ptr->pl_len == 0);
+ HDassert(cache_ptr->pl_size == 0);
+ HDassert(cache_ptr->LRU_list_len == 0);
+ HDassert(cache_ptr->LRU_list_size == 0);
done:
- FUNC_LEAVE_NOAPI(ret_value)
+ FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__autoadjust__ageout__remove_excess_markers() */
+} /* H5C__flush_invalidate_cache() */
- /*-------------------------------------------------------------------------
- *
- * Function: H5C__flash_increase_cache_size
- *
- * Purpose: If there is not at least new_entry_size - old_entry_size
- * bytes of free space in the cache and the current
- * max_cache_size is less than (cache_ptr->resize_ctl).max_size,
- * perform a flash increase in the cache size and then reset
- * the full cache hit rate statistics, and exit.
- *
- * Return: Non-negative on success/Negative on failure.
- *
- * Programmer: John Mainzer, 12/31/07
- *
- *-------------------------------------------------------------------------
+/*-------------------------------------------------------------------------
+ * Function: H5C__flush_invalidate_ring
+ *
+ * Purpose: Flush and destroy the entries contained in the target
+ * cache and ring.
+ *
+ * If the ring contains protected entries, the function will
+ * fail, as protected entries cannot be either flushed or
+ * destroyed. However all unprotected entries should be
+ * flushed and destroyed before the function returns failure.
+ *
+ * While pinned entries can usually be flushed, they cannot
+ * be destroyed. However, they should be unpinned when all
+ * the entries that reference them have been destroyed (thus
+ * reducing the pinned entry's reference count to 0, allowing
+ * it to be unpinned).
+ *
+ * If pinned entries are present, the function makes repeated
+ * passes through the cache, flushing all dirty entries
+ * (including the pinned dirty entries where permitted) and
+ * destroying all unpinned entries. This process is repeated
+ * until either the cache is empty, or the number of pinned
+ * entries stops decreasing on each pass.
+ *
+ * If flush dependencies appear in the target ring, the
+ * function makes repeated passes through the cache flushing
+ * entries in flush dependency order.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 9/1/15
+ *
+ * Changes: Added support for the H5C__EVICT_ALLOW_LAST_PINS_FLAG.
+ * This flag is used to flush and evict all entries in
+ * the metadata cache that are not pinned -- typically,
+ * everything other than the superblock.
+ *
+ * ??? -- ??/??/??
+ *
+ * A recent optimization turns off the slist unless a flush
+ * is in progress. This should not affect this function, as
+ * it is only called during a flush. Added an assertion to
+ * verify this.
+ *
+ * JRM -- 5/6/20
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__flush_invalidate_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
+{
+ H5C_t * cache_ptr;
+ hbool_t restart_slist_scan;
+ uint32_t protected_entries = 0;
+ int32_t i;
+ int32_t cur_ring_pel_len;
+ int32_t old_ring_pel_len;
+ unsigned cooked_flags;
+ unsigned evict_flags;
+ H5SL_node_t * node_ptr = NULL;
+ H5C_cache_entry_t *entry_ptr = NULL;
+ H5C_cache_entry_t *next_entry_ptr = NULL;
+#if H5C_DO_SANITY_CHECKS
+ uint32_t initial_slist_len = 0;
+ size_t initial_slist_size = 0;
+#endif /* H5C_DO_SANITY_CHECKS */
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_STATIC
+
+ HDassert(f);
+ HDassert(f->shared);
+
+ cache_ptr = f->shared->cache;
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_enabled);
+ HDassert(cache_ptr->slist_ptr);
+ HDassert(ring > H5C_RING_UNDEFINED);
+ HDassert(ring < H5C_RING_NTYPES);
+
+ HDassert(cache_ptr->epoch_markers_active == 0);
+
+ /* Filter out the flags that are not relevant to the flush/invalidate.
+ */
+ cooked_flags = flags & H5C__FLUSH_CLEAR_ONLY_FLAG;
+ evict_flags = flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG;
+
+ /* The flush procedure here is a bit strange.
+ *
+ * In the outer while loop we make at least one pass through the
+ * cache, and then repeat until either all the pinned entries in
+ * the ring unpin themselves, or until the number of pinned entries
+ * in the ring stops declining. In this latter case, we scream and die.
+ *
+ * Since the fractal heap can dirty, resize, and/or move entries
+ * in its flush callback, it is possible that the cache will still
+ * contain dirty entries at this point. If so, we must make more
+ * passes through the skip list to allow it to empty.
+ *
+ * Further, since clean entries can be dirtied, resized, and/or moved
+ * as the result of a flush call back (either the entry's own, or that
+ * for some other cache entry), we can no longer promise to flush
+ * the cache entries in increasing address order.
+ *
+ * Instead, we just do the best we can -- making a pass through
+ * the skip list, and then a pass through the "clean" entries, and
+ * then repeating as needed. Thus it is quite possible that an
+ * entry will be evicted from the cache only to be re-loaded later
+ * in the flush process (From what Quincey tells me, the pin
+ * mechanism makes this impossible, but even if it is true now,
+ * we shouldn't count on it in the future.)
+ *
+ * The bottom line is that entries will probably be flushed in close
+ * to increasing address order, but there are no guarantees.
*/
- static herr_t H5C__flash_increase_cache_size(H5C_t * cache_ptr, size_t old_entry_size,
- size_t new_entry_size)
- {
- size_t new_max_cache_size = 0;
- size_t old_max_cache_size = 0;
- size_t new_min_clean_size = 0;
- size_t old_min_clean_size = 0;
- size_t space_needed;
- enum H5C_resize_status status = flash_increase; /* may change */
- double hit_rate;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI_NOINIT
-
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->flash_size_increase_possible);
- HDassert(new_entry_size > cache_ptr->flash_size_increase_threshold);
- HDassert(old_entry_size < new_entry_size);
- if (old_entry_size >= new_entry_size)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "old_entry_size >= new_entry_size")
+ /* compute the number of pinned entries in this ring */
- space_needed = new_entry_size - old_entry_size;
+ entry_ptr = cache_ptr->pel_head_ptr;
+ cur_ring_pel_len = 0;
- if (((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) &&
- (cache_ptr->max_cache_size < (cache_ptr->resize_ctl).max_size)) {
+ while (entry_ptr != NULL) {
- /* we have work to do */
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring >= ring);
+ if (entry_ptr->ring == ring)
+ cur_ring_pel_len++;
- switch ((cache_ptr->resize_ctl).flash_incr_mode) {
- case H5C_flash_incr__off:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
- "flash_size_increase_possible but H5C_flash_incr__off?!")
- break;
+ entry_ptr = entry_ptr->next;
- case H5C_flash_incr__add_space:
- if (cache_ptr->index_size < cache_ptr->max_cache_size) {
+ } /* end while */
- HDassert((cache_ptr->max_cache_size - cache_ptr->index_size) < space_needed);
- space_needed -= cache_ptr->max_cache_size - cache_ptr->index_size;
- }
- space_needed = (size_t)(((double)space_needed) * (cache_ptr->resize_ctl).flash_multiple);
+ old_ring_pel_len = cur_ring_pel_len;
- new_max_cache_size = cache_ptr->max_cache_size + space_needed;
+ while (cache_ptr->index_ring_len[ring] > 0) {
- break;
+ /* first, try to flush-destroy any dirty entries. Do this by
+ * making a scan through the slist. Note that new dirty entries
+ * may be created by the flush call backs. Thus it is possible
+ * that the slist will not be empty after we finish the scan.
+ */
- default: /* should be unreachable */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
+#if H5C_DO_SANITY_CHECKS
+ /* Depending on circumstances, H5C__flush_single_entry() will
+ * remove dirty entries from the slist as it flushes them.
+ * Thus for sanity checks we must make note of the initial
+ * slist length and size before we do any flushes.
+ */
+ initial_slist_len = cache_ptr->slist_len;
+ initial_slist_size = cache_ptr->slist_size;
+
+ /* There is also the possibility that entries will be
+ * dirtied, resized, moved, and/or removed from the cache
+ * as the result of calls to the flush callbacks. We use
+ * the slist_len_increase and slist_size_increase
+ * fields in struct H5C_t to track these changes for purpose
+ * of sanity checking.
+ *
+ * To this end, we must zero these fields before we start
+ * the pass through the slist.
+ */
+ cache_ptr->slist_len_increase = 0;
+ cache_ptr->slist_size_increase = 0;
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ /* Set the cache_ptr->slist_changed to false.
+ *
+ * This flag is set to TRUE by H5C__flush_single_entry if the slist
+ * is modified by a pre_serialize, serialize, or notify callback.
+ *
+ * H5C__flush_invalidate_ring() uses this flag to detect any
+ * modifications to the slist that might corrupt the scan of
+ * the slist -- and restart the scan in this event.
+ */
+ cache_ptr->slist_changed = FALSE;
+
+ /* this done, start the scan of the slist */
+ restart_slist_scan = TRUE;
+
+ while (restart_slist_scan || (node_ptr != NULL)) {
+
+ if (restart_slist_scan) {
+
+ restart_slist_scan = FALSE;
+
+ /* Start at beginning of skip list */
+ node_ptr = H5SL_first(cache_ptr->slist_ptr);
+
+ if (node_ptr == NULL)
+ /* the slist is empty -- break out of inner loop */
break;
- }
- if (new_max_cache_size > (cache_ptr->resize_ctl).max_size) {
+ /* Get cache entry for this node */
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- new_max_cache_size = (cache_ptr->resize_ctl).max_size;
- }
+ if (NULL == next_entry_ptr)
- HDassert(new_max_cache_size > cache_ptr->max_cache_size);
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
- new_min_clean_size =
- (size_t)((double)new_max_cache_size * ((cache_ptr->resize_ctl).min_clean_fraction));
+ HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(next_entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->in_slist);
+ HDassert(next_entry_ptr->ring >= ring);
- HDassert(new_min_clean_size <= new_max_cache_size);
+ } /* end if */
- old_max_cache_size = cache_ptr->max_cache_size;
- old_min_clean_size = cache_ptr->min_clean_size;
+ entry_ptr = next_entry_ptr;
- cache_ptr->max_cache_size = new_max_cache_size;
- cache_ptr->min_clean_size = new_min_clean_size;
+ /* It is possible that entries will be dirtied, resized,
+ * flushed, or removed from the cache via the take ownership
+ * flag as the result of pre_serialize or serialized callbacks.
+ *
+ * This in turn can corrupt the scan through the slist.
+ *
+ * We test for slist modifications in the pre_serialize
+ * and serialize callbacks, and restart the scan of the
+ * slist if we find them. However, best we do some extra
+ * sanity checking just in case.
+ */
+ HDassert(entry_ptr != NULL);
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->in_slist);
+ HDassert(entry_ptr->is_dirty);
+ HDassert(entry_ptr->ring >= ring);
- /* update flash cache size increase fields as appropriate */
- HDassert(cache_ptr->flash_size_increase_possible);
+ /* increment node pointer now, before we delete its target
+ * from the slist.
+ */
+ node_ptr = H5SL_next(node_ptr);
- switch ((cache_ptr->resize_ctl).flash_incr_mode) {
- case H5C_flash_incr__off:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
- "flash_size_increase_possible but H5C_flash_incr__off?!")
- break;
+ if (node_ptr != NULL) {
- case H5C_flash_incr__add_space:
- cache_ptr->flash_size_increase_threshold = (size_t)(
- ((double)(cache_ptr->max_cache_size)) * ((cache_ptr->resize_ctl).flash_threshold));
- break;
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- default: /* should be unreachable */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
- break;
+ if (NULL == next_entry_ptr)
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+
+ HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(next_entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->in_slist);
+ HDassert(next_entry_ptr->ring >= ring);
+ HDassert(entry_ptr != next_entry_ptr);
+ } /* end if */
+ else {
+
+ next_entry_ptr = NULL;
}
- /* note that we don't cycle the epoch markers. We can
- * argue either way as to whether we should, but for now
- * we don't.
+ /* Note that we now remove nodes from the slist as we flush
+ * the associated entries, instead of leaving them there
+ * until we are done, and then destroying all nodes in
+ * the slist.
+ *
+ * While this optimization used to be easy, with the possibility
+ * of new entries being added to the slist in the midst of the
+ * flush, we must keep the slist in canonical form at all
+ * times.
*/
+ if (((!entry_ptr->flush_me_last) ||
+ ((entry_ptr->flush_me_last) && (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
+ (entry_ptr->flush_dep_nchildren == 0) && (entry_ptr->ring == ring)) {
- if ((cache_ptr->resize_ctl).rpt_fcn != NULL) {
+ if (entry_ptr->is_protected) {
- /* get the hit rate for the reporting function. Should still
- * be good as we haven't reset the hit rate statistics.
- */
- if (H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate")
+ /* we have major problems -- but lets flush
+ * everything we can before we flag an error.
+ */
+ protected_entries++;
- (*((cache_ptr->resize_ctl).rpt_fcn))(cache_ptr, H5C__CURR_AUTO_RESIZE_RPT_FCN_VER, hit_rate,
- status, old_max_cache_size, new_max_cache_size,
- old_min_clean_size, new_min_clean_size);
- }
+ } /* end if */
+ else if (entry_ptr->is_pinned) {
- if (H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
- /* this should be impossible... */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed")
- }
+ if (H5C__flush_single_entry(f, entry_ptr, H5C__DURING_FLUSH_FLAG) < 0)
-done:
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed")
- FUNC_LEAVE_NOAPI(ret_value)
+ if (cache_ptr->slist_changed) {
- } /* H5C__flash_increase_cache_size() */
+ /* The slist has been modified by something
+ * other than the simple removal of the
+ * flushed entry after the flush.
+ *
+ * This has the potential to corrupt the
+ * scan through the slist, so restart it.
+ */
+ restart_slist_scan = TRUE;
+ cache_ptr->slist_changed = FALSE;
+ H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr);
- /*-------------------------------------------------------------------------
- * Function: H5C__flush_invalidate_cache
- *
- * Purpose: Flush and destroy the entries contained in the target
- * cache.
- *
- * If the cache contains protected entries, the function will
- * fail, as protected entries cannot be either flushed or
- * destroyed. However all unprotected entries should be
- * flushed and destroyed before the function returns failure.
- *
- * While pinned entries can usually be flushed, they cannot
- * be destroyed. However, they should be unpinned when all
- * the entries that reference them have been destroyed (thus
- * reduding the pinned entry's reference count to 0, allowing
- * it to be unpinned).
- *
- * If pinned entries are present, the function makes repeated
- * passes through the cache, flushing all dirty entries
- * (including the pinned dirty entries where permitted) and
- * destroying all unpinned entries. This process is repeated
- * until either the cache is empty, or the number of pinned
- * entries stops decreasing on each pass.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * a request to flush all items and something was protected.
- *
- * Programmer: John Mainzer
- * 3/24/065
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__flush_invalidate_cache(H5F_t * f, unsigned flags)
- {
- H5C_t * cache_ptr;
- H5C_ring_t ring;
- herr_t ret_value = SUCCEED;
+ } /* end if */
+ } /* end else-if */
+ else {
- FUNC_ENTER_STATIC
+ if (H5C__flush_single_entry(f, entry_ptr,
+ (cooked_flags | H5C__DURING_FLUSH_FLAG |
+ H5C__FLUSH_INVALIDATE_FLAG |
+ H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0)
- HDassert(f);
- HDassert(f->shared);
- cache_ptr = f->shared->cache;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->slist_ptr);
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry flush destroy failed")
-#if H5C_DO_SANITY_CHECKS
- {
- int32_t i;
- uint32_t index_len = 0;
- uint32_t slist_len = 0;
- size_t index_size = (size_t)0;
- size_t clean_index_size = (size_t)0;
- size_t dirty_index_size = (size_t)0;
- size_t slist_size = (size_t)0;
-
- HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
- HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
- HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
-
- for (i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
- index_len += cache_ptr->index_ring_len[i];
- index_size += cache_ptr->index_ring_size[i];
- clean_index_size += cache_ptr->clean_index_ring_size[i];
- dirty_index_size += cache_ptr->dirty_index_ring_size[i];
-
- slist_len += cache_ptr->slist_ring_len[i];
- slist_size += cache_ptr->slist_ring_size[i];
- } /* end for */
-
- HDassert(cache_ptr->index_len == index_len);
- HDassert(cache_ptr->index_size == index_size);
- HDassert(cache_ptr->clean_index_size == clean_index_size);
- HDassert(cache_ptr->dirty_index_size == dirty_index_size);
- HDassert(cache_ptr->slist_len == slist_len);
- HDassert(cache_ptr->slist_size == slist_size);
- }
-#endif /* H5C_DO_SANITY_CHECKS */
+ if (cache_ptr->slist_changed) {
- /* remove ageout markers if present */
- if (cache_ptr->epoch_markers_active > 0)
- if (H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers")
+ /* The slist has been modified by something
+ * other than the simple removal of the
+ * flushed entry after the flush.
+ *
+ * This has the potential to corrupt the
+ * scan through the slist, so restart it.
+ */
+ restart_slist_scan = TRUE;
+ cache_ptr->slist_changed = FALSE;
+ H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
+ } /* end if */
+ } /* end else */
+ } /* end if */
+ } /* end while loop scanning skip list */
- /* flush invalidate each ring, starting from the outermost ring and
- * working inward.
+#if H5C_DO_SANITY_CHECKS
+ /* It is possible that entries were added to the slist during
+ * the scan, either before or after scan pointer. The following
+ * asserts take this into account.
+ *
+ * Don't bother with the sanity checks if node_ptr != NULL, as
+ * in this case we broke out of the loop because it got changed
+ * out from under us.
*/
- ring = H5C_RING_USER;
- while (ring < H5C_RING_NTYPES) {
- if (H5C_flush_invalidate_ring(f, ring, flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate ring failed")
- ring++;
- } /* end while */
- /* Invariants, after destroying all entries in the hash table */
- if (!(flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG)) {
- HDassert(cache_ptr->index_size == 0);
- HDassert(cache_ptr->clean_index_size == 0);
- HDassert(cache_ptr->pel_len == 0);
- HDassert(cache_ptr->pel_size == 0);
+ if (node_ptr == NULL) {
+
+ HDassert(cache_ptr->slist_len ==
+ (uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase));
+
+ HDassert(cache_ptr->slist_size ==
+ (size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase));
} /* end if */
- else {
- H5C_cache_entry_t *entry_ptr; /* Cache entry */
- unsigned u; /* Local index variable */
-
- /* All rings except ring 4 should be empty now */
- /* (Ring 4 has the superblock) */
- for (u = H5C_RING_USER; u < H5C_RING_SB; u++) {
- HDassert(cache_ptr->index_ring_len[u] == 0);
- HDassert(cache_ptr->index_ring_size[u] == 0);
- HDassert(cache_ptr->clean_index_ring_size[u] == 0);
- } /* end for */
-
- /* Check that any remaining pinned entries are in the superblock ring */
- entry_ptr = cache_ptr->pel_head_ptr;
- while (entry_ptr) {
- /* Check ring */
- HDassert(entry_ptr->ring == H5C_RING_SB);
-
- /* Advance to next entry in pinned entry list */
- entry_ptr = entry_ptr->next;
- } /* end while */
- } /* end else */
- HDassert(cache_ptr->dirty_index_size == 0);
- HDassert(cache_ptr->slist_len == 0);
- HDassert(cache_ptr->slist_size == 0);
- HDassert(cache_ptr->pl_len == 0);
- HDassert(cache_ptr->pl_size == 0);
- HDassert(cache_ptr->LRU_list_len == 0);
- HDassert(cache_ptr->LRU_list_size == 0);
+#endif /* H5C_DO_SANITY_CHECKS */
-done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__flush_invalidate_cache() */
+ /* Since we are doing a destroy, we must make a pass through
+ * the hash table and try to flush - destroy all entries that
+ * remain.
+ *
+ * It used to be that all entries remaining in the cache at
+ * this point had to be clean, but with the fractal heap mods
+ * this may not be the case. If so, we will flush entries out
+ * in increasing address order.
+ *
+ * Writes to disk are possible here.
+ */
- /*-------------------------------------------------------------------------
- * Function: H5C_flush_invalidate_ring
- *
- * Purpose: Flush and destroy the entries contained in the target
- * cache and ring.
- *
- * If the ring contains protected entries, the function will
- * fail, as protected entries cannot be either flushed or
- * destroyed. However all unprotected entries should be
- * flushed and destroyed before the function returns failure.
- *
- * While pinned entries can usually be flushed, they cannot
- * be destroyed. However, they should be unpinned when all
- * the entries that reference them have been destroyed (thus
- * reduding the pinned entry's reference count to 0, allowing
- * it to be unpinned).
- *
- * If pinned entries are present, the function makes repeated
- * passes through the cache, flushing all dirty entries
- * (including the pinned dirty entries where permitted) and
- * destroying all unpinned entries. This process is repeated
- * until either the cache is empty, or the number of pinned
- * entries stops decreasing on each pass.
- *
- * If flush dependencies appear in the target ring, the
- * function makes repeated passes through the cache flushing
- * entries in flush dependency order.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * a request to flush all items and something was protected.
- *
- * Programmer: John Mainzer
- * 9/1/15
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C_flush_invalidate_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
- {
- H5C_t * cache_ptr;
- hbool_t restart_slist_scan;
- uint32_t protected_entries = 0;
- int32_t i;
- int32_t cur_ring_pel_len;
- int32_t old_ring_pel_len;
- unsigned cooked_flags;
- unsigned evict_flags;
- H5SL_node_t * node_ptr = NULL;
- H5C_cache_entry_t *entry_ptr = NULL;
- H5C_cache_entry_t *next_entry_ptr = NULL;
-#if H5C_DO_SANITY_CHECKS
- uint32_t initial_slist_len = 0;
- size_t initial_slist_size = 0;
-#endif /* H5C_DO_SANITY_CHECKS */
- herr_t ret_value = SUCCEED;
+ /* reset the counters so that we can detect insertions, loads,
+ * and moves caused by the pre_serialize and serialize calls.
+ */
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
- FUNC_ENTER_NOAPI(FAIL)
+ next_entry_ptr = cache_ptr->il_head;
- HDassert(f);
- HDassert(f->shared);
- cache_ptr = f->shared->cache;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->slist_ptr);
- HDassert(ring > H5C_RING_UNDEFINED);
- HDassert(ring < H5C_RING_NTYPES);
+ while (next_entry_ptr != NULL) {
- HDassert(cache_ptr->epoch_markers_active == 0);
+ entry_ptr = next_entry_ptr;
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring >= ring);
- /* Filter out the flags that are not relevant to the flush/invalidate.
- */
- cooked_flags = flags & H5C__FLUSH_CLEAR_ONLY_FLAG;
- evict_flags = flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG;
+ next_entry_ptr = entry_ptr->il_next;
+ HDassert((next_entry_ptr == NULL) || (next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC));
- /* The flush procedure here is a bit strange.
- *
- * In the outer while loop we make at least one pass through the
- * cache, and then repeat until either all the pinned entries in
- * the ring unpin themselves, or until the number of pinned entries
- * in the ring stops declining. In this later case, we scream and die.
- *
- * Since the fractal heap can dirty, resize, and/or move entries
- * in is flush callback, it is possible that the cache will still
- * contain dirty entries at this point. If so, we must make more
- * passes through the skip list to allow it to empty.
- *
- * Further, since clean entries can be dirtied, resized, and/or moved
- * as the result of a flush call back (either the entries own, or that
- * for some other cache entry), we can no longer promise to flush
- * the cache entries in increasing address order.
- *
- * Instead, we just do the best we can -- making a pass through
- * the skip list, and then a pass through the "clean" entries, and
- * then repeating as needed. Thus it is quite possible that an
- * entry will be evicted from the cache only to be re-loaded later
- * in the flush process (From what Quincey tells me, the pin
- * mechanism makes this impossible, but even it it is true now,
- * we shouldn't count on it in the future.)
+ if (((!entry_ptr->flush_me_last) ||
+ (entry_ptr->flush_me_last && (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
+ (entry_ptr->flush_dep_nchildren == 0) && (entry_ptr->ring == ring)) {
+
+ if (entry_ptr->is_protected) {
+
+ /* we have major problems -- but lets flush and
+ * destroy everything we can before we flag an
+ * error.
+ */
+ protected_entries++;
+
+ if (!entry_ptr->in_slist) {
+
+ HDassert(!(entry_ptr->is_dirty));
+ }
+ } /* end if */
+ else if (!(entry_ptr->is_pinned)) {
+
+ /* if *entry_ptr is dirty, it is possible
+ * that one or more other entries may be
+ * either removed from the cache, loaded
+ * into the cache, or moved to a new location
+ * in the file as a side effect of the flush.
+ *
+ * It's also possible that removing a clean
+ * entry will remove the last child of a proxy
+ * entry, allowing it to be removed also and
+ * invalidating the next_entry_ptr.
+ *
+ * If either of these happen, and one of the target
+ * or proxy entries happens to be the next entry in
+ * the hash bucket, we could find ourselves
+ * either scanning a non-existent entry, scanning
+ * through a different bucket, or skipping an entry.
+ *
+ * None of these is good, so restart the
+ * scan at the head of the hash bucket
+ * after the flush if we detect that the next_entry_ptr
+ * becomes invalid.
+ *
+ * This is not as inefficient as it might seem,
+ * as hash buckets typically have at most two
+ * or three entries.
+ */
+ cache_ptr->entry_watched_for_removal = next_entry_ptr;
+
+ if (H5C__flush_single_entry(f, entry_ptr,
+ (cooked_flags | H5C__DURING_FLUSH_FLAG |
+ H5C__FLUSH_INVALIDATE_FLAG |
+ H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0)
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed")
+
+ /* Restart the index list scan if necessary. Must
+ * do this if the next entry is evicted, and also if
+ * one or more entries are inserted, loaded, or moved
+ * as these operations can result in part of the scan
+ * being skipped -- which can cause a spurious failure
+ * if this results in the size of the pinned entry
+ * failing to decline during the pass.
+ */
+ if (((NULL != next_entry_ptr) && (NULL == cache_ptr->entry_watched_for_removal)) ||
+ (cache_ptr->entries_loaded_counter > 0) ||
+ (cache_ptr->entries_inserted_counter > 0) ||
+ (cache_ptr->entries_relocated_counter > 0)) {
+
+ next_entry_ptr = cache_ptr->il_head;
+
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
+
+ H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr)
+
+ } /* end if */
+ else {
+
+ cache_ptr->entry_watched_for_removal = NULL;
+ }
+ } /* end if */
+ } /* end if */
+ } /* end for loop scanning hash table */
+
+ /* We can't do anything if entries are pinned. The
+ * hope is that the entries will be unpinned as the
+ * result of destroys of entries that reference them.
*
- * The bottom line is that entries will probably be flushed in close
- * to increasing address order, but there are no guarantees.
+ * We detect this by noting the change in the number
+ * of pinned entries from pass to pass. If it stops
+ * shrinking before it hits zero, we scream and die.
*/
-
- /* compute the number of pinned entries in this ring */
+ old_ring_pel_len = cur_ring_pel_len;
entry_ptr = cache_ptr->pel_head_ptr;
cur_ring_pel_len = 0;
+
while (entry_ptr != NULL) {
+
HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(entry_ptr->ring >= ring);
- if (entry_ptr->ring == ring)
+
+ if (entry_ptr->ring == ring) {
+
cur_ring_pel_len++;
+ }
entry_ptr = entry_ptr->next;
+
} /* end while */
- old_ring_pel_len = cur_ring_pel_len;
- while (cache_ptr->index_ring_len[ring] > 0) {
- /* first, try to flush-destroy any dirty entries. Do this by
- * making a scan through the slist. Note that new dirty entries
- * may be created by the flush call backs. Thus it is possible
- * that the slist will not be empty after we finish the scan.
- */
+ /* Check if the number of pinned entries in the ring is positive, and
+ * it is not declining. Scream and die if so.
+ */
+ if ((cur_ring_pel_len > 0) && (cur_ring_pel_len >= old_ring_pel_len)) {
-#if H5C_DO_SANITY_CHECKS
- /* Depending on circumstances, H5C__flush_single_entry() will
- * remove dirty entries from the slist as it flushes them.
- * Thus for sanity checks we must make note of the initial
- * slist length and size before we do any flushes.
- */
- initial_slist_len = cache_ptr->slist_len;
- initial_slist_size = cache_ptr->slist_size;
-
- /* There is also the possibility that entries will be
- * dirtied, resized, moved, and/or removed from the cache
- * as the result of calls to the flush callbacks. We use
- * the slist_len_increase and slist_size_increase increase
- * fields in struct H5C_t to track these changes for purpose
- * of sanity checking.
- *
- * To this end, we must zero these fields before we start
- * the pass through the slist.
- */
- cache_ptr->slist_len_increase = 0;
- cache_ptr->slist_size_increase = 0;
-#endif /* H5C_DO_SANITY_CHECKS */
+ /* Don't error if allowed to have pinned entries remaining */
+ if (evict_flags) {
- /* Set the cache_ptr->slist_changed to false.
- *
- * This flag is set to TRUE by H5C__flush_single_entry if the slist
- * is modified by a pre_serialize, serialize, or notify callback.
- *
- * H5C_flush_invalidate_ring() uses this flag to detect any
- * modifications to the slist that might corrupt the scan of
- * the slist -- and restart the scan in this event.
- */
- cache_ptr->slist_changed = FALSE;
+ HGOTO_DONE(TRUE)
+ }
- /* this done, start the scan of the slist */
- restart_slist_scan = TRUE;
- while (restart_slist_scan || (node_ptr != NULL)) {
- if (restart_slist_scan) {
- restart_slist_scan = FALSE;
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL,
+ "Pinned entry count not decreasing, cur_ring_pel_len = %d, old_ring_pel_len = "
+ "%d, ring = %d",
+ (int)cur_ring_pel_len, (int)old_ring_pel_len, (int)ring)
+ } /* end if */
- /* Start at beginning of skip list */
- node_ptr = H5SL_first(cache_ptr->slist_ptr);
- if (node_ptr == NULL)
- /* the slist is empty -- break out of inner loop */
- break;
+ HDassert(protected_entries == cache_ptr->pl_len);
- /* Get cache entry for this node */
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- if (NULL == next_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+ if ((protected_entries > 0) && (protected_entries == cache_ptr->index_len))
- HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(next_entry_ptr->is_dirty);
- HDassert(next_entry_ptr->in_slist);
- HDassert(next_entry_ptr->ring >= ring);
- } /* end if */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL,
+ "Only protected entries left in cache, protected_entries = %d",
+ (int)protected_entries)
- entry_ptr = next_entry_ptr;
-
- /* It is possible that entries will be dirtied, resized,
- * flushed, or removed from the cache via the take ownership
- * flag as the result of pre_serialize or serialized callbacks.
- *
- * This in turn can corrupt the scan through the slist.
- *
- * We test for slist modifications in the pre_serialize
- * and serialize callbacks, and restart the scan of the
- * slist if we find them. However, best we do some extra
- * sanity checking just in case.
- */
- HDassert(entry_ptr != NULL);
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->in_slist);
- HDassert(entry_ptr->is_dirty);
- HDassert(entry_ptr->ring >= ring);
+ } /* main while loop */
- /* increment node pointer now, before we delete its target
- * from the slist.
- */
- node_ptr = H5SL_next(node_ptr);
- if (node_ptr != NULL) {
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- if (NULL == next_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
- HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(next_entry_ptr->is_dirty);
- HDassert(next_entry_ptr->in_slist);
- HDassert(next_entry_ptr->ring >= ring);
- HDassert(entry_ptr != next_entry_ptr);
- } /* end if */
- else
- next_entry_ptr = NULL;
-
- /* Note that we now remove nodes from the slist as we flush
- * the associated entries, instead of leaving them there
- * until we are done, and then destroying all nodes in
- * the slist.
- *
- * While this optimization used to be easy, with the possibility
- * of new entries being added to the slist in the midst of the
- * flush, we must keep the slist in canonical form at all
- * times.
- */
- if (((!entry_ptr->flush_me_last) ||
- ((entry_ptr->flush_me_last) && (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
- (entry_ptr->flush_dep_nchildren == 0) && (entry_ptr->ring == ring)) {
- if (entry_ptr->is_protected) {
- /* we have major problems -- but lets flush
- * everything we can before we flag an error.
- */
- protected_entries++;
- } /* end if */
- else if (entry_ptr->is_pinned) {
- if (H5C__flush_single_entry(f, entry_ptr, H5C__DURING_FLUSH_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed")
-
- if (cache_ptr->slist_changed) {
- /* The slist has been modified by something
- * other than the simple removal of the
- * of the flushed entry after the flush.
- *
- * This has the potential to corrupt the
- * scan through the slist, so restart it.
- */
- restart_slist_scan = TRUE;
- cache_ptr->slist_changed = FALSE;
- H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr);
- } /* end if */
- } /* end else-if */
- else {
- if (H5C__flush_single_entry(f, entry_ptr,
- (cooked_flags | H5C__DURING_FLUSH_FLAG |
- H5C__FLUSH_INVALIDATE_FLAG |
- H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry flush destroy failed")
-
- if (cache_ptr->slist_changed) {
- /* The slist has been modified by something
- * other than the simple removal of the
- * of the flushed entry after the flush.
- *
- * This has the potential to corrupt the
- * scan through the slist, so restart it.
- */
- restart_slist_scan = TRUE;
- cache_ptr->slist_changed = FALSE;
- H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
- } /* end if */
- } /* end else */
- } /* end if */
- } /* end while loop scanning skip list */
+ /* Invariants, after destroying all entries in the ring */
+ for (i = (int)H5C_RING_UNDEFINED; i <= (int)ring; i++) {
-#if H5C_DO_SANITY_CHECKS
- /* It is possible that entries were added to the slist during
- * the scan, either before or after scan pointer. The following
- * asserts take this into account.
- *
- * Don't bother with the sanity checks if node_ptr != NULL, as
- * in this case we broke out of the loop because it got changed
- * out from under us.
- */
+ HDassert(cache_ptr->index_ring_len[i] == 0);
+ HDassert(cache_ptr->index_ring_size[i] == (size_t)0);
+ HDassert(cache_ptr->clean_index_ring_size[i] == (size_t)0);
+ HDassert(cache_ptr->dirty_index_ring_size[i] == (size_t)0);
- if (node_ptr == NULL) {
- HDassert(cache_ptr->slist_len ==
- (uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase));
- HDassert(cache_ptr->slist_size ==
- (size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase));
- } /* end if */
-#endif /* H5C_DO_SANITY_CHECKS */
+ HDassert(cache_ptr->slist_ring_len[i] == 0);
+ HDassert(cache_ptr->slist_ring_size[i] == (size_t)0);
- /* Since we are doing a destroy, we must make a pass through
- * the hash table and try to flush - destroy all entries that
- * remain.
- *
- * It used to be that all entries remaining in the cache at
- * this point had to be clean, but with the fractal heap mods
- * this may not be the case. If so, we will flush entries out
- * in increasing address order.
- *
- * Writes to disk are possible here.
- */
+ } /* end for */
- /* reset the counters so that we can detect insertions, loads,
- * and moves caused by the pre_serialize and serialize calls.
- */
- cache_ptr->entries_loaded_counter = 0;
- cache_ptr->entries_inserted_counter = 0;
- cache_ptr->entries_relocated_counter = 0;
-
- next_entry_ptr = cache_ptr->il_head;
- while (next_entry_ptr != NULL) {
- entry_ptr = next_entry_ptr;
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->ring >= ring);
+ HDassert(protected_entries <= cache_ptr->pl_len);
- next_entry_ptr = entry_ptr->il_next;
- HDassert((next_entry_ptr == NULL) || (next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC));
+ if (protected_entries > 0) {
- if ((!entry_ptr->flush_me_last ||
- (entry_ptr->flush_me_last && cache_ptr->num_last_entries >= cache_ptr->slist_len)) &&
- entry_ptr->flush_dep_nchildren == 0 && entry_ptr->ring == ring) {
- if (entry_ptr->is_protected) {
- /* we have major problems -- but lets flush and
- * destroy everything we can before we flag an
- * error.
- */
- protected_entries++;
- if (!entry_ptr->in_slist)
- HDassert(!(entry_ptr->is_dirty));
- } /* end if */
- else if (!(entry_ptr->is_pinned)) {
- /* if *entry_ptr is dirty, it is possible
- * that one or more other entries may be
- * either removed from the cache, loaded
- * into the cache, or moved to a new location
- * in the file as a side effect of the flush.
- *
- * It's also possible that removing a clean
- * entry will remove the last child of a proxy
- * entry, allowing it to be removed also and
- * invalidating the next_entry_ptr.
- *
- * If either of these happen, and one of the target
- * or proxy entries happens to be the next entry in
- * the hash bucket, we could either find ourselves
- * either scanning a non-existant entry, scanning
- * through a different bucket, or skipping an entry.
- *
- * Neither of these are good, so restart the
- * the scan at the head of the hash bucket
- * after the flush if we detect that the next_entry_ptr
- * becomes invalid.
- *
- * This is not as inefficient at it might seem,
- * as hash buckets typically have at most two
- * or three entries.
- */
- cache_ptr->entry_watched_for_removal = next_entry_ptr;
-
- if (H5C__flush_single_entry(f, entry_ptr,
- (cooked_flags | H5C__DURING_FLUSH_FLAG |
- H5C__FLUSH_INVALIDATE_FLAG |
- H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed")
-
- /* Restart the index list scan if necessary. Must
- * do this if the next entry is evicted, and also if
- * one or more entries are inserted, loaded, or moved
- * as these operations can result in part of the scan
- * being skipped -- which can cause a spurious failure
- * if this results in the size of the pinned entry
- * failing to decline during the pass.
- */
- if ((NULL != next_entry_ptr && NULL == cache_ptr->entry_watched_for_removal) ||
- (cache_ptr->entries_loaded_counter > 0) ||
- (cache_ptr->entries_inserted_counter > 0) ||
- (cache_ptr->entries_relocated_counter > 0)) {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Cache has protected entries")
+ }
+ else if (cur_ring_pel_len > 0) {
- next_entry_ptr = cache_ptr->il_head;
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't unpin all pinned entries in ring")
+ }
- cache_ptr->entries_loaded_counter = 0;
- cache_ptr->entries_inserted_counter = 0;
- cache_ptr->entries_relocated_counter = 0;
+done:
- H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr)
- } /* end if */
- else
- cache_ptr->entry_watched_for_removal = NULL;
- } /* end if */
- } /* end if */
- } /* end for loop scanning hash table */
+ FUNC_LEAVE_NOAPI(ret_value)
- /* We can't do anything if entries are pinned. The
- * hope is that the entries will be unpinned as the
- * result of destroys of entries that reference them.
- *
- * We detect this by noting the change in the number
- * of pinned entries from pass to pass. If it stops
- * shrinking before it hits zero, we scream and die.
- */
- old_ring_pel_len = cur_ring_pel_len;
- entry_ptr = cache_ptr->pel_head_ptr;
- cur_ring_pel_len = 0;
- while (entry_ptr != NULL) {
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->ring >= ring);
+} /* H5C__flush_invalidate_ring() */
- if (entry_ptr->ring == ring)
- cur_ring_pel_len++;
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__flush_ring
+ *
+ * Purpose: Flush the entries contained in the specified cache and
+ * ring. All entries in rings outside the specified ring
+ * must have been flushed on entry.
+ *
+ * If the cache contains protected entries in the specified
+ * ring, the function will fail, as protected entries cannot
+ * be flushed. However all unprotected entries in the target
+ * ring should be flushed before the function returns failure.
+ *
+ * If flush dependencies appear in the target ring, the
+ * function makes repeated passes through the slist flushing
+ * entries in flush dependency order.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 9/1/15
+ *
+ * Changes: A recent optimization turns off the slist unless a flush
+ * is in progress. This should not affect this function, as
+ * it is only called during a flush. Added an assertion to
+ * verify this.
+ *
+ * JRM -- 5/6/20
+ *
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
+{
+ H5C_t * cache_ptr = f->shared->cache;
+ hbool_t flushed_entries_last_pass;
+ hbool_t flush_marked_entries;
+ hbool_t ignore_protected;
+ hbool_t tried_to_flush_protected_entry = FALSE;
+ hbool_t restart_slist_scan;
+ uint32_t protected_entries = 0;
+ H5SL_node_t * node_ptr = NULL;
+ H5C_cache_entry_t *entry_ptr = NULL;
+ H5C_cache_entry_t *next_entry_ptr = NULL;
+#if H5C_DO_SANITY_CHECKS
+ uint32_t initial_slist_len = 0;
+ size_t initial_slist_size = 0;
+#endif /* H5C_DO_SANITY_CHECKS */
+ int i;
+ herr_t ret_value = SUCCEED;
- entry_ptr = entry_ptr->next;
- } /* end while */
+ FUNC_ENTER_STATIC
- /* Check if the number of pinned entries in the ring is positive, and
- * it is not declining. Scream and die if so.
- */
- if (cur_ring_pel_len > 0 && cur_ring_pel_len >= old_ring_pel_len) {
- /* Don't error if allowed to have pinned entries remaining */
- if (evict_flags)
- HGOTO_DONE(TRUE)
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL,
- "Pinned entry count not decreasing, cur_ring_pel_len = %d, old_ring_pel_len = "
- "%d, ring = %d",
- (int)cur_ring_pel_len, (int)old_ring_pel_len, (int)ring)
- } /* end if */
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_enabled);
+ HDassert(cache_ptr->slist_ptr);
+ HDassert((flags & H5C__FLUSH_INVALIDATE_FLAG) == 0);
+ HDassert(ring > H5C_RING_UNDEFINED);
+ HDassert(ring < H5C_RING_NTYPES);
- HDassert(protected_entries == cache_ptr->pl_len);
+#if H5C_DO_EXTREME_SANITY_CHECKS
+ if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
- if (protected_entries > 0 && protected_entries == cache_ptr->index_len)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL,
- "Only protected entries left in cache, protected_entries = %d",
- (int)protected_entries)
- } /* main while loop */
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
- /* Invariants, after destroying all entries in the ring */
- for (i = (int)H5C_RING_UNDEFINED; i <= (int)ring; i++) {
- HDassert(cache_ptr->index_ring_len[i] == 0);
- HDassert(cache_ptr->index_ring_size[i] == (size_t)0);
- HDassert(cache_ptr->clean_index_ring_size[i] == (size_t)0);
- HDassert(cache_ptr->dirty_index_ring_size[i] == (size_t)0);
+ ignore_protected = ((flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0);
+ flush_marked_entries = ((flags & H5C__FLUSH_MARKED_ENTRIES_FLAG) != 0);
- HDassert(cache_ptr->slist_ring_len[i] == 0);
- HDassert(cache_ptr->slist_ring_size[i] == (size_t)0);
- } /* end for */
+ if (!flush_marked_entries) {
- HDassert(protected_entries <= cache_ptr->pl_len);
+ for (i = (int)H5C_RING_UNDEFINED; i < (int)ring; i++) {
- if (protected_entries > 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Cache has protected entries")
- else if (cur_ring_pel_len > 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't unpin all pinned entries in ring")
+ HDassert(cache_ptr->slist_ring_len[i] == 0);
+ }
+ }
-done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C_flush_invalidate_ring() */
+ HDassert(cache_ptr->flush_in_progress);
- /*-------------------------------------------------------------------------
- * Function: H5C__flush_ring
- *
- * Purpose: Flush the entries contained in the specified cache and
- * ring. All entries in rings outside the specified ring
- * must have been flushed on entry.
- *
- * If the cache contains protected entries in the specified
- * ring, the function will fail, as protected entries cannot
- * be flushed. However all unprotected entries in the target
- * ring should be flushed before the function returns failure.
- *
- * If flush dependencies appear in the target ring, the
- * function makes repeated passes through the slist flushing
- * entries in flush dependency order.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * a request to flush all items and something was protected.
- *
- * Programmer: John Mainzer
- * 9/1/15
+ /* When we are only flushing marked entries, the slist will usually
+ * still contain entries when we have flushed everything we should.
+ * Thus we track whether we have flushed any entries in the last
+ * pass, and terminate if we haven't.
+ */
+ flushed_entries_last_pass = TRUE;
+
+ /* Set the cache_ptr->slist_changed to false.
*
- *-------------------------------------------------------------------------
+ * This flag is set to TRUE by H5C__flush_single_entry if the
+ * slist is modified by a pre_serialize, serialize, or notify callback.
+ * H5C_flush_cache uses this flag to detect any modifications
+ * to the slist that might corrupt the scan of the slist -- and
+ * restart the scan in this event.
*/
- static herr_t H5C__flush_ring(H5F_t * f, H5C_ring_t ring, unsigned flags)
- {
- H5C_t * cache_ptr = f->shared->cache;
- hbool_t flushed_entries_last_pass;
- hbool_t flush_marked_entries;
- hbool_t ignore_protected;
- hbool_t tried_to_flush_protected_entry = FALSE;
- hbool_t restart_slist_scan;
- uint32_t protected_entries = 0;
- H5SL_node_t * node_ptr = NULL;
- H5C_cache_entry_t *entry_ptr = NULL;
- H5C_cache_entry_t *next_entry_ptr = NULL;
+ cache_ptr->slist_changed = FALSE;
+
+ while ((cache_ptr->slist_ring_len[ring] > 0) && (protected_entries == 0) && (flushed_entries_last_pass)) {
+
+ flushed_entries_last_pass = FALSE;
+
#if H5C_DO_SANITY_CHECKS
- uint32_t initial_slist_len = 0;
- size_t initial_slist_size = 0;
+ /* For sanity checking, try to verify that the skip list has
+ * the expected size and number of entries at the end of each
+ * internal while loop (see below).
+ *
+ * Doing this gets a bit tricky, as depending on flags, we may
+ * or may not flush all the entries in the slist.
+ *
+ * To make things more entertaining, with the advent of the
+ * fractal heap, the entry serialize callback can cause entries
+ * to be dirtied, resized, and/or moved. Also, the
+ * pre_serialize callback can result in an entry being
+ * removed from the cache via the take ownership flag.
+ *
+ * To deal with this, we first make note of the initial
+ * skip list length and size:
+ */
+ initial_slist_len = cache_ptr->slist_len;
+ initial_slist_size = cache_ptr->slist_size;
+
+ /* As mentioned above, there is the possibility that
+ * entries will be dirtied, resized, flushed, or removed
+ * from the cache via the take ownership flag during
+ * our pass through the skip list. To capture the number
+ * of entries added, and the skip list size delta,
+ * zero the slist_len_increase and slist_size_increase of
+ * the cache's instance of H5C_t. These fields will be
+ * updated elsewhere to account for slist insertions and/or
+ * dirty entry size changes.
+ */
+ cache_ptr->slist_len_increase = 0;
+ cache_ptr->slist_size_increase = 0;
+
+ /* at the end of the loop, use these values to compute the
+ * expected slist length and size and compare this with the
+ * value recorded in the cache's instance of H5C_t.
+ */
#endif /* H5C_DO_SANITY_CHECKS */
- int i;
- herr_t ret_value = SUCCEED;
- FUNC_ENTER_STATIC
+ restart_slist_scan = TRUE;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->slist_ptr);
- HDassert((flags & H5C__FLUSH_INVALIDATE_FLAG) == 0);
- HDassert(ring > H5C_RING_UNDEFINED);
- HDassert(ring < H5C_RING_NTYPES);
+ while ((restart_slist_scan) || (node_ptr != NULL)) {
-#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+ if (restart_slist_scan) {
- ignore_protected = ((flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0);
- flush_marked_entries = ((flags & H5C__FLUSH_MARKED_ENTRIES_FLAG) != 0);
+ restart_slist_scan = FALSE;
- if (!flush_marked_entries)
- for (i = (int)H5C_RING_UNDEFINED; i < (int)ring; i++)
- HDassert(cache_ptr->slist_ring_len[i] == 0);
+ /* Start at beginning of skip list */
+ node_ptr = H5SL_first(cache_ptr->slist_ptr);
- HDassert(cache_ptr->flush_in_progress);
+ if (node_ptr == NULL) {
- /* When we are only flushing marked entries, the slist will usually
- * still contain entries when we have flushed everything we should.
- * Thus we track whether we have flushed any entries in the last
- * pass, and terminate if we haven't.
- */
- flushed_entries_last_pass = TRUE;
+ /* the slist is empty -- break out of inner loop */
+ break;
+ }
- /* Set the cache_ptr->slist_changed to false.
- *
- * This flag is set to TRUE by H5C__flush_single_entry if the
- * slist is modified by a pre_serialize, serialize, or notify callback.
- * H5C_flush_cache uses this flag to detect any modifications
- * to the slist that might corrupt the scan of the slist -- and
- * restart the scan in this event.
- */
- cache_ptr->slist_changed = FALSE;
+ /* Get cache entry for this node */
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- while ((cache_ptr->slist_ring_len[ring] > 0) && (protected_entries == 0) &&
- (flushed_entries_last_pass)) {
- flushed_entries_last_pass = FALSE;
+ if (NULL == next_entry_ptr)
-#if H5C_DO_SANITY_CHECKS
- /* For sanity checking, try to verify that the skip list has
- * the expected size and number of entries at the end of each
- * internal while loop (see below).
- *
- * Doing this get a bit tricky, as depending on flags, we may
- * or may not flush all the entries in the slist.
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+
+ HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(next_entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->in_slist);
+
+ } /* end if */
+
+ entry_ptr = next_entry_ptr;
+
+ /* With the advent of the fractal heap, the free space
+ * manager, and the version 3 cache, it is possible
+ * that the pre-serialize or serialize callback will
+ * dirty, resize, or take ownership of other entries
+ * in the cache.
*
- * To make things more entertaining, with the advent of the
- * fractal heap, the entry serialize callback can cause entries
- * to be dirtied, resized, and/or moved. Also, the
- * pre_serialize callback can result in an entry being
- * removed from the cache via the take ownership flag.
+ * To deal with this, I have inserted code to detect any
+ * change in the skip list not directly under the control
+ * of this function. If such modifications are detected,
+ * we must re-start the scan of the skip list to avoid
+ * the possibility that the target of the next_entry_ptr
+ * may have been flushed or deleted from the cache.
*
- * To deal with this, we first make note of the initial
- * skip list length and size:
- */
- initial_slist_len = cache_ptr->slist_len;
- initial_slist_size = cache_ptr->slist_size;
-
- /* As mentioned above, there is the possibility that
- * entries will be dirtied, resized, flushed, or removed
- * from the cache via the take ownership flag during
- * our pass through the skip list. To capture the number
- * of entries added, and the skip list size delta,
- * zero the slist_len_increase and slist_size_increase of
- * the cache's instance of H5C_t. These fields will be
- * updated elsewhere to account for slist insertions and/or
- * dirty entry size changes.
+ * To verify that all such possibilities have been dealt
+ * with, we do a bit of extra sanity checking on
+ * entry_ptr.
*/
- cache_ptr->slist_len_increase = 0;
- cache_ptr->slist_size_increase = 0;
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->in_slist);
+ HDassert(entry_ptr->is_dirty);
+
+ if ((!flush_marked_entries) || (entry_ptr->flush_marker)) {
+
+ HDassert(entry_ptr->ring >= ring);
+ }
- /* at the end of the loop, use these values to compute the
- * expected slist length and size and compare this with the
- * value recorded in the cache's instance of H5C_t.
+ /* Advance node pointer now, before we delete its target
+ * from the slist.
*/
-#endif /* H5C_DO_SANITY_CHECKS */
+ node_ptr = H5SL_next(node_ptr);
- restart_slist_scan = TRUE;
+ if (node_ptr != NULL) {
- while ((restart_slist_scan) || (node_ptr != NULL)) {
- if (restart_slist_scan) {
- restart_slist_scan = FALSE;
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- /* Start at beginning of skip list */
- node_ptr = H5SL_first(cache_ptr->slist_ptr);
+ if (NULL == next_entry_ptr)
- if (node_ptr == NULL)
- /* the slist is empty -- break out of inner loop */
- break;
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
- /* Get cache entry for this node */
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(next_entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->in_slist);
- if (NULL == next_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+ if (!flush_marked_entries || next_entry_ptr->flush_marker) {
- HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(next_entry_ptr->is_dirty);
- HDassert(next_entry_ptr->in_slist);
- } /* end if */
+ HDassert(next_entry_ptr->ring >= ring);
+ }
- entry_ptr = next_entry_ptr;
-
- /* With the advent of the fractal heap, the free space
- * manager, and the version 3 cache, it is possible
- * that the pre-serialize or serialize callback will
- * dirty, resize, or take ownership of other entries
- * in the cache.
- *
- * To deal with this, I have inserted code to detect any
- * change in the skip list not directly under the control
- * of this function. If such modifications are detected,
- * we must re-start the scan of the skip list to avoid
- * the possibility that the target of the next_entry_ptr
- * may have been flushed or deleted from the cache.
- *
- * To verify that all such possibilities have been dealt
- * with, we do a bit of extra sanity checking on
- * entry_ptr.
- */
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->in_slist);
- HDassert(entry_ptr->is_dirty);
- if (!flush_marked_entries || entry_ptr->flush_marker)
- HDassert(entry_ptr->ring >= ring);
-
- /* Advance node pointer now, before we delete its target
- * from the slist.
- */
- node_ptr = H5SL_next(node_ptr);
- if (node_ptr != NULL) {
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- if (NULL == next_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+ HDassert(entry_ptr != next_entry_ptr);
+
+ } /* end if */
+ else {
- HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(next_entry_ptr->is_dirty);
- HDassert(next_entry_ptr->in_slist);
+ next_entry_ptr = NULL;
+ }
+
+ if ((!flush_marked_entries || entry_ptr->flush_marker) &&
+ ((!entry_ptr->flush_me_last) ||
+ ((entry_ptr->flush_me_last) && ((cache_ptr->num_last_entries >= cache_ptr->slist_len) ||
+ (flush_marked_entries && entry_ptr->flush_marker)))) &&
+ ((entry_ptr->flush_dep_nchildren == 0) || (entry_ptr->flush_dep_ndirty_children == 0)) &&
+ (entry_ptr->ring == ring)) {
+
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
- if (!flush_marked_entries || next_entry_ptr->flush_marker)
- HDassert(next_entry_ptr->ring >= ring);
+ if (entry_ptr->is_protected) {
+
+ /* we probably have major problems -- but lets
+ * flush everything we can before we decide
+ * whether to flag an error.
+ */
+ tried_to_flush_protected_entry = TRUE;
+ protected_entries++;
- HDassert(entry_ptr != next_entry_ptr);
} /* end if */
- else
- next_entry_ptr = NULL;
+ else {
- if ((!flush_marked_entries || entry_ptr->flush_marker) &&
- (!entry_ptr->flush_me_last ||
- (entry_ptr->flush_me_last && (cache_ptr->num_last_entries >= cache_ptr->slist_len ||
- (flush_marked_entries && entry_ptr->flush_marker)))) &&
- (entry_ptr->flush_dep_nchildren == 0 || entry_ptr->flush_dep_ndirty_children == 0) &&
- entry_ptr->ring == ring) {
+ if (H5C__flush_single_entry(f, entry_ptr, (flags | H5C__DURING_FLUSH_FLAG)) < 0)
- HDassert(entry_ptr->flush_dep_nunser_children == 0);
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry")
+
+ if (cache_ptr->slist_changed) {
- if (entry_ptr->is_protected) {
- /* we probably have major problems -- but lets
- * flush everything we can before we decide
- * whether to flag an error.
+ /* The slist has been modified by something
+ * other than the simple removal of
+ * the flushed entry after the flush.
+ *
+ * This has the potential to corrupt the
+ * scan through the slist, so restart it.
*/
- tried_to_flush_protected_entry = TRUE;
- protected_entries++;
+ restart_slist_scan = TRUE;
+ cache_ptr->slist_changed = FALSE;
+ H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
+
} /* end if */
- else {
- if (H5C__flush_single_entry(f, entry_ptr, (flags | H5C__DURING_FLUSH_FLAG)) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry")
-
- if (cache_ptr->slist_changed) {
- /* The slist has been modified by something
- * other than the simple removal of the
- * of the flushed entry after the flush.
- *
- * This has the potential to corrupt the
- * scan through the slist, so restart it.
- */
- restart_slist_scan = TRUE;
- cache_ptr->slist_changed = FALSE;
- H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
- } /* end if */
- flushed_entries_last_pass = TRUE;
- } /* end else */
- } /* end if */
- } /* while ( ( restart_slist_scan ) || ( node_ptr != NULL ) ) */
+ flushed_entries_last_pass = TRUE;
+
+ } /* end else */
+ } /* end if */
+ } /* while ( ( restart_slist_scan ) || ( node_ptr != NULL ) ) */
#if H5C_DO_SANITY_CHECKS
- /* Verify that the slist size and length are as expected. */
- HDassert((uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase) ==
- cache_ptr->slist_len);
- HDassert((size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase) ==
- cache_ptr->slist_size);
-#endif /* H5C_DO_SANITY_CHECKS */
- } /* while */
+ /* Verify that the slist size and length are as expected. */
+ HDassert((uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase) ==
+ cache_ptr->slist_len);
+ HDassert((size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase) ==
+ cache_ptr->slist_size);
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ } /* while */
- HDassert(protected_entries <= cache_ptr->pl_len);
+ HDassert(protected_entries <= cache_ptr->pl_len);
- if (((cache_ptr->pl_len > 0) && (!ignore_protected)) || (tried_to_flush_protected_entry))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "cache has protected items")
+ if (((cache_ptr->pl_len > 0) && (!ignore_protected)) || (tried_to_flush_protected_entry))
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "cache has protected items")
#if H5C_DO_SANITY_CHECKS
- if (!flush_marked_entries) {
- HDassert(cache_ptr->slist_ring_len[ring] == 0);
- HDassert(cache_ptr->slist_ring_size[ring] == 0);
- } /* end if */
-#endif /* H5C_DO_SANITY_CHECKS */
+ if (!flush_marked_entries) {
+
+ HDassert(cache_ptr->slist_ring_len[ring] == 0);
+ HDassert(cache_ptr->slist_ring_size[ring] == 0);
+
+ } /* end if */
+#endif /* H5C_DO_SANITY_CHECKS */
done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__flush_ring() */
- /*-------------------------------------------------------------------------
- *
- * Function: H5C__flush_single_entry
- *
- * Purpose: Flush or clear (and evict if requested) the cache entry
- * with the specified address and type. If the type is NULL,
- * any unprotected entry at the specified address will be
- * flushed (and possibly evicted).
- *
- * Attempts to flush a protected entry will result in an
- * error.
- *
- * If the H5C__FLUSH_INVALIDATE_FLAG flag is set, the entry will
- * be cleared and not flushed, and the call can't be part of a
- * sequence of flushes.
- *
- * If the caller knows the address of the skip list node at
- * which the target entry resides, it can avoid a lookup
- * by supplying that address in the tgt_node_ptr parameter.
- * If this parameter is NULL, the function will do a skip list
- * search for the entry instead.
- *
- * The function does nothing silently if there is no entry
- * at the supplied address, or if the entry found has the
- * wrong type.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * an attempt to flush a protected item.
- *
- * Programmer: John Mainzer, 5/5/04
- *
- * Changes: Please maintain the changes list, and do not delete it
- * unless you have merged it into the header comment
- * proper.
- *
- * Added macro calls to maintain page buffer hints.
- *
- * JRM -- 3/20/20
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C__flush_ring() */
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__flush_single_entry
+ *
+ * Purpose: Flush or clear (and evict if requested) the cache entry
+ * with the specified address and type. If the type is NULL,
+ * any unprotected entry at the specified address will be
+ * flushed (and possibly evicted).
+ *
+ * Attempts to flush a protected entry will result in an
+ * error.
+ *
+ * If the H5C__FLUSH_INVALIDATE_FLAG flag is set, the entry will
+ * be cleared and not flushed, and the call can't be part of a
+ * sequence of flushes.
+ *
+ * If the caller knows the address of the skip list node at
+ * which the target entry resides, it can avoid a lookup
+ * by supplying that address in the tgt_node_ptr parameter.
+ * If this parameter is NULL, the function will do a skip list
+ * search for the entry instead.
+ *
+ * The function does nothing silently if there is no entry
+ * at the supplied address, or if the entry found has the
+ * wrong type.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * an attempt to flush a protected item.
+ *
+ * Programmer: John Mainzer, 5/5/04
+ *
+ * Modifications:
+ *
+ * JRM -- 7/21/04
+ * Updated function for the addition of the hash table.
+ *
+ * QAK -- 11/26/04
+ * Updated function for the switch from TBBTs to skip lists.
+ *
+ * JRM -- 1/6/05
+ * Updated function to reset the flush_marker field.
+ * Also replace references to H5F_FLUSH_INVALIDATE and
+ * H5F_FLUSH_CLEAR_ONLY with references to
+ * H5C__FLUSH_INVALIDATE_FLAG and H5C__FLUSH_CLEAR_ONLY_FLAG
+ * respectively.
+ *
+ * JRM -- 6/24/05
+ * Added code to remove dirty entries from the slist after
+ * they have been flushed. Also added a sanity check that
+ * will scream if we attempt a write when writes are
+ * completely disabled.
+ *
+ * JRM -- 7/5/05
+ * Added code to call the new log_flush callback whenever
+ * a dirty entry is written to disk. Note that the callback
+ * is not called if the H5C__FLUSH_CLEAR_ONLY_FLAG is set,
+ * as there is no write to file in this case.
+ *
+ * JRM -- 8/21/06
+ * Added code maintaining the flush_in_progress and
+ * destroy_in_progress fields in H5C_cache_entry_t.
+ *
+ * Also added flush_flags parameter to the call to
+ * type_ptr->flush() so that the flush routine can report
+ * whether the entry has been resized or renamed. Added
+ * code using the flush_flags variable to detect the case
+ * in which the target entry is resized during flush, and
+ * update the caches data structures accordingly.
+ *
+ * JRM -- 3/29/07
+ * Added sanity checks on the new is_read_only and
+ * ro_ref_count fields.
+ *
+ * QAK -- 2/07/08
+ * Separated "destroy entry" concept from "remove entry from
+ * cache" concept, by adding the 'take_ownership' flag and
+ * the "destroy_entry" variable.
+ *
+ * JRM -- 11/5/08
+ * Added call to H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN() to
+ * maintain the new clean_index_size and clean_index_size
+ * fields of H5C_t.
+ *
+ *
+ * Missing entries??
+
+ * Added macro calls to maintain page buffer hints.
+ *
+ * JRM -- 3/20/20
+ *
+ * JRM -- 5/8/20
+ * Updated sanity checks for the possibility that the slist
+ * is disabled.
+ *
+ * Also updated main comment to conform more closely with
+ * the current state of the code.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
+{
+ H5C_t * cache_ptr; /* Cache for file */
+ hbool_t destroy; /* external flag */
+ hbool_t clear_only; /* external flag */
+ hbool_t free_file_space; /* external flag */
+ hbool_t take_ownership; /* external flag */
+ hbool_t del_from_slist_on_destroy; /* external flag */
+ hbool_t during_flush; /* external flag */
+ hbool_t write_entry; /* internal flag */
+ hbool_t destroy_entry; /* internal flag */
+ hbool_t generate_image; /* internal flag */
+ hbool_t update_page_buffer; /* internal flag */
+ hbool_t was_dirty;
+ hbool_t suppress_image_entry_writes = FALSE;
+ hbool_t suppress_image_entry_frees = FALSE;
+ haddr_t entry_addr = HADDR_UNDEF;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ HDassert(f);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(entry_ptr);
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring != H5C_RING_UNDEFINED);
+ HDassert(entry_ptr->type);
+
+ /* setup external flags from the flags parameter */
+ destroy = ((flags & H5C__FLUSH_INVALIDATE_FLAG) != 0);
+ clear_only = ((flags & H5C__FLUSH_CLEAR_ONLY_FLAG) != 0);
+ free_file_space = ((flags & H5C__FREE_FILE_SPACE_FLAG) != 0);
+ take_ownership = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0);
+ del_from_slist_on_destroy = ((flags & H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) != 0);
+ during_flush = ((flags & H5C__DURING_FLUSH_FLAG) != 0);
+ generate_image = ((flags & H5C__GENERATE_IMAGE_FLAG) != 0);
+ update_page_buffer = ((flags & H5C__UPDATE_PAGE_BUFFER_FLAG) != 0);
+
+ /* Set the flag for destroying the entry, based on the 'take ownership'
+ * and 'destroy' flags
+ */
+ if (take_ownership) {
+
+ destroy_entry = FALSE;
+ }
+ else {
+
+ destroy_entry = destroy;
+ }
+
+ /* we will write the entry to disk if it exists, is dirty, and if the
+ * clear only flag is not set.
+ */
+ if (entry_ptr->is_dirty && !clear_only) {
+
+ write_entry = TRUE;
+ }
+ else {
+
+ write_entry = FALSE;
+ }
+
+ /* if we have received close warning, and we have been instructed to
+ * generate a metadata cache image, and we have actually constructed
+ * the entry images, set suppress_image_entry_frees to TRUE.
*
- *-------------------------------------------------------------------------
+ * Set suppress_image_entry_writes to TRUE if indicated by the
+ * image_ctl flags.
*/
- herr_t H5C__flush_single_entry(H5F_t * f, H5C_cache_entry_t * entry_ptr, unsigned flags)
- {
- H5C_t * cache_ptr; /* Cache for file */
- hbool_t destroy; /* external flag */
- hbool_t clear_only; /* external flag */
- hbool_t free_file_space; /* external flag */
- hbool_t take_ownership; /* external flag */
- hbool_t del_from_slist_on_destroy; /* external flag */
- hbool_t during_flush; /* external flag */
- hbool_t write_entry; /* internal flag */
- hbool_t destroy_entry; /* internal flag */
- hbool_t generate_image; /* internal flag */
- hbool_t update_page_buffer; /* internal flag */
- hbool_t was_dirty;
- hbool_t suppress_image_entry_writes = FALSE;
- hbool_t suppress_image_entry_frees = FALSE;
- haddr_t entry_addr = HADDR_UNDEF;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_PACKAGE
-
- HDassert(f);
- cache_ptr = f->shared->cache;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(entry_ptr);
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->ring != H5C_RING_UNDEFINED);
- HDassert(entry_ptr->type);
-
- /* setup external flags from the flags parameter */
- destroy = ((flags & H5C__FLUSH_INVALIDATE_FLAG) != 0);
- clear_only = ((flags & H5C__FLUSH_CLEAR_ONLY_FLAG) != 0);
- free_file_space = ((flags & H5C__FREE_FILE_SPACE_FLAG) != 0);
- take_ownership = ((flags & H5C__TAKE_OWNERSHIP_FLAG) != 0);
- del_from_slist_on_destroy = ((flags & H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) != 0);
- during_flush = ((flags & H5C__DURING_FLUSH_FLAG) != 0);
- generate_image = ((flags & H5C__GENERATE_IMAGE_FLAG) != 0);
- update_page_buffer = ((flags & H5C__UPDATE_PAGE_BUFFER_FLAG) != 0);
-
- /* Set the flag for destroying the entry, based on the 'take ownership'
- * and 'destroy' flags
- */
- if (take_ownership)
- destroy_entry = FALSE;
- else
- destroy_entry = destroy;
+ if ((cache_ptr->close_warning_received) && (cache_ptr->image_ctl.generate_image) &&
+ (cache_ptr->num_entries_in_image > 0) && (cache_ptr->image_entries != NULL)) {
- /* we will write the entry to disk if it exists, is dirty, and if the
- * clear only flag is not set.
- */
- if (entry_ptr->is_dirty && !clear_only)
- write_entry = TRUE;
- else
- write_entry = FALSE;
+ /* Sanity checks */
+ HDassert(entry_ptr->image_up_to_date || !(entry_ptr->include_in_image));
+ HDassert(entry_ptr->image_ptr || !(entry_ptr->include_in_image));
+ HDassert((!clear_only) || !(entry_ptr->include_in_image));
+ HDassert((!take_ownership) || !(entry_ptr->include_in_image));
+ HDassert((!free_file_space) || !(entry_ptr->include_in_image));
- /* if we have received close warning, and we have been instructed to
- * generate a metadata cache image, and we have actually constructed
- * the entry images, set suppress_image_entry_frees to TRUE.
- *
- * Set suppress_image_entry_writes to TRUE if indicated by the
- * image_ctl flags.
- */
- if (cache_ptr->close_warning_received && cache_ptr->image_ctl.generate_image &&
- cache_ptr->num_entries_in_image > 0 && cache_ptr->image_entries) {
- /* Sanity checks */
- HDassert(entry_ptr->image_up_to_date || !(entry_ptr->include_in_image));
- HDassert(entry_ptr->image_ptr || !(entry_ptr->include_in_image));
- HDassert((!clear_only) || !(entry_ptr->include_in_image));
- HDassert((!take_ownership) || !(entry_ptr->include_in_image));
- HDassert((!free_file_space) || !(entry_ptr->include_in_image));
+ suppress_image_entry_frees = TRUE;
+
+ if (cache_ptr->image_ctl.flags & H5C_CI__SUPRESS_ENTRY_WRITES) {
- suppress_image_entry_frees = TRUE;
+ suppress_image_entry_writes = TRUE;
- if (cache_ptr->image_ctl.flags & H5C_CI__SUPRESS_ENTRY_WRITES)
- suppress_image_entry_writes = TRUE;
} /* end if */
+ } /* end if */
- /* run initial sanity checks */
+ /* run initial sanity checks */
#if H5C_DO_SANITY_CHECKS
+ if (cache_ptr->slist_enabled) {
+
if (entry_ptr->in_slist) {
+
HDassert(entry_ptr->is_dirty);
if ((entry_ptr->flush_marker) && (!entry_ptr->is_dirty))
+
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry in slist failed sanity checks")
} /* end if */
else {
+
HDassert(!entry_ptr->is_dirty);
HDassert(!entry_ptr->flush_marker);
if ((entry_ptr->is_dirty) || (entry_ptr->flush_marker))
+
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry failed sanity checks")
+
} /* end else */
-#endif /* H5C_DO_SANITY_CHECKS */
+ }
+ else { /* slist is disabled */
- if (entry_ptr->is_protected) {
- HDassert(!entry_ptr->is_protected);
+ HDassert(!entry_ptr->in_slist);
- /* Attempt to flush a protected entry -- scream and die. */
- HGOTO_ERROR(H5E_CACHE, H5E_PROTECT, FAIL, "Attempt to flush a protected entry")
- } /* end if */
+ if (!entry_ptr->is_dirty) {
- /* Set entry_ptr->flush_in_progress = TRUE and set
- * entry_ptr->flush_marker = FALSE
- *
- * We will set flush_in_progress back to FALSE at the end if the
- * entry still exists at that point.
- */
- entry_ptr->flush_in_progress = TRUE;
- entry_ptr->flush_marker = FALSE;
+ if (entry_ptr->flush_marker)
- /* Preserve current dirty state for later */
- was_dirty = entry_ptr->is_dirty;
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flush marked clean entry?")
+ }
+ }
+#endif /* H5C_DO_SANITY_CHECKS */
- /* The entry is dirty, and we are doing a flush, a flush destroy or have
- * been requested to generate an image. In those cases, serialize the
- * entry.
- */
- if (write_entry || generate_image) {
- HDassert(entry_ptr->is_dirty);
+ if (entry_ptr->is_protected) {
+
+ HDassert(!entry_ptr->is_protected);
+
+ /* Attempt to flush a protected entry -- scream and die. */
+ HGOTO_ERROR(H5E_CACHE, H5E_PROTECT, FAIL, "Attempt to flush a protected entry")
+
+ } /* end if */
+
+ /* Set entry_ptr->flush_in_progress = TRUE and set
+ * entry_ptr->flush_marker = FALSE
+ *
+ * We will set flush_in_progress back to FALSE at the end if the
+ * entry still exists at that point.
+ */
+ entry_ptr->flush_in_progress = TRUE;
+ entry_ptr->flush_marker = FALSE;
+
+ /* Preserve current dirty state for later */
+ was_dirty = entry_ptr->is_dirty;
+
+ /* The entry is dirty, and we are doing a flush, a flush destroy or have
+ * been requested to generate an image. In those cases, serialize the
+ * entry.
+ */
+ if (write_entry || generate_image) {
+
+ HDassert(entry_ptr->is_dirty);
+
+ if (NULL == entry_ptr->image_ptr) {
+
+ if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)))
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL,
+ "memory allocation failed for on disk image buffer")
- if (NULL == entry_ptr->image_ptr) {
- if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL,
- "memory allocation failed for on disk image buffer")
#if H5C_DO_MEMORY_SANITY_CHECKS
- H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE,
- H5C_IMAGE_EXTRA_SPACE);
-#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
- } /* end if */
+ H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE,
+ H5C_IMAGE_EXTRA_SPACE);
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
- if (!(entry_ptr->image_up_to_date)) {
- /* Sanity check */
- HDassert(!entry_ptr->prefetched);
+ } /* end if */
- /* Generate the entry's image */
- if (H5C__generate_image(f, cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't generate entry's image")
- } /* end if ( ! (entry_ptr->image_up_to_date) ) */
- } /* end if */
+ if (!(entry_ptr->image_up_to_date)) {
- /* Finally, write the image to disk.
- *
- * Note that if the H5AC__CLASS_SKIP_WRITES flag is set in the
- * in the entry's type, we silently skip the write. This
- * flag should only be used in test code.
- */
- if (write_entry) {
- HDassert(entry_ptr->is_dirty);
+ /* Sanity check */
+ HDassert(!entry_ptr->prefetched);
+
+ /* Generate the entry's image */
+ if (H5C__generate_image(f, cache_ptr, entry_ptr) < 0)
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't generate entry's image")
+
+ } /* end if ( ! (entry_ptr->image_up_to_date) ) */
+ } /* end if */
+
+ /* Finally, write the image to disk.
+ *
+ * Note that if the H5AC__CLASS_SKIP_WRITES flag is set in the
+ * in the entry's type, we silently skip the write. This
+ * flag should only be used in test code.
+ */
+ if (write_entry) {
+
+ HDassert(entry_ptr->is_dirty);
#if H5C_DO_SANITY_CHECKS
- if (cache_ptr->check_write_permitted && !(cache_ptr->write_permitted))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Write when writes are always forbidden!?!?!")
+ if ((cache_ptr->check_write_permitted) && (!(cache_ptr->write_permitted)))
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Write when writes are always forbidden!?!?!")
#endif /* H5C_DO_SANITY_CHECKS */
- /* Write the image to disk unless the write is suppressed.
- *
- * This happens if both suppress_image_entry_writes and
- * entry_ptr->include_in_image are TRUE, or if the
- * H5AC__CLASS_SKIP_WRITES is set in the entry's type. This
- * flag should only be used in test code
- */
- if ((!suppress_image_entry_writes || !entry_ptr->include_in_image) &&
- (((entry_ptr->type->flags) & H5C__CLASS_SKIP_WRITES) == 0)) {
- H5FD_mem_t mem_type = H5FD_MEM_DEFAULT;
+ /* Write the image to disk unless the write is suppressed.
+ *
+ * This happens if both suppress_image_entry_writes and
+ * entry_ptr->include_in_image are TRUE, or if the
+ * H5AC__CLASS_SKIP_WRITES is set in the entry's type. This
+ * flag should only be used in test code
+ */
+ if (((!suppress_image_entry_writes) || (!entry_ptr->include_in_image)) &&
+ (((entry_ptr->type->flags) & H5C__CLASS_SKIP_WRITES) == 0)) {
+
+ H5FD_mem_t mem_type = H5FD_MEM_DEFAULT;
#ifdef H5_HAVE_PARALLEL
- if (cache_ptr->coll_write_list) {
- if (H5SL_insert(cache_ptr->coll_write_list, entry_ptr, &entry_ptr->addr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "unable to insert skip list item")
- } /* end if */
- else {
+ if (cache_ptr->coll_write_list) {
+
+ if (H5SL_insert(cache_ptr->coll_write_list, entry_ptr, &entry_ptr->addr) < 0)
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "unable to insert skip list item")
+ } /* end if */
+ else {
#endif /* H5_HAVE_PARALLEL */
- if (entry_ptr->prefetched) {
- HDassert(entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID);
- mem_type = cache_ptr->class_table_ptr[entry_ptr->prefetch_type_id]->mem_type;
- } /* end if */
- else
- mem_type = entry_ptr->type->mem_type;
+ if (entry_ptr->prefetched) {
- H5C__SET_PB_WRITE_HINTS(cache_ptr, entry_ptr->type)
+ HDassert(entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID);
- if (H5F_block_write(f, mem_type, entry_ptr->addr, entry_ptr->size, entry_ptr->image_ptr) <
- 0) {
+ mem_type = cache_ptr->class_table_ptr[entry_ptr->prefetch_type_id]->mem_type;
+ } /* end if */
+ else {
- H5C__RESET_PB_WRITE_HINTS(cache_ptr)
+ mem_type = entry_ptr->type->mem_type;
+ }
+
+ H5C__SET_PB_WRITE_HINTS(cache_ptr, entry_ptr->type)
+
+ if (H5F_block_write(f, mem_type, entry_ptr->addr, entry_ptr->size, entry_ptr->image_ptr) < 0) {
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't write image to file")
- }
H5C__RESET_PB_WRITE_HINTS(cache_ptr)
-#ifdef H5_HAVE_PARALLEL
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't write image to file")
}
-#endif /* H5_HAVE_PARALLEL */
- } /* end if */
+ H5C__RESET_PB_WRITE_HINTS(cache_ptr)
+#ifdef H5_HAVE_PARALLEL
+ }
+#endif /* H5_HAVE_PARALLEL */
- /* if the entry has a notify callback, notify it that we have
- * just flushed the entry.
- */
- if (entry_ptr->type->notify &&
- (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_FLUSH, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client of entry flush")
- } /* if ( write_entry ) */
+ } /* end if */
- /* At this point, all pre-serialize and serialize calls have been
- * made if it was appropriate to make them. Similarly, the entry
- * has been written to disk if desired.
- *
- * Thus it is now safe to update the cache data structures for the
- * flush.
+ /* if the entry has a notify callback, notify it that we have
+ * just flushed the entry.
*/
+ if ((entry_ptr->type->notify) &&
+ ((entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_FLUSH, entry_ptr) < 0))
- /* start by updating the statistics */
- if (clear_only) {
- /* only log a clear if the entry was dirty */
- if (was_dirty) {
- H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr)
- } /* end if */
- }
- else if (write_entry) {
- HDassert(was_dirty);
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client of entry flush")
- /* only log a flush if we actually wrote to disk */
- H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr)
- } /* end else if */
+ } /* if ( write_entry ) */
- /* Note that the algorithm below is (very) similar to the set of operations
- * in H5C_remove_entry() and should be kept in sync with changes
- * to that code. - QAK, 2016/11/30
- */
+ /* At this point, all pre-serialize and serialize calls have been
+ * made if it was appropriate to make them. Similarly, the entry
+ * has been written to disk if desired.
+ *
+ * Thus it is now safe to update the cache data structures for the
+ * flush.
+ */
- /* Update the cache internal data structures. */
- if (destroy) {
- /* Sanity checks */
- if (take_ownership)
- HDassert(!destroy_entry);
- else
- HDassert(destroy_entry);
- HDassert(!entry_ptr->is_pinned);
+ /* start by updating the statistics */
+ if (clear_only) {
- /* Update stats, while entry is still in the cache */
- H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership)
+ /* only log a clear if the entry was dirty */
+ if (was_dirty) {
- /* If the entry's type has a 'notify' callback and the entry is about
- * to be removed from the cache, send a 'before eviction' notice while
- * the entry is still fully integrated in the cache.
- */
- if (entry_ptr->type->notify &&
- (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry to evict")
+ H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr)
- /* Update the cache internal data structures as appropriate
- * for a destroy. Specifically:
- *
- * 1) Delete it from the index
- *
- * 2) Delete it from the skip list if requested.
- *
- * 3) Delete it from the collective read access list.
- *
- * 4) Update the replacement policy for eviction
- *
- * 5) Remove it from the tag list for this object
- *
- * Finally, if the destroy_entry flag is set, discard the
- * entry.
- */
- H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL)
+ } /* end if */
+ }
+ else if (write_entry) {
- if (entry_ptr->in_slist && del_from_slist_on_destroy)
- H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush)
+ HDassert(was_dirty);
-#ifdef H5_HAVE_PARALLEL
- /* Check for collective read access flag */
- if (entry_ptr->coll_access) {
- entry_ptr->coll_access = FALSE;
- H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
- } /* end if */
-#endif /* H5_HAVE_PARALLEL */
+ /* only log a flush if we actually wrote to disk */
+ H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr)
- H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, FAIL)
+ } /* end else if */
- /* Remove entry from tag list */
- if (H5C__untag_entry(cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list")
+ /* Note that the algorithm below is (very) similar to the set of operations
+ * in H5C_remove_entry() and should be kept in sync with changes
+ * to that code. - QAK, 2016/11/30
+ */
- /* verify that the entry is no longer part of any flush dependencies */
- HDassert(entry_ptr->flush_dep_nparents == 0);
- HDassert(entry_ptr->flush_dep_nchildren == 0);
- } /* end if */
+ /* Update the cache internal data structures. */
+ if (destroy) {
+
+ /* Sanity checks */
+ if (take_ownership) {
+
+ HDassert(!destroy_entry);
+ }
else {
- HDassert(clear_only || write_entry);
- HDassert(entry_ptr->is_dirty);
- HDassert(entry_ptr->in_slist);
- /* We are either doing a flush or a clear.
- *
- * A clear and a flush are the same from the point of
- * view of the replacement policy and the slist.
- * Hence no differentiation between them.
- *
- * JRM -- 7/7/07
- */
+ HDassert(destroy_entry);
+ }
+
+ HDassert(!entry_ptr->is_pinned);
+
+ /* Update stats, while entry is still in the cache */
+ H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership)
+
+ /* If the entry's type has a 'notify' callback and the entry is about
+ * to be removed from the cache, send a 'before eviction' notice while
+ * the entry is still fully integrated in the cache.
+ */
+ if ((entry_ptr->type->notify) &&
+ ((entry_ptr->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT, entry_ptr) < 0))
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry to evict")
+
+ /* Update the cache internal data structures as appropriate
+ * for a destroy. Specifically:
+ *
+ * 1) Delete it from the index
+ *
+ * 2) Delete it from the skip list if requested.
+ *
+ * 3) Delete it from the collective read access list.
+ *
+ * 4) Update the replacement policy for eviction
+ *
+ * 5) Remove it from the tag list for this object
+ *
+ * Finally, if the destroy_entry flag is set, discard the
+ * entry.
+ */
+ H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL)
- H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, FAIL)
+ if ((entry_ptr->in_slist) && (del_from_slist_on_destroy)) {
H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush)
+ }
- /* mark the entry as clean and update the index for
- * entry clean. Also, call the clear callback
- * if defined.
- */
- entry_ptr->is_dirty = FALSE;
+#ifdef H5_HAVE_PARALLEL
+ /* Check for collective read access flag */
+ if (entry_ptr->coll_access) {
- H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr);
+ entry_ptr->coll_access = FALSE;
- /* Check for entry changing status and do notifications, etc. */
- if (was_dirty) {
- /* If the entry's type has a 'notify' callback send a 'entry cleaned'
- * notice now that the entry is fully integrated into the cache.
- */
- if (entry_ptr->type->notify &&
- (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify client about entry dirty flag cleared")
+ H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
- /* Propagate the clean flag up the flush dependency chain if appropriate */
- if (entry_ptr->flush_dep_ndirty_children != 0)
- HDassert(entry_ptr->flush_dep_ndirty_children == 0);
- if (entry_ptr->flush_dep_nparents > 0)
- if (H5C__mark_flush_dep_clean(entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL,
- "Can't propagate flush dep clean flag")
- } /* end if */
- } /* end else */
+ } /* end if */
+#endif /* H5_HAVE_PARALLEL */
- /* reset the flush_in progress flag */
- entry_ptr->flush_in_progress = FALSE;
+ H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, FAIL)
- /* capture the cache entry address for the log_flush call at the
- end before the entry_ptr gets freed */
- entry_addr = entry_ptr->addr;
+ /* Remove entry from tag list */
+ if (H5C__untag_entry(cache_ptr, entry_ptr) < 0)
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list")
+
+ /* verify that the entry is no longer part of any flush dependencies */
+ HDassert(entry_ptr->flush_dep_nparents == 0);
+ HDassert(entry_ptr->flush_dep_nchildren == 0);
- /* Internal cache data structures should now be up to date, and
- * consistent with the status of the entry.
+ } /* end if */
+ else {
+
+ HDassert(clear_only || write_entry);
+ HDassert(entry_ptr->is_dirty);
+ HDassert((!cache_ptr->slist_enabled) || (entry_ptr->in_slist));
+
+ /* We are either doing a flush or a clear.
+ *
+ * A clear and a flush are the same from the point of
+ * view of the replacement policy and the slist.
+ * Hence no differentiation between them.
*
- * Now discard the entry if appropriate.
+ * JRM -- 7/7/07
*/
- if (destroy) {
- /* Sanity check */
- HDassert(0 == entry_ptr->flush_dep_nparents);
- /* if both suppress_image_entry_frees and entry_ptr->include_in_image
- * are true, simply set entry_ptr->image_ptr to NULL, as we have
- * another pointer to the buffer in an instance of H5C_image_entry_t
- * in cache_ptr->image_entries.
- *
- * Otherwise, free the buffer if it exists.
- */
- if (suppress_image_entry_frees && entry_ptr->include_in_image)
- entry_ptr->image_ptr = NULL;
- else if (entry_ptr->image_ptr != NULL)
- entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
+ H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, FAIL)
- /* If the entry is not a prefetched entry, verify that the flush
- * dependency parents addresses array has been transferred.
- *
- * If the entry is prefetched, the free_isr routine will dispose of
- * the flush dependency parents addresses array if necessary.
+ H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush)
+
+ /* mark the entry as clean and update the index for
+ * entry clean. Also, call the clear callback
+ * if defined.
+ */
+ entry_ptr->is_dirty = FALSE;
+
+ H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr);
+
+ /* Check for entry changing status and do notifications, etc. */
+ if (was_dirty) {
+
+ /* If the entry's type has a 'notify' callback send a
+ * 'entry cleaned' notice now that the entry is fully
+ * integrated into the cache.
*/
- if (!entry_ptr->prefetched) {
- HDassert(0 == entry_ptr->fd_parent_count);
- HDassert(NULL == entry_ptr->fd_parent_addrs);
- } /* end if */
+ if ((entry_ptr->type->notify) &&
+ ((entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0))
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify client about entry dirty flag cleared")
- /* Check whether we should free the space in the file that
- * the entry occupies
+ /* Propagate the clean flag up the flush dependency chain
+ * if appropriate
*/
- if (free_file_space) {
- hsize_t fsf_size;
+ if (entry_ptr->flush_dep_ndirty_children != 0) {
- /* Sanity checks */
- HDassert(H5F_addr_defined(entry_ptr->addr));
- HDassert(!H5F_IS_TMP_ADDR(f, entry_ptr->addr));
-#ifndef NDEBUG
- {
- size_t curr_len;
+ HDassert(entry_ptr->flush_dep_ndirty_children == 0);
+ }
- /* Get the actual image size for the thing again */
- entry_ptr->type->image_len((void *)entry_ptr, &curr_len);
- HDassert(curr_len == entry_ptr->size);
- }
-#endif /* NDEBUG */
+ if (entry_ptr->flush_dep_nparents > 0) {
- /* If the file space free size callback is defined, use
- * it to get the size of the block of file space to free.
- * Otherwise use entry_ptr->size.
- */
- if (entry_ptr->type->fsf_size) {
- if ((entry_ptr->type->fsf_size)((void *)entry_ptr, &fsf_size) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to get file space free size")
- } /* end if */
- else /* no file space free size callback -- use entry size */
- fsf_size = entry_ptr->size;
-
- /* Release the space on disk */
- if (H5MF_xfree(f, entry_ptr->type->mem_type, entry_ptr->addr, fsf_size) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to free file space for cache entry")
- } /* end if ( free_file_space ) */
-
- /* Reset the pointer to the cache the entry is within. -QAK */
- entry_ptr->cache_ptr = NULL;
-
- /* increment entries_removed_counter and set
- * last_entry_removed_ptr. As we are likely abuut to
- * free the entry, recall that last_entry_removed_ptr
- * must NEVER be dereferenced.
- *
- * Recall that these fields are maintained to allow functions
- * that perform scans of lists of entries to detect the
- * unexpected removal of entries (via expunge, eviction,
- * or take ownership at present), so that they can re-start
- * their scans if necessary.
- *
- * Also check if the entry we are watching for removal is being
- * removed (usually the 'next' entry for an iteration) and reset
- * it to indicate that it was removed.
- */
- cache_ptr->entries_removed_counter++;
- cache_ptr->last_entry_removed_ptr = entry_ptr;
- if (entry_ptr == cache_ptr->entry_watched_for_removal)
- cache_ptr->entry_watched_for_removal = NULL;
-
- /* Check for actually destroying the entry in memory */
- /* (As opposed to taking ownership of it) */
- if (destroy_entry) {
- if (entry_ptr->is_dirty) {
- /* Reset dirty flag */
- entry_ptr->is_dirty = FALSE;
+ if (H5C__mark_flush_dep_clean(entry_ptr) < 0)
- /* If the entry's type has a 'notify' callback send a 'entry cleaned'
- * notice now that the entry is fully integrated into the cache.
- */
- if (entry_ptr->type->notify &&
- (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify client about entry dirty flag cleared")
- } /* end if */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKCLEAN, FAIL, "Can't propagate flush dep clean flag")
+ }
+ } /* end if */
+ } /* end else */
- /* we are about to discard the in core representation --
- * set the magic field to bad magic so we can detect a
- * freed entry if we see one.
- */
- entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
+ /* reset the flush_in progress flag */
+ entry_ptr->flush_in_progress = FALSE;
- /* verify that the image has been freed */
- HDassert(entry_ptr->image_ptr == NULL);
+ /* capture the cache entry address for the log_flush call at the
+ * end before the entry_ptr gets freed
+ */
+ entry_addr = entry_ptr->addr;
- if (entry_ptr->type->free_icr((void *)entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "free_icr callback failed")
- } /* end if */
- else {
- HDassert(take_ownership);
+ /* Internal cache data structures should now be up to date, and
+ * consistent with the status of the entry.
+ *
+ * Now discard the entry if appropriate.
+ */
+ if (destroy) {
- /* client is taking ownership of the entry.
- * set bad magic here too so the cache will choke
- * unless the entry is re-inserted properly
- */
- entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
- } /* end else */
- } /* if (destroy) */
+ /* Sanity check */
+ HDassert(0 == entry_ptr->flush_dep_nparents);
+
+ /* if both suppress_image_entry_frees and entry_ptr->include_in_image
+ * are true, simply set entry_ptr->image_ptr to NULL, as we have
+ * another pointer to the buffer in an instance of H5C_image_entry_t
+ * in cache_ptr->image_entries.
+ *
+ * Otherwise, free the buffer if it exists.
+ */
+ if (suppress_image_entry_frees && entry_ptr->include_in_image) {
- /* Check if we have to update the page buffer with cleared entries
- * so it doesn't go out of date
+ entry_ptr->image_ptr = NULL;
+ }
+ else if (entry_ptr->image_ptr != NULL) {
+
+ entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
+ }
+
+ /* If the entry is not a prefetched entry, verify that the flush
+ * dependency parents addresses array has been transferred.
+ *
+ * If the entry is prefetched, the free_icr routine will dispose of
+ * the flush dependency parents addresses array if necessary.
*/
+ if (!entry_ptr->prefetched) {
- /* VFD SWMR TODO: Think on this, and decide if we need to extend
- * this for multi page metadata entries.
+ HDassert(0 == entry_ptr->fd_parent_count);
+ HDassert(NULL == entry_ptr->fd_parent_addrs);
+
+ } /* end if */
+
+ /* Check whether we should free the space in the file that
+ * the entry occupies
*/
- if (update_page_buffer) {
- /* Sanity check */
- HDassert(!destroy);
- HDassert(entry_ptr->image_ptr);
+ if (free_file_space) {
- if ((f->shared->pb_ptr) && (f->shared->pb_ptr->page_size >= entry_ptr->size)) {
+ hsize_t fsf_size;
- if (H5PB_update_entry(f->shared->pb_ptr, entry_ptr->addr, entry_ptr->size,
- entry_ptr->image_ptr) > 0)
+ /* Sanity checks */
+ HDassert(H5F_addr_defined(entry_ptr->addr));
+ HDassert(!H5F_IS_TMP_ADDR(f, entry_ptr->addr));
+#ifndef NDEBUG
+ {
+ size_t curr_len;
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Failed to update PB with metadata cache")
+ /* Get the actual image size for the thing again */
+ entry_ptr->type->image_len((void *)entry_ptr, &curr_len);
+ HDassert(curr_len == entry_ptr->size);
}
- } /* end if */
+#endif /* NDEBUG */
- if (cache_ptr->log_flush)
- if ((cache_ptr->log_flush)(cache_ptr, entry_addr, was_dirty, flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "log_flush callback failed")
+ /* If the file space free size callback is defined, use
+ * it to get the size of the block of file space to free.
+ * Otherwise use entry_ptr->size.
+ */
+ if (entry_ptr->type->fsf_size) {
-done:
- HDassert((ret_value != SUCCEED) || (destroy_entry) || (!entry_ptr->flush_in_progress));
- HDassert((ret_value != SUCCEED) || (destroy_entry) || (take_ownership) || (!entry_ptr->is_dirty));
+ if ((entry_ptr->type->fsf_size)((void *)entry_ptr, &fsf_size) < 0)
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__flush_single_entry() */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to get file space free size")
- /*-------------------------------------------------------------------------
- *
- * Function: H5C__verify_len_eoa
- *
- * Purpose: Verify that 'len' does not exceed eoa when 'actual' is
- * false i.e. 'len" is the initial speculative length from
- * get_load_size callback with null image pointer.
- * If exceed, adjust 'len' accordingly.
- *
- * Verify that 'len' should not exceed eoa when 'actual' is
- * true i.e. 'len' is the actual length from get_load_size
- * callback with non-null image pointer.
- * If exceed, return error.
- *
- * Return: FAIL if error is detected, SUCCEED otherwise.
- *
- * Programmer: Vailin Choi
- * 9/6/15
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__verify_len_eoa(H5F_t * f, const H5C_class_t *type, haddr_t addr, size_t *len,
- hbool_t actual)
- {
- H5FD_mem_t cooked_type; /* Modified type, accounting for switching global heaps */
- haddr_t eoa; /* End-of-allocation in the file */
- herr_t ret_value = SUCCEED; /* Return value */
+ } /* end if */
+ else { /* no file space free size callback -- use entry size */
- FUNC_ENTER_STATIC
+ fsf_size = entry_ptr->size;
+ }
+
+ /* Release the space on disk */
+ if (H5MF_xfree(f, entry_ptr->type->mem_type, entry_ptr->addr, fsf_size) < 0)
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "unable to free file space for cache entry")
- /* if type == H5FD_MEM_GHEAP, H5F_block_read() forces
- * type to H5FD_MEM_DRAW via its call to H5F__accum_read().
- * Thus we do the same for purposes of computing the EOA
- * for sanity checks.
+ } /* end if ( free_file_space ) */
+
+ /* Reset the pointer to the cache the entry is within. -QAK */
+ entry_ptr->cache_ptr = NULL;
+
+ /* increment entries_removed_counter and set
+ * last_entry_removed_ptr. As we are likely about to
+ * free the entry, recall that last_entry_removed_ptr
+ * must NEVER be dereferenced.
+ *
+ * Recall that these fields are maintained to allow functions
+ * that perform scans of lists of entries to detect the
+ * unexpected removal of entries (via expunge, eviction,
+ * or take ownership at present), so that they can re-start
+ * their scans if necessary.
+ *
+ * Also check if the entry we are watching for removal is being
+ * removed (usually the 'next' entry for an iteration) and reset
+ * it to indicate that it was removed.
*/
- cooked_type = (type->mem_type == H5FD_MEM_GHEAP) ? H5FD_MEM_DRAW : type->mem_type;
+ cache_ptr->entries_removed_counter++;
+ cache_ptr->last_entry_removed_ptr = entry_ptr;
- /* Get the file's end-of-allocation value */
- eoa = H5F_get_eoa(f, cooked_type);
- if (!H5F_addr_defined(eoa))
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid EOA address for file")
+ if (entry_ptr == cache_ptr->entry_watched_for_removal) {
- /* Check for bad address in general */
- if (H5F_addr_gt(addr, eoa))
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "address of object past end of allocation")
+ cache_ptr->entry_watched_for_removal = NULL;
+ }
- /* Check if the amount of data to read will be past the EOA */
- if (H5F_addr_gt((addr + *len), eoa)) {
- if (actual)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "actual len exceeds EOA")
- else
- /* Trim down the length of the metadata */
- *len = (size_t)(eoa - addr);
- } /* end if */
+ /* Check for actually destroying the entry in memory */
+ /* (As opposed to taking ownership of it) */
+ if (destroy_entry) {
- if (*len <= 0)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "len not positive after adjustment for EOA")
+ if (entry_ptr->is_dirty) {
-done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__verify_len_eoa() */
+ /* Reset dirty flag */
+ entry_ptr->is_dirty = FALSE;
- /*-------------------------------------------------------------------------
- *
- * Function: H5C_load_entry
- *
- * Purpose: Attempt to load the entry at the specified disk address
- * and with the specified type into memory. If successful.
- * return the in memory address of the entry. Return NULL
- * on failure.
- *
- * Note that this function simply loads the entry into
- * core. It does not insert it into the cache.
- *
- * Return: Non-NULL on success / NULL on failure.
- *
- * Programmer: John Mainzer, 5/18/04
- *
- * Changes: Please maintain the change list and do not delete entries
- * unless the have been folded into the header comment.
- *
- * Reverted optimization that avoided re-reading the prefix
- * of a metadata entry when a speculative read proved too
- * small.
- * JRM -- 3/25/20
- *
- * Added macro calls to maintain the page buffer read hints.
- *
- * JRM -- 3/20/20
- *
- *-------------------------------------------------------------------------
- */
- static void *H5C_load_entry(H5F_t * f,
-#ifdef H5_HAVE_PARALLEL
- hbool_t coll_access,
-#endif /* H5_HAVE_PARALLEL */
- const H5C_class_t *type, haddr_t addr, void *udata)
- {
- hbool_t dirty = FALSE; /* Flag indicating whether thing */
- /* was dirtied during deserialize */
- uint8_t * image = NULL; /* Buffer for disk image */
- void * thing = NULL; /* Pointer to thing loaded */
- H5C_cache_entry_t *entry = NULL; /* Alias for thing loaded, as */
- /* cache entry */
-#if 0
- size_t init_len;
-#endif
- size_t len; /* Size of image in file */
-#ifdef H5_HAVE_PARALLEL
- int mpi_rank = 0; /* MPI process rank */
- MPI_Comm comm = MPI_COMM_NULL; /* File MPI Communicator */
- int mpi_code; /* MPI error code */
-#endif /* H5_HAVE_PARALLEL */
- void *ret_value = NULL; /* Return value */
+ /* If the entry's type has a 'notify' callback send a
+ * 'entry cleaned' notice now that the entry is fully
+ * integrated into the cache.
+ */
+ if ((entry_ptr->type->notify) &&
+ ((entry_ptr->type->notify)(H5C_NOTIFY_ACTION_ENTRY_CLEANED, entry_ptr) < 0))
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify client about entry dirty flag cleared")
- FUNC_ENTER_NOAPI_NOINIT
+ } /* end if */
- /* Sanity checks */
- HDassert(f);
- HDassert(f->shared);
- HDassert(f->shared->cache);
- HDassert(f->shared->cache->magic == H5C__H5C_T_MAGIC);
+ /* we are about to discard the in core representation --
+ * set the magic field to bad magic so we can detect a
+ * freed entry if we see one.
+ */
+ entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
- /* if this is a VFD SWMR reader, verify that the page size is defined */
- HDassert((!f->shared->cache->vfd_swmr_reader) || (f->shared->cache->page_size > 0));
+ /* verify that the image has been freed */
+ HDassert(entry_ptr->image_ptr == NULL);
- HDassert(type);
- HDassert(H5F_addr_defined(addr));
- HDassert(type->get_initial_load_size);
+ if (entry_ptr->type->free_icr((void *)entry_ptr) < 0)
- if (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG) {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "free_icr callback failed")
- HDassert(type->get_final_load_size);
- }
+ } /* end if */
else {
- HDassert(NULL == type->get_final_load_size);
- }
+ HDassert(take_ownership);
- HDassert(type->deserialize);
+ /* client is taking ownership of the entry.
+ * set bad magic here too so the cache will choke
+ * unless the entry is re-inserted properly
+ */
+ entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
- /* Can't see how skip reads could be usefully combined with
- * the speculative read flag. Hence disallow.
- */
- HDassert(
- !((type->flags & H5C__CLASS_SKIP_READS) && (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)));
+ } /* end else */
+ } /* if (destroy) */
- /* Call the get_initial_load_size callback, to retrieve the initial
- * size of image
- */
- if (type->get_initial_load_size(udata, &len) < 0)
+ /* Check if we have to update the page buffer with cleared entries
+ * so it doesn't go out of date
+ */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "can't retrieve image size")
+ /* VFD SWMR TODO: Think on this, and decide if we need to extend
+ * this for multi page metadata entries.
+ */
+ if (update_page_buffer) {
- HDassert(len > 0);
+ /* Sanity check */
+ HDassert(!destroy);
+ HDassert(entry_ptr->image_ptr);
-#if 0
- init_len = len;
-#endif
+ if ((f->shared->page_buf) && (f->shared->page_buf->page_size >= entry_ptr->size)) {
- /* Check for possible speculative read off the end of the file */
- if (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG) {
+ if (H5PB_update_entry(f->shared->page_buf, entry_ptr->addr, entry_ptr->size,
+ entry_ptr->image_ptr) > 0)
- if (H5C__verify_len_eoa(f, type, addr, &len, FALSE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Failed to update PB with metadata cache")
+ } /* end if */
+ } /* end if */
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid len with respect to EOA")
- }
+ if (cache_ptr->log_flush) {
- /* Allocate the buffer for reading the on-disk entry image */
- if (NULL == (image = (uint8_t *)H5MM_malloc(len + H5C_IMAGE_EXTRA_SPACE)))
+ if ((cache_ptr->log_flush)(cache_ptr, entry_addr, was_dirty, flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for on disk image buffer")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "log_flush callback failed")
-#if H5C_DO_MEMORY_SANITY_CHECKS
- H5MM_memcpy(image + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
-#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+ } /* end if */
-#ifdef H5_HAVE_PARALLEL
- if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) {
+done:
- if ((mpi_rank = H5F_mpi_get_rank(f)) < 0)
+ HDassert((ret_value != SUCCEED) || (destroy_entry) || (!entry_ptr->flush_in_progress));
- HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "Can't get MPI rank")
+ HDassert((ret_value != SUCCEED) || (destroy_entry) || (take_ownership) || (!entry_ptr->is_dirty));
- if ((comm = H5F_mpi_get_comm(f)) == MPI_COMM_NULL)
+ FUNC_LEAVE_NOAPI(ret_value)
- HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "get_comm request failed")
+} /* H5C__flush_single_entry() */
- } /* end if */
-#endif /* H5_HAVE_PARALLEL */
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__verify_len_eoa
+ *
+ * Purpose: Verify that 'len' does not exceed eoa when 'actual' is
+ * false i.e. 'len' is the initial speculative length from
+ * get_load_size callback with null image pointer.
+ * If exceed, adjust 'len' accordingly.
+ *
+ * Verify that 'len' should not exceed eoa when 'actual' is
+ * true i.e. 'len' is the actual length from get_load_size
+ * callback with non-null image pointer.
+ * If exceed, return error.
+ *
+ * Return: FAIL if error is detected, SUCCEED otherwise.
+ *
+ * Programmer: Vailin Choi
+ * 9/6/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__verify_len_eoa(H5F_t *f, const H5C_class_t *type, haddr_t addr, size_t *len, hbool_t actual)
+{
+ H5FD_mem_t cooked_type; /* Modified type, accounting for switching global heaps */
+ haddr_t eoa; /* End-of-allocation in the file */
+ herr_t ret_value = SUCCEED; /* Return value */
- /* Get the on-disk entry image */
- if (0 == (type->flags & H5C__CLASS_SKIP_READS)) {
-
- unsigned tries; /* The # of retries */
- htri_t chk_ret; /* return from verify_chksum callback */
- size_t actual_len = len; /* The actual length, after speculative */
- /* reads have been resolved */
- void * new_image; /* Pointer to image */
- hbool_t len_changed = TRUE; /* Whether to re-check speculative */
- /* entries */
- bool do_try;
- h5_retry_t retry;
-
- /*
- * This do/while loop performs the following till the metadata checksum
- * is correct or the file's number of allowed read attempts are reached.
- * --read the metadata
- * --determine the actual size of the metadata
- * --perform checksum verification
- */
- for (do_try = h5_retry_init(&retry, H5F_GET_READ_ATTEMPTS(f), 1, H5_RETRY_ONE_HOUR / 3600 / 100);
- do_try; do_try = h5_retry_next(&retry)) {
- if (actual_len != len) {
+ FUNC_ENTER_STATIC
- if (NULL == (new_image = H5MM_realloc(image, len + H5C_IMAGE_EXTRA_SPACE)))
+ /* if type == H5FD_MEM_GHEAP, H5F_block_read() forces
+ * type to H5FD_MEM_DRAW via its call to H5F__accum_read().
+ * Thus we do the same for purposes of computing the EOA
+ * for sanity checks.
+ */
+ cooked_type = (type->mem_type == H5FD_MEM_GHEAP) ? H5FD_MEM_DRAW : type->mem_type;
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()")
+ /* Get the file's end-of-allocation value */
+ eoa = H5F_get_eoa(f, cooked_type);
+ if (!H5F_addr_defined(eoa))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid EOA address for file")
- image = (uint8_t *)new_image;
+ /* Check for bad address in general */
+ if (H5F_addr_gt(addr, eoa))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "address of object past end of allocation")
-#if H5C_DO_MEMORY_SANITY_CHECKS
- H5MM_memcpy(image + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
-#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
- } /* end if */
+ /* Check if the amount of data to read will be past the EOA */
+ if (H5F_addr_gt((addr + *len), eoa)) {
+ if (actual)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "actual len exceeds EOA")
+ else
+ /* Trim down the length of the metadata */
+ *len = (size_t)(eoa - addr);
+ } /* end if */
+
+ if (*len <= 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "len not positive after adjustment for EOA")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__verify_len_eoa() */
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__load_entry
+ *
+ * Purpose: Attempt to load the entry at the specified disk address
+ * and with the specified type into memory. If successful.
+ * return the in memory address of the entry. Return NULL
+ * on failure.
+ *
+ * Note that this function simply loads the entry into
+ * core. It does not insert it into the cache.
+ *
+ * Return: Non-NULL on success / NULL on failure.
+ *
+ * Programmer: John Mainzer, 5/18/04
+ *
+ * Changes: Please maintain the change list and do not delete entries
+ * unless they have been folded into the header comment.
+ *
+ * Reverted optimization that avoided re-reading the prefix
+ * of a metadata entry when a speculative read proved too
+ * small.
+ * JRM -- 3/25/20
+ *
+ * Added macro calls to maintain the page buffer read hints.
+ *
+ * JRM -- 3/20/20
+ *
+ *-------------------------------------------------------------------------
+ */
+static void *
+H5C__load_entry(H5F_t *f,
#ifdef H5_HAVE_PARALLEL
- if (!coll_access || 0 == mpi_rank) {
+ hbool_t coll_access,
#endif /* H5_HAVE_PARALLEL */
+ const H5C_class_t *type, haddr_t addr, void *udata)
+{
+ hbool_t dirty = FALSE; /* Flag indicating whether thing */
+ /* was dirtied during deserialize */
+ uint8_t * image = NULL; /* Buffer for disk image */
+ void * thing = NULL; /* Pointer to thing loaded */
+ H5C_cache_entry_t *entry = NULL; /* Alias for thing loaded, as */
+ /* cache entry */
+#if 0
+size_t init_len;
+#endif
+ size_t len; /* Size of image in file */
+#ifdef H5_HAVE_PARALLEL
+ int mpi_rank = 0; /* MPI process rank */
+ MPI_Comm comm = MPI_COMM_NULL; /* File MPI Communicator */
+ int mpi_code; /* MPI error code */
+#endif /* H5_HAVE_PARALLEL */
+ void *ret_value = NULL; /* Return value */
- H5C__SET_PB_READ_HINTS(f->shared->cache, type, TRUE)
+ FUNC_ENTER_STATIC
- if (H5F_block_read(f, type->mem_type, addr, len, image) < 0) {
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->cache);
+ HDassert(f->shared->cache->magic == H5C__H5C_T_MAGIC);
- H5C__RESET_PB_READ_HINTS(f->shared->cache)
+ /* if this is a VFD SWMR reader, verify that the page size is defined */
+ HDassert((!f->shared->cache->vfd_swmr_reader) || (f->shared->cache->page_size > 0));
- HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL, "Can't read image*")
- }
+ HDassert(type);
+ HDassert(H5F_addr_defined(addr));
+ HDassert(type->get_initial_load_size);
+ if (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)
+ HDassert(type->get_final_load_size);
+ else
+ HDassert(NULL == type->get_final_load_size);
+ HDassert(type->deserialize);
- H5C__RESET_PB_READ_HINTS(f->shared->cache)
+ /* Can't see how skip reads could be usefully combined with
+ * the speculative read flag. Hence disallow.
+ */
+ HDassert(!((type->flags & H5C__CLASS_SKIP_READS) && (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)));
+
+ /* Call the get_initial_load_size callback, to retrieve the initial size of image */
+ if (type->get_initial_load_size(udata, &len) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "can't retrieve image size")
+ HDassert(len > 0);
+
+#if 0
+init_len = len;
+#endif
+
+ /* Check for possible speculative read off the end of the file */
+ if (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)
+ if (H5C__verify_len_eoa(f, type, addr, &len, FALSE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid len with respect to EOA")
+
+ /* Allocate the buffer for reading the on-disk entry image */
+ if (NULL == (image = (uint8_t *)H5MM_malloc(len + H5C_IMAGE_EXTRA_SPACE)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for on disk image buffer")
+#if H5C_DO_MEMORY_SANITY_CHECKS
+ H5MM_memcpy(image + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
#ifdef H5_HAVE_PARALLEL
- } /* end if */
- /* if the collective metadata read optimization is turned on,
- * bcast the metadata read from process 0 to all ranks in the file
- * communicator
- */
- if (coll_access) {
+ if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) {
+ if ((mpi_rank = H5F_mpi_get_rank(f)) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "Can't get MPI rank")
+ if ((comm = H5F_mpi_get_comm(f)) == MPI_COMM_NULL)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "get_comm request failed")
+ } /* end if */
+#endif /* H5_HAVE_PARALLEL */
- int buf_size;
+ /* Get the on-disk entry image */
+ if (0 == (type->flags & H5C__CLASS_SKIP_READS)) {
+ unsigned tries; /* The # of retries */
+ htri_t chk_ret; /* return from verify_chksum callback */
+ size_t actual_len = len; /* The actual length, after speculative */
+ /* reads have been resolved */
+ void * new_image; /* Pointer to image */
+ hbool_t len_changed = TRUE; /* Whether to re-check speculative */
+ /* entries */
+ bool do_try;
+ h5_retry_t retry;
+
+ /*
+ * This do/while loop performs the following till the metadata checksum
+ * is correct or the file's number of allowed read attempts are reached.
+ * --read the metadata
+ * --determine the actual size of the metadata
+ * --perform checksum verification
+ */
+ for (do_try = h5_retry_init(&retry, H5F_GET_READ_ATTEMPTS(f), 1, H5_RETRY_ONE_HOUR / 3600 / 100);
+ do_try; do_try = h5_retry_next(&retry)) {
+ if (actual_len != len) {
+ if (NULL == (new_image = H5MM_realloc(image, len + H5C_IMAGE_EXTRA_SPACE)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()")
+ image = (uint8_t *)new_image;
+#if H5C_DO_MEMORY_SANITY_CHECKS
+ H5MM_memcpy(image + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+ } /* end if */
- H5_CHECKED_ASSIGN(buf_size, int, len, size_t);
- if (MPI_SUCCESS != (mpi_code = MPI_Bcast(image, buf_size, MPI_BYTE, 0, comm)))
+#ifdef H5_HAVE_PARALLEL
+ if (!coll_access || 0 == mpi_rank) {
+#endif /* H5_HAVE_PARALLEL */
- HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code)
- } /* end if */
-#endif /* H5_HAVE_PARALLEL */
+ H5C__SET_PB_READ_HINTS(f->shared->cache, type, TRUE)
- /* If the entry could be read speculatively and the length is still
- * changing, check for updating the actual size
- */
- if ((type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG) && (len_changed)) {
+ if (H5F_block_read(f, type->mem_type, addr, len, image) < 0) {
- /* Retrieve the actual length */
- actual_len = len;
- if (type->get_final_load_size(image, len, udata, &actual_len) < 0) {
+ H5C__RESET_PB_READ_HINTS(f->shared->cache)
- /* Transfer control to while() and count towards retries */
- continue;
- }
+ HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL, "Can't read image*")
+ }
- /* Check for the length changing */
- if (actual_len != len) {
+ H5C__RESET_PB_READ_HINTS(f->shared->cache)
- /* Verify that the length isn't past the EOA for
- * the file
- */
- if (H5C__verify_len_eoa(f, type, addr, &actual_len, TRUE) < 0)
+#ifdef H5_HAVE_PARALLEL
+ } /* end if */
+ /* if the collective metadata read optimization is turned on,
+ * bcast the metadata read from process 0 to all ranks in the file
+ * communicator
+ */
+ if (coll_access) {
+ int buf_size;
+
+ H5_CHECKED_ASSIGN(buf_size, int, len, size_t);
+ if (MPI_SUCCESS != (mpi_code = MPI_Bcast(image, buf_size, MPI_BYTE, 0, comm)))
+ HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code)
+ } /* end if */
+#endif /* H5_HAVE_PARALLEL */
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "actual_len exceeds EOA")
+ /* If the entry could be read speculatively and the length is still
+ * changing, check for updating the actual size
+ */
+ if ((type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG) && (len_changed)) {
- /* Expand buffer to new size */
- if (NULL == (new_image = H5MM_realloc(image, actual_len + H5C_IMAGE_EXTRA_SPACE)))
+ /* Retrieve the actual length */
+ actual_len = len;
+ if (type->get_final_load_size(image, len, udata, &actual_len) < 0) {
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()")
+ /* Transfer control to while() and count towards retries */
+ continue;
+ }
- image = (uint8_t *)new_image;
+ /* Check for the length changing */
+ if (actual_len != len) {
+ /* Verify that the length isn't past the EOA for the file */
+ if (H5C__verify_len_eoa(f, type, addr, &actual_len, TRUE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "actual_len exceeds EOA")
+ /* Expand buffer to new size */
+ if (NULL == (new_image = H5MM_realloc(image, actual_len + H5C_IMAGE_EXTRA_SPACE)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()")
+ image = (uint8_t *)new_image;
#if H5C_DO_MEMORY_SANITY_CHECKS
- H5MM_memcpy(image + actual_len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
+ H5MM_memcpy(image + actual_len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
- if (actual_len > len) {
+ if (actual_len > len) {
#ifdef H5_HAVE_PARALLEL
- if (!coll_access || 0 == mpi_rank) {
+ if (!coll_access || 0 == mpi_rank) {
#endif /* H5_HAVE_PARALLEL */
#if 0 /* JRM */
- /* If the thing's image needs to be bigger for
- * a speculatively loaded thing, go get the
- * on-disk image again (the extra portion).
- */
- if ( H5F_block_read(f, type->mem_type, addr + len,
- actual_len - len, image + len) < 0)
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, \
- "can't read image")
-#else /* JRM */
-
- /* the original version of this code re-read
- * the entire buffer. At some point, someone
- * reworked this code to avoid re-reading the
- * initial portion of the buffer.
- *
- * In addition to being of questionable utility,
- * this optimization changed the invarient that
- * that metadata is read and written atomically.
- * While this didn't cause immediate problems,
- * the page buffer in VFD SWMR depends on this
- * invarient in its management of multi-page
- * metadata entries.
- *
- * To repair this issue, I have reverted to
- * the original algorithm for managing the
- * speculative load case. Note that I have
- * done so crudely -- before merge, we should
- * remove the infrastructure that supports the
- * optimization.
- *
- * We should also verify my impression that the
- * that the optimization is of no measurable
- * value. If it is, we will put it back, but
- * disable it in the VFD SWMR case.
- *
- * While this issue was detected in the global
- * heap case, note that the super bloc, the
- * local heap, and the fractal heap also use
- * speculative loads.
- *
- * JRM -- 3/24/20
+ /* If the thing's image needs to be bigger for
+ * a speculatively loaded thing, go get the
+ * on-disk image again (the extra portion).
*/
+ if ( H5F_block_read(f, type->mem_type, addr + len,
+ actual_len - len, image + len) < 0)
- H5C__SET_PB_READ_HINTS(f->shared->cache, type, FALSE);
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, \
+ "can't read image")
+#else /* JRM */
- if (H5F_block_read(f, type->mem_type, addr, actual_len, image) < 0) {
+ /* the original version of this code re-read
+ * the entire buffer. At some point, someone
+ * reworked this code to avoid re-reading the
+ * initial portion of the buffer.
+ *
+ * In addition to being of questionable utility,
+ * this optimization changed the invariant that
+ * metadata is read and written atomically.
+ * While this didn't cause immediate problems,
+ * the page buffer in VFD SWMR depends on this
+ * invariant in its management of multi-page
+ * metadata entries.
+ *
+ * To repair this issue, I have reverted to
+ * the original algorithm for managing the
+ * speculative load case. Note that I have
+ * done so crudely -- before merge, we should
+ * remove the infrastructure that supports the
+ * optimization.
+ *
+ * We should also verify my impression that the
+ * that the optimization is of no measurable
+ * value. If it is, we will put it back, but
+ * disable it in the VFD SWMR case.
+ *
+ * While this issue was detected in the global
+ * heap case, note that the superblock, the
+ * local heap, and the fractal heap also use
+ * speculative loads.
+ *
+ * JRM -- 3/24/20
+ */
- H5C__RESET_PB_READ_HINTS(f->shared->cache)
+ H5C__SET_PB_READ_HINTS(f->shared->cache, type, FALSE);
+
+ if (H5F_block_read(f, type->mem_type, addr, actual_len, image) < 0) {
- HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't read image")
- }
H5C__RESET_PB_READ_HINTS(f->shared->cache)
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't read image")
+ }
+ H5C__RESET_PB_READ_HINTS(f->shared->cache)
#endif /* JRM */
#ifdef H5_HAVE_PARALLEL
- }
- /* If the collective metadata read optimization is
- * turned on, Bcast the metadata read from process
- * 0 to all ranks in the file communicator
- */
- if (coll_access) {
-
- int buf_size;
-
- H5_CHECKED_ASSIGN(buf_size, int, actual_len - len, size_t);
-
- if (MPI_SUCCESS !=
- (mpi_code = MPI_Bcast(image + len, buf_size, MPI_BYTE, 0, comm)))
-
- HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code)
- } /* end if */
-#endif /* H5_HAVE_PARALLEL */
- } /* end if */
- } /* end if (actual_len != len) */
- else {
- /* The length has stabilized */
- len_changed = FALSE;
-
- /* Set the final length */
- len = actual_len;
- } /* else */
- } /* end if */
-
- /* If there's no way to verify the checksum for a piece of metadata
- * (usually because there's no checksum in the file), leave now
- */
- if (type->verify_chksum == NULL)
- break;
+ }
+ /* If the collective metadata read optimization is turned on,
+ * Bcast the metadata read from process 0 to all ranks in the file
+ * communicator */
+ if (coll_access) {
+ int buf_size;
+
+ H5_CHECKED_ASSIGN(buf_size, int, actual_len - len, size_t);
+ if (MPI_SUCCESS !=
+ (mpi_code = MPI_Bcast(image + len, buf_size, MPI_BYTE, 0, comm)))
+ HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code)
+ } /* end if */
+#endif /* H5_HAVE_PARALLEL */
+ } /* end if */
+ } /* end if (actual_len != len) */
+ else {
+ /* The length has stabilized */
+ len_changed = FALSE;
- /* Verify the checksum for the metadata image */
- if ((chk_ret = type->verify_chksum(image, actual_len, udata)) < 0)
+ /* Set the final length */
+ len = actual_len;
+ } /* else */
+ } /* end if */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "failure from verify_chksum callback")
+ /* If there's no way to verify the checksum for a piece of metadata
+ * (usually because there's no checksum in the file), leave now
+ */
+ if (type->verify_chksum == NULL)
+ break;
- if (chk_ret == TRUE)
- break;
- }
+ /* Verify the checksum for the metadata image */
+ if ((chk_ret = type->verify_chksum(image, actual_len, udata)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "failure from verify_chksum callback")
+ if (chk_ret == TRUE)
+ break;
+ }
- /* Check for too many tries */
- if (!do_try) {
+ /* Check for too many tries */
+ if (!do_try) {
#if 0 /* JRM */
- haddr_t eoa;
- int64_t page = (int64_t)(addr / f->shared->cache->page_size);
-
- eoa = H5F_get_eoa(f, type->mem_type);
-
- HDfprintf(stderr, "addr = 0x%llx, init_len = %lld, len = %lld\n",
- (int64_t)addr, (int64_t)init_len, (int64_t)len);
- HDfprintf(stderr, "type = %s, eoa = 0x%llx, tick = %lld\n",
- type->name, (int64_t)eoa, f->shared->tick_num);
- HDfprintf(stderr, "page = %lld, index_len = %d\n",
- page, f->shared->mdf_idx_entries_used);
- H5FD_vfd_swmr_dump_status(f->shared->lf, page);
+ haddr_t eoa;
+ int64_t page = (int64_t)(addr / f->shared->cache->page_size);
+
+ eoa = H5F_get_eoa(f, type->mem_type);
+
+ HDfprintf(stderr, "addr = 0x%llx, init_len = %lld, len = %lld\n",
+ (int64_t)addr, (int64_t)init_len, (int64_t)len);
+ HDfprintf(stderr, "type = %s, eoa = 0x%llx, tick = %lld\n",
+ type->name, (int64_t)eoa, f->shared->tick_num);
+ HDfprintf(stderr, "page = %lld, index_len = %d\n",
+ page, f->shared->mdf_idx_entries_used);
+ H5FD_vfd_swmr_dump_status(f->shared->lf, page);
#endif /* JRM */
- HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL,
- "incorrect metadata checksum after all read attempts addr %" PRIuHADDR
- " size %zu",
- addr, len);
- }
-
- /* Calculate and track the # of retries */
- if ((tries = h5_retry_tries(&retry)) > 1) { /* Does not track 0 retry */
+ HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL,
+ "incorrect metadata checksum after all read attempts addr %" PRIuHADDR
+ " size %zu",
+ addr, len);
+ }
- if (H5F_track_metadata_read_retries(f, (unsigned)type->mem_type, tries - 1) < 0)
+ /* Calculate and track the # of retries */
+ if ((tries = h5_retry_tries(&retry)) > 1) { /* Does not track 0 retry */
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "cannot track read tries = %u ", tries)
- }
+ if (H5F_track_metadata_read_retries(f, (unsigned)type->mem_type, tries - 1) < 0)
- /* Set the final length (in case it wasn't set earlier) */
- len = actual_len;
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "cannot track read tries = %u ", tries)
+ }
- } /* end if !H5C__CLASS_SKIP_READS */
+ /* Set the final length (in case it wasn't set earlier) */
+ len = actual_len;
+ } /* end if !H5C__CLASS_SKIP_READS */
- /* Deserialize the on-disk image into the native memory form */
- if (NULL == (thing = type->deserialize(image, len, udata, &dirty)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "Can't deserialize image")
+ /* Deserialize the on-disk image into the native memory form */
+ if (NULL == (thing = type->deserialize(image, len, udata, &dirty)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "Can't deserialize image")
- entry = (H5C_cache_entry_t *)thing;
+ entry = (H5C_cache_entry_t *)thing;
- /* In general, an entry should be clean just after it is loaded.
- *
- * However, when this code is used in the metadata cache, it is
- * possible that object headers will be dirty at this point, as
- * the deserialize function will alter object headers if necessary to
- * fix an old bug.
- *
- * In the following assert:
- *
- * HDassert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6 ) );
- *
- * note that type ids 5 & 6 are associated with object headers in the
- * metadata cache.
- *
- * When we get to using H5C for other purposes, we may wish to
- * tighten up the assert so that the loophole only applies to the
- * metadata cache.
- */
+ /* In general, an entry should be clean just after it is loaded.
+ *
+ * However, when this code is used in the metadata cache, it is
+ * possible that object headers will be dirty at this point, as
+ * the deserialize function will alter object headers if necessary to
+ * fix an old bug.
+ *
+ * In the following assert:
+ *
+ * HDassert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6 ) );
+ *
+ * note that type ids 5 & 6 are associated with object headers in the
+ * metadata cache.
+ *
+ * When we get to using H5C for other purposes, we may wish to
+ * tighten up the assert so that the loophole only applies to the
+ * metadata cache.
+ */
- HDassert((dirty == FALSE) || (type->id == 5 || type->id == 6));
-
- entry->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
- entry->cache_ptr = f->shared->cache;
- entry->addr = addr;
- entry->size = len;
- HDassert(entry->size < H5C_MAX_ENTRY_SIZE);
- entry->image_ptr = image;
- entry->image_up_to_date = !dirty;
- entry->type = type;
- entry->is_dirty = dirty;
- entry->dirtied = FALSE;
- entry->is_protected = FALSE;
- entry->is_read_only = FALSE;
- entry->ro_ref_count = 0;
- entry->is_pinned = FALSE;
- entry->in_slist = FALSE;
- entry->flush_marker = FALSE;
+ HDassert((dirty == FALSE) || (type->id == 5 || type->id == 6));
+
+ entry->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
+ entry->cache_ptr = f->shared->cache;
+ entry->addr = addr;
+ entry->size = len;
+ HDassert(entry->size < H5C_MAX_ENTRY_SIZE);
+ entry->image_ptr = image;
+ entry->image_up_to_date = !dirty;
+ entry->type = type;
+ entry->is_dirty = dirty;
+ entry->dirtied = FALSE;
+ entry->is_protected = FALSE;
+ entry->is_read_only = FALSE;
+ entry->ro_ref_count = 0;
+ entry->is_pinned = FALSE;
+ entry->in_slist = FALSE;
+ entry->flush_marker = FALSE;
#ifdef H5_HAVE_PARALLEL
- entry->clear_on_unprotect = FALSE;
- entry->flush_immediately = FALSE;
- entry->coll_access = coll_access;
+ entry->clear_on_unprotect = FALSE;
+ entry->flush_immediately = FALSE;
+ entry->coll_access = coll_access;
#endif /* H5_HAVE_PARALLEL */
- entry->flush_in_progress = FALSE;
- entry->destroy_in_progress = FALSE;
-
- entry->ring = H5C_RING_UNDEFINED;
-
- /* Initialize flush dependency fields */
- entry->flush_dep_parent = NULL;
- entry->flush_dep_nparents = 0;
- entry->flush_dep_parent_nalloc = 0;
- entry->flush_dep_nchildren = 0;
- entry->flush_dep_ndirty_children = 0;
- entry->flush_dep_nunser_children = 0;
- entry->ht_next = NULL;
- entry->ht_prev = NULL;
- entry->il_next = NULL;
- entry->il_prev = NULL;
-
- entry->next = NULL;
- entry->prev = NULL;
+ entry->flush_in_progress = FALSE;
+ entry->destroy_in_progress = FALSE;
+
+ entry->ring = H5C_RING_UNDEFINED;
+
+ /* Initialize flush dependency fields */
+ entry->flush_dep_parent = NULL;
+ entry->flush_dep_nparents = 0;
+ entry->flush_dep_parent_nalloc = 0;
+ entry->flush_dep_nchildren = 0;
+ entry->flush_dep_ndirty_children = 0;
+ entry->flush_dep_nunser_children = 0;
+ entry->ht_next = NULL;
+ entry->ht_prev = NULL;
+ entry->il_next = NULL;
+ entry->il_prev = NULL;
+
+ entry->next = NULL;
+ entry->prev = NULL;
#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
- entry->aux_next = NULL;
- entry->aux_prev = NULL;
+ entry->aux_next = NULL;
+ entry->aux_prev = NULL;
#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
#ifdef H5_HAVE_PARALLEL
- entry->coll_next = NULL;
- entry->coll_prev = NULL;
+ entry->coll_next = NULL;
+ entry->coll_prev = NULL;
#endif /* H5_HAVE_PARALLEL */
- /* initialize cache image related fields */
- entry->include_in_image = FALSE;
- entry->lru_rank = 0;
- entry->image_dirty = FALSE;
- entry->fd_parent_count = 0;
- entry->fd_parent_addrs = NULL;
- entry->fd_child_count = 0;
- entry->fd_dirty_child_count = 0;
- entry->image_fd_height = 0;
- entry->prefetched = FALSE;
- entry->prefetch_type_id = 0;
- entry->age = 0;
- entry->prefetched_dirty = FALSE;
+ /* initialize cache image related fields */
+ entry->include_in_image = FALSE;
+ entry->lru_rank = 0;
+ entry->image_dirty = FALSE;
+ entry->fd_parent_count = 0;
+ entry->fd_parent_addrs = NULL;
+ entry->fd_child_count = 0;
+ entry->fd_dirty_child_count = 0;
+ entry->image_fd_height = 0;
+ entry->prefetched = FALSE;
+ entry->prefetch_type_id = 0;
+ entry->age = 0;
+ entry->prefetched_dirty = FALSE;
#ifndef NDEBUG /* debugging field */
- entry->serialization_count = 0;
+ entry->serialization_count = 0;
#endif /* NDEBUG */
- /* initialize tag list fields */
- entry->tl_next = NULL;
- entry->tl_prev = NULL;
- entry->tag_info = NULL;
+ /* initialize tag list fields */
+ entry->tl_next = NULL;
+ entry->tl_prev = NULL;
+ entry->tag_info = NULL;
- /* initialize fields supporting VFD SWMR */
- if (f->shared->cache->vfd_swmr_reader) {
+ /* initialize fields supporting VFD SWMR */
+ if (f->shared->cache->vfd_swmr_reader) {
- entry->page = (addr / f->shared->cache->page_size);
- }
- else {
+ entry->page = (addr / f->shared->cache->page_size);
+ }
+ else {
- entry->page = 0;
- }
- entry->refreshed_in_tick = 0;
- entry->pi_next = NULL;
- entry->pi_prev = NULL;
+ entry->page = 0;
+ }
+ entry->refreshed_in_tick = 0;
+ entry->pi_next = NULL;
+ entry->pi_prev = NULL;
- H5C__RESET_CACHE_ENTRY_STATS(entry);
+ H5C__RESET_CACHE_ENTRY_STATS(entry);
- ret_value = thing;
+ ret_value = thing;
done:
- /* Cleanup on error */
- if (NULL == ret_value) {
- /* Release resources */
- if (thing && type->free_icr(thing) < 0)
- HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "free_icr callback failed")
- if (image)
- image = (uint8_t *)H5MM_xfree(image);
- } /* end if */
+ /* Cleanup on error */
+ if (NULL == ret_value) {
+ /* Release resources */
+ if (thing && type->free_icr(thing) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "free_icr callback failed")
+ if (image)
+ image = (uint8_t *)H5MM_xfree(image);
+ } /* end if */
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C_load_entry() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__load_entry() */
- /*-------------------------------------------------------------------------
- *
- * Function: H5C__make_space_in_cache
- *
- * Purpose: Attempt to evict cache entries until the index_size
- * is at least needed_space below max_cache_size.
- *
- * In passing, also attempt to bring cLRU_list_size to a
- * value greater than min_clean_size.
- *
- * Depending on circumstances, both of these goals may
- * be impossible, as in parallel mode, we must avoid generating
- * a write as part of a read (to avoid deadlock in collective
- * I/O), and in all cases, it is possible (though hopefully
- * highly unlikely) that the protected list may exceed the
- * maximum size of the cache.
- *
- * Thus the function simply does its best, returning success
- * unless an error is encountered.
- *
- * Observe that this function cannot occasion a read.
- *
- * Return: Non-negative on success/Negative on failure.
- *
- * Programmer: John Mainzer, 5/14/04
- *
- *-------------------------------------------------------------------------
- */
- herr_t H5C__make_space_in_cache(H5F_t * f, size_t space_needed, hbool_t write_permitted)
- {
- H5C_t *cache_ptr = f->shared->cache;
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__make_space_in_cache
+ *
+ * Purpose: Attempt to evict cache entries until the index_size
+ * is at least needed_space below max_cache_size.
+ *
+ * In passing, also attempt to bring cLRU_list_size to a
+ * value greater than min_clean_size.
+ *
+ * Depending on circumstances, both of these goals may
+ * be impossible, as in parallel mode, we must avoid generating
+ * a write as part of a read (to avoid deadlock in collective
+ * I/O), and in all cases, it is possible (though hopefully
+ * highly unlikely) that the protected list may exceed the
+ * maximum size of the cache.
+ *
+ * Thus the function simply does its best, returning success
+ * unless an error is encountered.
+ *
+ * Observe that this function cannot occasion a read.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 5/14/04
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted)
+{
+ H5C_t *cache_ptr = f->shared->cache;
#if H5C_COLLECT_CACHE_STATS
- int32_t clean_entries_skipped = 0;
- int32_t dirty_pf_entries_skipped = 0;
- int32_t total_entries_scanned = 0;
+ int32_t clean_entries_skipped = 0;
+ int32_t dirty_pf_entries_skipped = 0;
+ int32_t total_entries_scanned = 0;
#endif /* H5C_COLLECT_CACHE_STATS */
- uint32_t entries_examined = 0;
- uint32_t initial_list_len;
- size_t empty_space;
- hbool_t reentrant_call = FALSE;
- hbool_t prev_is_dirty = FALSE;
- hbool_t didnt_flush_entry = FALSE;
- hbool_t restart_scan;
- H5C_cache_entry_t *entry_ptr;
- H5C_cache_entry_t *prev_ptr;
- H5C_cache_entry_t *next_ptr;
- uint32_t num_corked_entries = 0;
- herr_t ret_value = SUCCEED; /* Return value */
+ uint32_t entries_examined = 0;
+ uint32_t initial_list_len;
+ size_t empty_space;
+ hbool_t reentrant_call = FALSE;
+ hbool_t prev_is_dirty = FALSE;
+ hbool_t didnt_flush_entry = FALSE;
+ hbool_t restart_scan;
+ H5C_cache_entry_t *entry_ptr;
+ H5C_cache_entry_t *prev_ptr;
+ H5C_cache_entry_t *next_ptr;
+ uint32_t num_corked_entries = 0;
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_PACKAGE
+ FUNC_ENTER_PACKAGE
- /* Sanity checks */
- HDassert(f);
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size));
-
- /* check to see if cache_ptr->msic_in_progress is TRUE. If it, this
- * is a re-entrant call via a client callback called in the make
- * space in cache process. To avoid an infinite recursion, set
- * reentrant_call to TRUE, and goto done.
- */
- if (cache_ptr->msic_in_progress) {
- reentrant_call = TRUE;
- HGOTO_DONE(SUCCEED);
- } /* end if */
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size));
- cache_ptr->msic_in_progress = TRUE;
+ /* check to see if cache_ptr->msic_in_progress is TRUE. If it, this
+ * is a re-entrant call via a client callback called in the make
+ * space in cache process. To avoid an infinite recursion, set
+ * reentrant_call to TRUE, and goto done.
+ */
+ if (cache_ptr->msic_in_progress) {
+ reentrant_call = TRUE;
+ HGOTO_DONE(SUCCEED);
+ } /* end if */
- if (write_permitted) {
- restart_scan = FALSE;
- initial_list_len = cache_ptr->LRU_list_len;
- entry_ptr = cache_ptr->LRU_tail_ptr;
+ cache_ptr->msic_in_progress = TRUE;
- if (cache_ptr->index_size >= cache_ptr->max_cache_size)
- empty_space = 0;
- else
- empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
+ if (write_permitted) {
+ restart_scan = FALSE;
+ initial_list_len = cache_ptr->LRU_list_len;
+ entry_ptr = cache_ptr->LRU_tail_ptr;
+
+ if (cache_ptr->index_size >= cache_ptr->max_cache_size)
+ empty_space = 0;
+ else
+ empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
- while ((((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) ||
- ((empty_space + cache_ptr->clean_index_size) < (cache_ptr->min_clean_size))) &&
- (entries_examined <= (2 * initial_list_len)) && (entry_ptr != NULL)) {
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(!(entry_ptr->is_protected));
- HDassert(!(entry_ptr->is_read_only));
- HDassert((entry_ptr->ro_ref_count) == 0);
+ while ((((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) ||
+ ((empty_space + cache_ptr->clean_index_size) < (cache_ptr->min_clean_size))) &&
+ (entries_examined <= (2 * initial_list_len)) && (entry_ptr != NULL)) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(!(entry_ptr->is_protected));
+ HDassert(!(entry_ptr->is_read_only));
+ HDassert((entry_ptr->ro_ref_count) == 0);
- next_ptr = entry_ptr->next;
- prev_ptr = entry_ptr->prev;
+ next_ptr = entry_ptr->next;
+ prev_ptr = entry_ptr->prev;
- if (prev_ptr != NULL)
- prev_is_dirty = prev_ptr->is_dirty;
+ if (prev_ptr != NULL)
+ prev_is_dirty = prev_ptr->is_dirty;
- if (entry_ptr->is_dirty && (entry_ptr->tag_info && entry_ptr->tag_info->corked)) {
+ if (entry_ptr->is_dirty && (entry_ptr->tag_info && entry_ptr->tag_info->corked)) {
- /* Skip "dirty" corked entries. */
- ++num_corked_entries;
- didnt_flush_entry = TRUE;
- }
- else if (((entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID) && (!entry_ptr->flush_in_progress) &&
- (!entry_ptr->prefetched_dirty)) {
+ /* Skip "dirty" corked entries. */
+ ++num_corked_entries;
+ didnt_flush_entry = TRUE;
+ }
+ else if (((entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID) && (!entry_ptr->flush_in_progress) &&
+ (!entry_ptr->prefetched_dirty)) {
- didnt_flush_entry = FALSE;
+ didnt_flush_entry = FALSE;
- if (entry_ptr->is_dirty) {
+ if (entry_ptr->is_dirty) {
#if H5C_COLLECT_CACHE_STATS
- if ((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) {
+ if ((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) {
- cache_ptr->entries_scanned_to_make_space++;
- }
+ cache_ptr->entries_scanned_to_make_space++;
+ }
#endif /* H5C_COLLECT_CACHE_STATS */
- /* reset entries_removed_counter and
- * last_entry_removed_ptr prior to the call to
- * H5C__flush_single_entry() so that we can spot
- * unexpected removals of entries from the cache,
- * and set the restart_scan flag if proceeding
- * would be likely to cause us to scan an entry
- * that is no longer in the cache.
- */
- cache_ptr->entries_removed_counter = 0;
- cache_ptr->last_entry_removed_ptr = NULL;
+ /* reset entries_removed_counter and
+ * last_entry_removed_ptr prior to the call to
+ * H5C__flush_single_entry() so that we can spot
+ * unexpected removals of entries from the cache,
+ * and set the restart_scan flag if proceeding
+ * would be likely to cause us to scan an entry
+ * that is no longer in the cache.
+ */
+ cache_ptr->entries_removed_counter = 0;
+ cache_ptr->last_entry_removed_ptr = NULL;
- if (H5C__flush_single_entry(f, entry_ptr, H5C__NO_FLAGS_SET) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
+ if (H5C__flush_single_entry(f, entry_ptr, H5C__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
- if ((cache_ptr->entries_removed_counter > 1) ||
- (cache_ptr->last_entry_removed_ptr == prev_ptr))
+ if ((cache_ptr->entries_removed_counter > 1) ||
+ (cache_ptr->last_entry_removed_ptr == prev_ptr))
- restart_scan = TRUE;
- }
- else if ((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size
+ restart_scan = TRUE;
+ }
+ else if ((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size
#ifdef H5_HAVE_PARALLEL
- && !(entry_ptr->coll_access)
+ && !(entry_ptr->coll_access)
#endif /* H5_HAVE_PARALLEL */
- ) {
+ ) {
#if H5C_COLLECT_CACHE_STATS
- cache_ptr->entries_scanned_to_make_space++;
+ cache_ptr->entries_scanned_to_make_space++;
#endif /* H5C_COLLECT_CACHE_STATS */
- if (H5C__flush_single_entry(f, entry_ptr,
- H5C__FLUSH_INVALIDATE_FLAG |
- H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
- }
- else {
- /* We have enough space so don't flush clean entry. */
+ if (H5C__flush_single_entry(f, entry_ptr,
+ H5C__FLUSH_INVALIDATE_FLAG |
+ H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
+ }
+ else {
+ /* We have enough space so don't flush clean entry. */
#if H5C_COLLECT_CACHE_STATS
- clean_entries_skipped++;
+ clean_entries_skipped++;
#endif /* H5C_COLLECT_CACHE_STATS */
- didnt_flush_entry = TRUE;
- }
+ didnt_flush_entry = TRUE;
+ }
#if H5C_COLLECT_CACHE_STATS
- total_entries_scanned++;
+ total_entries_scanned++;
#endif /* H5C_COLLECT_CACHE_STATS */
- }
- else {
+ }
+ else {
- /* Skip epoch markers, entries that are in the process
- * of being flushed, and entries marked as prefetched_dirty
- * (occurs in the R/O case only).
- */
- didnt_flush_entry = TRUE;
+ /* Skip epoch markers, entries that are in the process
+ * of being flushed, and entries marked as prefetched_dirty
+ * (occurs in the R/O case only).
+ */
+ didnt_flush_entry = TRUE;
#if H5C_COLLECT_CACHE_STATS
- if (entry_ptr->prefetched_dirty)
- dirty_pf_entries_skipped++;
+ if (entry_ptr->prefetched_dirty)
+ dirty_pf_entries_skipped++;
#endif /* H5C_COLLECT_CACHE_STATS */
- }
-
- if (prev_ptr != NULL) {
+ }
- if (didnt_flush_entry) {
+ if (prev_ptr != NULL) {
- /* epoch markers don't get flushed, and we don't touch
- * entries that are in the process of being flushed.
- * Hence no need for sanity checks, as we haven't
- * flushed anything. Thus just set entry_ptr to prev_ptr
- * and go on.
- */
- entry_ptr = prev_ptr;
- }
- else if ((restart_scan) || (prev_ptr->is_dirty != prev_is_dirty) ||
- (prev_ptr->next != next_ptr) || (prev_ptr->is_protected) ||
- (prev_ptr->is_pinned)) {
+ if (didnt_flush_entry) {
- /* something has happened to the LRU -- start over
- * from the tail.
- */
- restart_scan = FALSE;
- entry_ptr = cache_ptr->LRU_tail_ptr;
- H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
- }
- else {
+ /* epoch markers don't get flushed, and we don't touch
+ * entries that are in the process of being flushed.
+ * Hence no need for sanity checks, as we haven't
+ * flushed anything. Thus just set entry_ptr to prev_ptr
+ * and go on.
+ */
+ entry_ptr = prev_ptr;
+ }
+ else if ((restart_scan) || (prev_ptr->is_dirty != prev_is_dirty) ||
+ (prev_ptr->next != next_ptr) || (prev_ptr->is_protected) || (prev_ptr->is_pinned)) {
- entry_ptr = prev_ptr;
- }
+ /* something has happened to the LRU -- start over
+ * from the tail.
+ */
+ restart_scan = FALSE;
+ entry_ptr = cache_ptr->LRU_tail_ptr;
+ H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
}
else {
- entry_ptr = NULL;
+ entry_ptr = prev_ptr;
}
+ }
+ else {
- entries_examined++;
+ entry_ptr = NULL;
+ }
- if (cache_ptr->index_size >= cache_ptr->max_cache_size) {
+ entries_examined++;
- empty_space = 0;
- }
- else {
+ if (cache_ptr->index_size >= cache_ptr->max_cache_size) {
- empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
- }
+ empty_space = 0;
+ }
+ else {
- HDassert(cache_ptr->index_size ==
- (cache_ptr->clean_index_size + cache_ptr->dirty_index_size));
+ empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
}
+ HDassert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size));
+ }
+
#if H5C_COLLECT_CACHE_STATS
- cache_ptr->calls_to_msic++;
+ cache_ptr->calls_to_msic++;
- cache_ptr->total_entries_skipped_in_msic += clean_entries_skipped;
- cache_ptr->total_dirty_pf_entries_skipped_in_msic += dirty_pf_entries_skipped;
- cache_ptr->total_entries_scanned_in_msic += total_entries_scanned;
+ cache_ptr->total_entries_skipped_in_msic += clean_entries_skipped;
+ cache_ptr->total_dirty_pf_entries_skipped_in_msic += dirty_pf_entries_skipped;
+ cache_ptr->total_entries_scanned_in_msic += total_entries_scanned;
- if (clean_entries_skipped > cache_ptr->max_entries_skipped_in_msic) {
+ if (clean_entries_skipped > cache_ptr->max_entries_skipped_in_msic) {
- cache_ptr->max_entries_skipped_in_msic = clean_entries_skipped;
- }
+ cache_ptr->max_entries_skipped_in_msic = clean_entries_skipped;
+ }
- if (dirty_pf_entries_skipped > cache_ptr->max_dirty_pf_entries_skipped_in_msic)
- cache_ptr->max_dirty_pf_entries_skipped_in_msic = dirty_pf_entries_skipped;
+ if (dirty_pf_entries_skipped > cache_ptr->max_dirty_pf_entries_skipped_in_msic)
+ cache_ptr->max_dirty_pf_entries_skipped_in_msic = dirty_pf_entries_skipped;
- if (total_entries_scanned > cache_ptr->max_entries_scanned_in_msic) {
+ if (total_entries_scanned > cache_ptr->max_entries_scanned_in_msic) {
- cache_ptr->max_entries_scanned_in_msic = total_entries_scanned;
- }
+ cache_ptr->max_entries_scanned_in_msic = total_entries_scanned;
+ }
#endif /* H5C_COLLECT_CACHE_STATS */
- /* NEED: work on a better assert for corked entries */
- HDassert((entries_examined > (2 * initial_list_len)) ||
- ((cache_ptr->pl_size + cache_ptr->pel_size + cache_ptr->min_clean_size) >
- cache_ptr->max_cache_size) ||
- ((cache_ptr->clean_index_size + empty_space) >= cache_ptr->min_clean_size) ||
- ((num_corked_entries)));
+ /* NEED: work on a better assert for corked entries */
+ HDassert((entries_examined > (2 * initial_list_len)) ||
+ ((cache_ptr->pl_size + cache_ptr->pel_size + cache_ptr->min_clean_size) >
+ cache_ptr->max_cache_size) ||
+ ((cache_ptr->clean_index_size + empty_space) >= cache_ptr->min_clean_size) ||
+ ((num_corked_entries)));
#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
- HDassert((entries_examined > (2 * initial_list_len)) ||
- (cache_ptr->cLRU_list_size <= cache_ptr->clean_index_size));
- HDassert((entries_examined > (2 * initial_list_len)) ||
- (cache_ptr->dLRU_list_size <= cache_ptr->dirty_index_size));
+ HDassert((entries_examined > (2 * initial_list_len)) ||
+ (cache_ptr->cLRU_list_size <= cache_ptr->clean_index_size));
+ HDassert((entries_examined > (2 * initial_list_len)) ||
+ (cache_ptr->dLRU_list_size <= cache_ptr->dirty_index_size));
#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
- }
- else {
+ }
+ else {
- HDassert(H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS);
+ HDassert(H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS);
#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
- initial_list_len = cache_ptr->cLRU_list_len;
- entry_ptr = cache_ptr->cLRU_tail_ptr;
+ initial_list_len = cache_ptr->cLRU_list_len;
+ entry_ptr = cache_ptr->cLRU_tail_ptr;
- while (((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) &&
- (entries_examined <= initial_list_len) && (entry_ptr != NULL)) {
- HDassert(!(entry_ptr->is_protected));
- HDassert(!(entry_ptr->is_read_only));
- HDassert((entry_ptr->ro_ref_count) == 0);
- HDassert(!(entry_ptr->is_dirty));
+ while (((cache_ptr->index_size + space_needed) > cache_ptr->max_cache_size) &&
+ (entries_examined <= initial_list_len) && (entry_ptr != NULL)) {
+ HDassert(!(entry_ptr->is_protected));
+ HDassert(!(entry_ptr->is_read_only));
+ HDassert((entry_ptr->ro_ref_count) == 0);
+ HDassert(!(entry_ptr->is_dirty));
- prev_ptr = entry_ptr->aux_prev;
+ prev_ptr = entry_ptr->aux_prev;
- if ((!(entry_ptr->prefetched_dirty))
+ if ((!(entry_ptr->prefetched_dirty))
#ifdef H5_HAVE_PARALLEL
- && (!(entry_ptr->coll_access))
+ && (!(entry_ptr->coll_access))
#endif /* H5_HAVE_PARALLEL */
- ) {
- if (H5C__flush_single_entry(f, entry_ptr,
- H5C__FLUSH_INVALIDATE_FLAG |
- H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
+ ) {
+ if (H5C__flush_single_entry(
+ f, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
- } /* end if */
+ } /* end if */
- /* we are scanning the clean LRU, so the serialize function
- * will not be called on any entry -- thus there is no
- * concern about the list being modified out from under
- * this function.
- */
+ /* we are scanning the clean LRU, so the serialize function
+ * will not be called on any entry -- thus there is no
+ * concern about the list being modified out from under
+ * this function.
+ */
- entry_ptr = prev_ptr;
- entries_examined++;
- }
-#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+ entry_ptr = prev_ptr;
+ entries_examined++;
}
+#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+ }
done:
- /* Sanity checks */
- HDassert(cache_ptr->msic_in_progress);
- if (!reentrant_call)
- cache_ptr->msic_in_progress = FALSE;
- HDassert((!reentrant_call) || (cache_ptr->msic_in_progress));
+ /* Sanity checks */
+ HDassert(cache_ptr->msic_in_progress);
+ if (!reentrant_call)
+ cache_ptr->msic_in_progress = FALSE;
+ HDassert((!reentrant_call) || (cache_ptr->msic_in_progress));
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__make_space_in_cache() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__make_space_in_cache() */
/*-------------------------------------------------------------------------
*
- * Function: H5C_validate_lru_list
+ * Function: H5C__validate_lru_list
*
* Purpose: Debugging function that scans the LRU list for errors.
*
- * If an error is detected, the function generates a
- * diagnostic and returns FAIL. If no error is detected,
- * the function returns SUCCEED.
+ * If an error is detected, the function generates a
+ * diagnostic and returns FAIL. If no error is detected,
+ * the function returns SUCCEED.
*
* Return: FAIL if error is detected, SUCCEED otherwise.
*
@@ -7617,95 +8345,75 @@ done:
*-------------------------------------------------------------------------
*/
#if H5C_DO_EXTREME_SANITY_CHECKS
+static herr_t
+H5C__validate_lru_list(H5C_t *cache_ptr)
+{
+ int32_t len = 0;
+ size_t size = 0;
+ H5C_cache_entry_t *entry_ptr = NULL;
+ herr_t ret_value = SUCCEED; /* Return value */
- static herr_t H5C_validate_lru_list(H5C_t * cache_ptr)
- {
- herr_t ret_value = SUCCEED; /* Return value */
- int32_t len = 0;
- size_t size = 0;
- H5C_cache_entry_t *entry_ptr = NULL;
-
- FUNC_ENTER_NOAPI_NOINIT
-
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
- if (((cache_ptr->LRU_head_ptr == NULL) || (cache_ptr->LRU_tail_ptr == NULL)) &&
- (cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr)) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 1 failed")
- }
-
- if (cache_ptr->LRU_list_len < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
-
- if ((cache_ptr->LRU_list_len == 1) &&
- ((cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr) || (cache_ptr->LRU_head_ptr == NULL) ||
- (cache_ptr->LRU_head_ptr->size != cache_ptr->LRU_list_size))) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")
- }
-
- if ((cache_ptr->LRU_list_len >= 1) &&
- ((cache_ptr->LRU_head_ptr == NULL) || (cache_ptr->LRU_head_ptr->prev != NULL) ||
- (cache_ptr->LRU_tail_ptr == NULL) || (cache_ptr->LRU_tail_ptr->next != NULL))) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")
- }
+ FUNC_ENTER_STATIC
- entry_ptr = cache_ptr->LRU_head_ptr;
- while (entry_ptr != NULL) {
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- if ((entry_ptr != cache_ptr->LRU_head_ptr) &&
- ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr))) {
+ if (((cache_ptr->LRU_head_ptr == NULL) || (cache_ptr->LRU_tail_ptr == NULL)) &&
+ (cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 1 failed")
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")
- }
+ if (cache_ptr->LRU_list_len < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
- if ((entry_ptr != cache_ptr->LRU_tail_ptr) &&
- ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr))) {
+ if ((cache_ptr->LRU_list_len == 1) &&
+ ((cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr) || (cache_ptr->LRU_head_ptr == NULL) ||
+ (cache_ptr->LRU_head_ptr->size != cache_ptr->LRU_list_size)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
- }
+ if ((cache_ptr->LRU_list_len >= 1) &&
+ ((cache_ptr->LRU_head_ptr == NULL) || (cache_ptr->LRU_head_ptr->prev != NULL) ||
+ (cache_ptr->LRU_tail_ptr == NULL) || (cache_ptr->LRU_tail_ptr->next != NULL)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")
- if ((entry_ptr->is_pinned) || (entry_ptr->pinned_from_client) || (entry_ptr->pinned_from_cache)) {
+ entry_ptr = cache_ptr->LRU_head_ptr;
+ while (entry_ptr != NULL) {
+ if ((entry_ptr != cache_ptr->LRU_head_ptr) &&
+ ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")
- }
+ if ((entry_ptr != cache_ptr->LRU_tail_ptr) &&
+ ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
- len++;
- size += entry_ptr->size;
- entry_ptr = entry_ptr->next;
- }
+ if ((entry_ptr->is_pinned) || (entry_ptr->pinned_from_client) || (entry_ptr->pinned_from_cache))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")
- if ((cache_ptr->LRU_list_len != len) || (cache_ptr->LRU_list_size != size)) {
+ len++;
+ size += entry_ptr->size;
+ entry_ptr = entry_ptr->next;
+ }
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 8 failed")
- }
+ if ((cache_ptr->LRU_list_len != len) || (cache_ptr->LRU_list_size != size))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 8 failed")
done:
+ if (ret_value != SUCCEED)
+ HDassert(0);
- if (ret_value != SUCCEED) {
-
- HDassert(0);
- }
-
- FUNC_LEAVE_NOAPI(ret_value)
-
- } /* H5C_validate_lru_list() */
-
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__validate_lru_list() */
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
/*-------------------------------------------------------------------------
*
- * Function: H5C_validate_pinned_entry_list
+ * Function: H5C__validate_pinned_entry_list
*
* Purpose: Debugging function that scans the pinned entry list for
* errors.
*
- * If an error is detected, the function generates a
- * diagnostic and returns FAIL. If no error is detected,
- * the function returns SUCCEED.
+ * If an error is detected, the function generates a
+ * diagnostic and returns FAIL. If no error is detected,
+ * the function returns SUCCEED.
*
* Return: FAIL if error is detected, SUCCEED otherwise.
*
@@ -7718,100 +8426,78 @@ done:
*-------------------------------------------------------------------------
*/
#if H5C_DO_EXTREME_SANITY_CHECKS
+static herr_t
+H5C__validate_pinned_entry_list(H5C_t *cache_ptr)
+{
+ int32_t len = 0;
+ size_t size = 0;
+ H5C_cache_entry_t *entry_ptr = NULL;
+ herr_t ret_value = SUCCEED; /* Return value */
- static herr_t H5C_validate_pinned_entry_list(H5C_t * cache_ptr)
- {
- herr_t ret_value = SUCCEED; /* Return value */
- int32_t len = 0;
- size_t size = 0;
- H5C_cache_entry_t *entry_ptr = NULL;
-
- FUNC_ENTER_NOAPI_NOINIT
-
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
- if (((cache_ptr->pel_head_ptr == NULL) || (cache_ptr->pel_tail_ptr == NULL)) &&
- (cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr)) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 1 failed")
- }
-
- if (cache_ptr->pel_len < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
-
- if ((cache_ptr->pel_len == 1) &&
- ((cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr) || (cache_ptr->pel_head_ptr == NULL) ||
- (cache_ptr->pel_head_ptr->size != cache_ptr->pel_size))) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")
- }
-
- if ((cache_ptr->pel_len >= 1) &&
- ((cache_ptr->pel_head_ptr == NULL) || (cache_ptr->pel_head_ptr->prev != NULL) ||
- (cache_ptr->pel_tail_ptr == NULL) || (cache_ptr->pel_tail_ptr->next != NULL))) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")
- }
-
- entry_ptr = cache_ptr->pel_head_ptr;
- while (entry_ptr != NULL) {
+ FUNC_ENTER_STATIC
- if ((entry_ptr != cache_ptr->pel_head_ptr) &&
- ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr))) {
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")
- }
+ if (((cache_ptr->pel_head_ptr == NULL) || (cache_ptr->pel_tail_ptr == NULL)) &&
+ (cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 1 failed")
- if ((entry_ptr != cache_ptr->pel_tail_ptr) &&
- ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr))) {
+ if (cache_ptr->pel_len < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
- }
+ if ((cache_ptr->pel_len == 1) &&
+ ((cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr) || (cache_ptr->pel_head_ptr == NULL) ||
+ (cache_ptr->pel_head_ptr->size != cache_ptr->pel_size)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")
- if (!entry_ptr->is_pinned) {
+ if ((cache_ptr->pel_len >= 1) &&
+ ((cache_ptr->pel_head_ptr == NULL) || (cache_ptr->pel_head_ptr->prev != NULL) ||
+ (cache_ptr->pel_tail_ptr == NULL) || (cache_ptr->pel_tail_ptr->next != NULL)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")
- }
+ entry_ptr = cache_ptr->pel_head_ptr;
+ while (entry_ptr != NULL) {
+ if ((entry_ptr != cache_ptr->pel_head_ptr) &&
+ ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")
- if (!((entry_ptr->pinned_from_client) || (entry_ptr->pinned_from_cache))) {
+ if ((entry_ptr != cache_ptr->pel_tail_ptr) &&
+ ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 8 failed")
- }
+ if (!entry_ptr->is_pinned)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")
- len++;
- size += entry_ptr->size;
- entry_ptr = entry_ptr->next;
- }
+ if (!(entry_ptr->pinned_from_client || entry_ptr->pinned_from_cache))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 8 failed")
- if ((cache_ptr->pel_len != len) || (cache_ptr->pel_size != size)) {
+ len++;
+ size += entry_ptr->size;
+ entry_ptr = entry_ptr->next;
+ }
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 9 failed")
- }
+ if ((cache_ptr->pel_len != len) || (cache_ptr->pel_size != size))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 9 failed")
done:
+ if (ret_value != SUCCEED)
+ HDassert(0);
- if (ret_value != SUCCEED) {
-
- HDassert(0);
- }
-
- FUNC_LEAVE_NOAPI(ret_value)
-
- } /* H5C_validate_pinned_entry_list() */
-
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__validate_pinned_entry_list() */
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
/*-------------------------------------------------------------------------
*
- * Function: H5C_validate_protected_entry_list
+ * Function: H5C__validate_protected_entry_list
*
* Purpose: Debugging function that scans the protected entry list for
* errors.
*
- * If an error is detected, the function generates a
- * diagnostic and returns FAIL. If no error is detected,
- * the function returns SUCCEED.
+ * If an error is detected, the function generates a
+ * diagnostic and returns FAIL. If no error is detected,
+ * the function returns SUCCEED.
*
* Return: FAIL if error is detected, SUCCEED otherwise.
*
@@ -7824,98 +8510,78 @@ done:
*-------------------------------------------------------------------------
*/
#if H5C_DO_EXTREME_SANITY_CHECKS
+static herr_t
+H5C__validate_protected_entry_list(H5C_t *cache_ptr)
+{
+ int32_t len = 0;
+ size_t size = 0;
+ H5C_cache_entry_t *entry_ptr = NULL;
+ herr_t ret_value = SUCCEED; /* Return value */
- static herr_t H5C_validate_protected_entry_list(H5C_t * cache_ptr)
- {
- herr_t ret_value = SUCCEED; /* Return value */
- int32_t len = 0;
- size_t size = 0;
- H5C_cache_entry_t *entry_ptr = NULL;
-
- FUNC_ENTER_NOAPI_NOINIT
-
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
- if (((cache_ptr->pl_head_ptr == NULL) || (cache_ptr->pl_tail_ptr == NULL)) &&
- (cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 1 failed")
-
- if (cache_ptr->pl_len < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
-
- if ((cache_ptr->pl_len == 1) &&
- ((cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr) || (cache_ptr->pl_head_ptr == NULL) ||
- (cache_ptr->pl_head_ptr->size != cache_ptr->pl_size))) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")
- }
-
- if ((cache_ptr->pl_len >= 1) &&
- ((cache_ptr->pl_head_ptr == NULL) || (cache_ptr->pl_head_ptr->prev != NULL) ||
- (cache_ptr->pl_tail_ptr == NULL) || (cache_ptr->pl_tail_ptr->next != NULL))) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")
- }
-
- entry_ptr = cache_ptr->pl_head_ptr;
- while (entry_ptr != NULL) {
+ FUNC_ENTER_STATIC
- if ((entry_ptr != cache_ptr->pl_head_ptr) &&
- ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr))) {
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")
- }
+ if (((cache_ptr->pl_head_ptr == NULL) || (cache_ptr->pl_tail_ptr == NULL)) &&
+ (cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 1 failed")
- if ((entry_ptr != cache_ptr->pl_tail_ptr) &&
- ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr))) {
+ if (cache_ptr->pl_len < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
- }
+ if ((cache_ptr->pl_len == 1) &&
+ ((cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr) || (cache_ptr->pl_head_ptr == NULL) ||
+ (cache_ptr->pl_head_ptr->size != cache_ptr->pl_size)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")
- if (!entry_ptr->is_protected) {
+ if ((cache_ptr->pl_len >= 1) &&
+ ((cache_ptr->pl_head_ptr == NULL) || (cache_ptr->pl_head_ptr->prev != NULL) ||
+ (cache_ptr->pl_tail_ptr == NULL) || (cache_ptr->pl_tail_ptr->next != NULL)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")
- }
+ entry_ptr = cache_ptr->pl_head_ptr;
+ while (entry_ptr != NULL) {
+ if ((entry_ptr != cache_ptr->pl_head_ptr) &&
+ ((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")
- if ((entry_ptr->is_read_only) && (entry_ptr->ro_ref_count <= 0)) {
+ if ((entry_ptr != cache_ptr->pl_tail_ptr) &&
+ ((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 8 failed")
- }
+ if (!entry_ptr->is_protected)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")
- len++;
- size += entry_ptr->size;
- entry_ptr = entry_ptr->next;
- }
+ if (entry_ptr->is_read_only && (entry_ptr->ro_ref_count <= 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 8 failed")
- if ((cache_ptr->pl_len != len) || (cache_ptr->pl_size != size)) {
+ len++;
+ size += entry_ptr->size;
+ entry_ptr = entry_ptr->next;
+ }
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 9 failed")
- }
+ if ((cache_ptr->pl_len != len) || (cache_ptr->pl_size != size))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 9 failed")
done:
+ if (ret_value != SUCCEED)
+ HDassert(0);
- if (ret_value != SUCCEED) {
-
- HDassert(0);
- }
-
- FUNC_LEAVE_NOAPI(ret_value)
-
- } /* H5C_validate_protected_entry_list() */
-
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__validate_protected_entry_list() */
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
/*-------------------------------------------------------------------------
*
- * Function: H5C_entry_in_skip_list
+ * Function: H5C__entry_in_skip_list
*
* Purpose: Debugging function that scans skip list to see if it
- * is in present. We need this, as it is possible for
- * an entry to be in the skip list twice.
+ * is present. We need this, as it is possible for
+ * an entry to be in the skip list twice.
*
* Return: FALSE if the entry is not in the skip list, and TRUE
- * if it is.
+ * if it is.
*
* Programmer: John Mainzer, 11/1/14
*
@@ -7926,1256 +8592,1297 @@ done:
*-------------------------------------------------------------------------
*/
#if H5C_DO_SLIST_SANITY_CHECKS
+static hbool_t
+H5C__entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr)
+{
+ H5SL_node_t *node_ptr;
+ hbool_t in_slist;
- static hbool_t H5C_entry_in_skip_list(H5C_t * cache_ptr, H5C_cache_entry_t * target_ptr)
- {
- hbool_t in_slist = FALSE;
- H5SL_node_t * node_ptr = NULL;
- H5C_cache_entry_t *entry_ptr = NULL;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_ptr);
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->slist_ptr);
+ node_ptr = H5SL_first(cache_ptr->slist_ptr);
+ in_slist = FALSE;
+ while ((node_ptr != NULL) && (!in_slist)) {
+ H5C_cache_entry_t *entry_ptr;
- node_ptr = H5SL_first(cache_ptr->slist_ptr);
+ entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- while ((node_ptr != NULL) && (!in_slist)) {
- entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ HDassert(entry_ptr);
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->is_dirty);
+ HDassert(entry_ptr->in_slist);
- HDassert(entry_ptr);
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->is_dirty);
- HDassert(entry_ptr->in_slist);
+ if (entry_ptr == target_ptr)
+ in_slist = TRUE;
+ else
+ node_ptr = H5SL_next(node_ptr);
+ }
- if (entry_ptr == target_ptr) {
+ return (in_slist);
+} /* H5C__entry_in_skip_list() */
+#endif /* H5C_DO_SLIST_SANITY_CHECKS */
- in_slist = TRUE;
- }
- else {
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C__flush_marked_entries
+ *
+ * Purpose: Flushes all marked entries in the cache.
+ *
+ * Return: FAIL if error is detected, SUCCEED otherwise.
+ *
+ * Programmer: Mike McGreevy
+ * November 3, 2010
+ *
+ * Changes: Modified function to setup the slist before calling
+ * H5C_flush_cache(), and take it down afterwards. Note
+ * that the slist need not be empty after the call to
+ * H5C_flush_cache() since we are only flushing marked
+ * entries. Thus must set the clear_slist parameter
+ * of H5C_set_slist_enabled to TRUE.
+ *
+ * JRM -- 5/6/20
+ *
+ *-------------------------------------------------------------------------
+ */
- node_ptr = H5SL_next(node_ptr);
- }
- }
+herr_t
+H5C__flush_marked_entries(H5F_t *f)
+{
+ herr_t ret_value = SUCCEED;
- return (in_slist);
+ FUNC_ENTER_PACKAGE
- } /* H5C_entry_in_skip_list() */
-#endif /* H5C_DO_SLIST_SANITY_CHECKS */
+ /* Assertions */
+ HDassert(f != NULL);
- /*-------------------------------------------------------------------------
- *
- * Function: H5C__flush_marked_entries
- *
- * Purpose: Flushes all marked entries in the cache.
- *
- * Return: FAIL if error is detected, SUCCEED otherwise.
- *
- * Programmer: Mike McGreevy
- * November 3, 2010
- *
- *-------------------------------------------------------------------------
- */
- herr_t H5C__flush_marked_entries(H5F_t * f)
- {
- herr_t ret_value = SUCCEED;
+ /* Enable the slist, as it is needed in the flush */
+ if (H5C_set_slist_enabled(f->shared->cache, TRUE, FALSE) < 0)
+
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "set slist enabled failed")
+
+ /* Flush all marked entries */
+ if (H5C_flush_cache(f, H5C__FLUSH_MARKED_ENTRIES_FLAG | H5C__FLUSH_IGNORE_PROTECTED_FLAG) < 0)
- FUNC_ENTER_PACKAGE
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush cache")
- /* Assertions */
- HDassert(f != NULL);
+ /* Disable the slist. Set the clear_slist parameter to TRUE
+ * since we called H5C_flush_cache() with the
+ * H5C__FLUSH_MARKED_ENTRIES_FLAG.
+ */
+ if (H5C_set_slist_enabled(f->shared->cache, FALSE, TRUE) < 0)
- /* Flush all marked entries */
- if (H5C_flush_cache(f, H5C__FLUSH_MARKED_ENTRIES_FLAG | H5C__FLUSH_IGNORE_PROTECTED_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush cache")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "disable slist failed")
done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__flush_marked_entries */
- /*-------------------------------------------------------------------------
- *
- * Function: H5C_cork
- *
- * Purpose: To cork/uncork/get cork status of an object depending on "action":
- * H5C__SET_CORK:
- * To cork the object
- * Return error if the object is already corked
- * H5C__UNCORK:
- * To uncork the obejct
- * Return error if the object is not corked
- * H5C__GET_CORKED:
- * To retrieve the cork status of an object in
- * the parameter "corked"
- *
- * Return: Success: Non-negative
- * Failure: Negative
- *
- * Programmer: Vailin Choi
- * January 2014
- *
- *-------------------------------------------------------------------------
- */
- herr_t H5C_cork(H5C_t * cache_ptr, haddr_t obj_addr, unsigned action, hbool_t *corked)
- {
- H5C_tag_info_t *tag_info; /* Points to a tag info struct */
- herr_t ret_value = SUCCEED;
+ FUNC_LEAVE_NOAPI(ret_value)
- FUNC_ENTER_NOAPI_NOINIT
+} /* H5C__flush_marked_entries */
- /* Assertions */
- HDassert(cache_ptr != NULL);
- HDassert(H5F_addr_defined(obj_addr));
- HDassert(action == H5C__SET_CORK || action == H5C__UNCORK || action == H5C__GET_CORKED);
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_cork
+ *
+ * Purpose: To cork/uncork/get cork status of an object depending on "action":
+ * H5C__SET_CORK:
+ * To cork the object
+ * Return error if the object is already corked
+ * H5C__UNCORK:
+ * To uncork the object
+ * Return error if the object is not corked
+ * H5C__GET_CORKED:
+ * To retrieve the cork status of an object in
+ * the parameter "corked"
+ *
+ * Return: Success: Non-negative
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi
+ * January 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_cork(H5C_t *cache_ptr, haddr_t obj_addr, unsigned action, hbool_t *corked)
+{
+ H5C_tag_info_t *tag_info; /* Points to a tag info struct */
+ herr_t ret_value = SUCCEED;
- /* Search the list of corked object addresses in the cache */
- tag_info = (H5C_tag_info_t *)H5SL_search(cache_ptr->tag_list, &obj_addr);
+ FUNC_ENTER_NOAPI_NOINIT
- if (H5C__GET_CORKED == action) {
- HDassert(corked);
- if (tag_info != NULL && tag_info->corked)
- *corked = TRUE;
- else
- *corked = FALSE;
- } /* end if */
- else {
- /* Sanity check */
- HDassert(H5C__SET_CORK == action || H5C__UNCORK == action);
-
- /* Perform appropriate action */
- if (H5C__SET_CORK == action) {
- /* Check if this is the first entry for this tagged object */
- if (NULL == tag_info) {
- /* Allocate new tag info struct */
- if (NULL == (tag_info = H5FL_CALLOC(H5C_tag_info_t)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "can't allocate tag info for cache entry")
-
- /* Set the tag for all entries */
- tag_info->tag = obj_addr;
-
- /* Insert tag info into skip list */
- if (H5SL_insert(cache_ptr->tag_list, tag_info, &(tag_info->tag)) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "can't insert tag info in skip list")
- } /* end if */
- else {
- /* Check for object already corked */
- if (tag_info->corked)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTCORK, FAIL, "object already corked")
- HDassert(tag_info->entry_cnt > 0 && tag_info->head);
- } /* end else */
+ /* Assertions */
+ HDassert(cache_ptr != NULL);
+ HDassert(H5F_addr_defined(obj_addr));
+ HDassert(action == H5C__SET_CORK || action == H5C__UNCORK || action == H5C__GET_CORKED);
- /* Set the corked status for the entire object */
- tag_info->corked = TRUE;
- cache_ptr->num_objs_corked++;
+ /* Search the list of corked object addresses in the cache */
+ tag_info = (H5C_tag_info_t *)H5SL_search(cache_ptr->tag_list, &obj_addr);
+ if (H5C__GET_CORKED == action) {
+ HDassert(corked);
+ if (tag_info != NULL && tag_info->corked)
+ *corked = TRUE;
+ else
+ *corked = FALSE;
+ } /* end if */
+ else {
+ /* Sanity check */
+ HDassert(H5C__SET_CORK == action || H5C__UNCORK == action);
+
+ /* Perform appropriate action */
+ if (H5C__SET_CORK == action) {
+ /* Check if this is the first entry for this tagged object */
+ if (NULL == tag_info) {
+ /* Allocate new tag info struct */
+ if (NULL == (tag_info = H5FL_CALLOC(H5C_tag_info_t)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "can't allocate tag info for cache entry")
+
+ /* Set the tag for all entries */
+ tag_info->tag = obj_addr;
+
+ /* Insert tag info into skip list */
+ if (H5SL_insert(cache_ptr->tag_list, tag_info, &(tag_info->tag)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "can't insert tag info in skip list")
} /* end if */
else {
- /* Sanity check */
- HDassert(tag_info);
+ /* Check for object already corked */
+ if (tag_info->corked)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCORK, FAIL, "object already corked")
+ HDassert(tag_info->entry_cnt > 0 && tag_info->head);
+ } /* end else */
- /* Check for already uncorked */
- if (!tag_info->corked)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNCORK, FAIL, "object already uncorked")
+ /* Set the corked status for the entire object */
+ tag_info->corked = TRUE;
+ cache_ptr->num_objs_corked++;
- /* Set the corked status for the entire object */
- tag_info->corked = FALSE;
- cache_ptr->num_objs_corked--;
+ } /* end if */
+ else {
+ /* Sanity check */
+ HDassert(tag_info);
- /* Remove the tag info from the tag list, if there's no more entries with this tag */
- if (0 == tag_info->entry_cnt) {
- /* Sanity check */
- HDassert(NULL == tag_info->head);
+ /* Check for already uncorked */
+ if (!tag_info->corked)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNCORK, FAIL, "object already uncorked")
- if (H5SL_remove(cache_ptr->tag_list, &(tag_info->tag)) != tag_info)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove tag info from list")
+ /* Set the corked status for the entire object */
+ tag_info->corked = FALSE;
+ cache_ptr->num_objs_corked--;
- /* Release the tag info */
- tag_info = H5FL_FREE(H5C_tag_info_t, tag_info);
- } /* end if */
- else
- HDassert(NULL != tag_info->head);
- } /* end else */
- } /* end else */
+ /* Remove the tag info from the tag list, if there's no more entries with this tag */
+ if (0 == tag_info->entry_cnt) {
+ /* Sanity check */
+ HDassert(NULL == tag_info->head);
-done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C_cork() */
+ if (H5SL_remove(cache_ptr->tag_list, &(tag_info->tag)) != tag_info)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove tag info from list")
- /*-------------------------------------------------------------------------
- * Function: H5C__mark_flush_dep_dirty()
- *
- * Purpose: Recursively propagate the flush_dep_ndirty_children flag
- * up the dependency chain in response to entry either
- * becoming dirty or having its flush_dep_ndirty_children
- * increased from 0.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Neil Fortner
- * 11/13/12
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__mark_flush_dep_dirty(H5C_cache_entry_t * entry)
- {
- unsigned u; /* Local index variable */
- herr_t ret_value = SUCCEED; /* Return value */
+ /* Release the tag info */
+ tag_info = H5FL_FREE(H5C_tag_info_t, tag_info);
+ } /* end if */
+ else
+ HDassert(NULL != tag_info->head);
+ } /* end else */
+ } /* end else */
- FUNC_ENTER_STATIC
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_cork() */
- /* Sanity checks */
- HDassert(entry);
+/*-------------------------------------------------------------------------
+ * Function: H5C__mark_flush_dep_dirty()
+ *
+ * Purpose: Recursively propagate the flush_dep_ndirty_children flag
+ * up the dependency chain in response to entry either
+ * becoming dirty or having its flush_dep_ndirty_children
+ * increased from 0.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * 11/13/12
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__mark_flush_dep_dirty(H5C_cache_entry_t *entry)
+{
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
- /* Iterate over the parent entries, if any */
- for (u = 0; u < entry->flush_dep_nparents; u++) {
- /* Sanity check */
- HDassert(entry->flush_dep_parent[u]->flush_dep_ndirty_children <
- entry->flush_dep_parent[u]->flush_dep_nchildren);
+ FUNC_ENTER_STATIC
- /* Adjust the parent's number of dirty children */
- entry->flush_dep_parent[u]->flush_dep_ndirty_children++;
+ /* Sanity checks */
+ HDassert(entry);
- /* If the parent has a 'notify' callback, send a 'child entry dirtied' notice */
- if (entry->flush_dep_parent[u]->type->notify &&
- (entry->flush_dep_parent[u]->type->notify)(H5C_NOTIFY_ACTION_CHILD_DIRTIED,
- entry->flush_dep_parent[u]) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify parent about child entry dirty flag set")
- } /* end for */
+ /* Iterate over the parent entries, if any */
+ for (u = 0; u < entry->flush_dep_nparents; u++) {
+ /* Sanity check */
+ HDassert(entry->flush_dep_parent[u]->flush_dep_ndirty_children <
+ entry->flush_dep_parent[u]->flush_dep_nchildren);
+
+ /* Adjust the parent's number of dirty children */
+ entry->flush_dep_parent[u]->flush_dep_ndirty_children++;
+
+ /* If the parent has a 'notify' callback, send a 'child entry dirtied' notice */
+ if (entry->flush_dep_parent[u]->type->notify &&
+ (entry->flush_dep_parent[u]->type->notify)(H5C_NOTIFY_ACTION_CHILD_DIRTIED,
+ entry->flush_dep_parent[u]) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify parent about child entry dirty flag set")
+ } /* end for */
done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__mark_flush_dep_dirty() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__mark_flush_dep_dirty() */
- /*-------------------------------------------------------------------------
- * Function: H5C__mark_flush_dep_clean()
- *
- * Purpose: Recursively propagate the flush_dep_ndirty_children flag
- * up the dependency chain in response to entry either
- * becoming clean or having its flush_dep_ndirty_children
- * reduced to 0.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Neil Fortner
- * 11/13/12
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__mark_flush_dep_clean(H5C_cache_entry_t * entry)
- {
- int i; /* Local index variable */
- herr_t ret_value = SUCCEED; /* Return value */
+/*-------------------------------------------------------------------------
+ * Function: H5C__mark_flush_dep_clean()
+ *
+ * Purpose: Recursively propagate the flush_dep_ndirty_children flag
+ * up the dependency chain in response to entry either
+ * becoming clean or having its flush_dep_ndirty_children
+ * reduced to 0.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * 11/13/12
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__mark_flush_dep_clean(H5C_cache_entry_t *entry)
+{
+ int i; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_STATIC
+ FUNC_ENTER_STATIC
- /* Sanity checks */
- HDassert(entry);
+ /* Sanity checks */
+ HDassert(entry);
- /* Iterate over the parent entries, if any */
- /* Note reverse iteration order, in case the callback removes the flush
- * dependency - QAK, 2017/08/12
- */
- for (i = ((int)entry->flush_dep_nparents) - 1; i >= 0; i--) {
- /* Sanity check */
- HDassert(entry->flush_dep_parent[i]->flush_dep_ndirty_children > 0);
+ /* Iterate over the parent entries, if any */
+ /* Note reverse iteration order, in case the callback removes the flush
+ * dependency - QAK, 2017/08/12
+ */
+ for (i = ((int)entry->flush_dep_nparents) - 1; i >= 0; i--) {
+ /* Sanity check */
+ HDassert(entry->flush_dep_parent[i]->flush_dep_ndirty_children > 0);
- /* Adjust the parent's number of dirty children */
- entry->flush_dep_parent[i]->flush_dep_ndirty_children--;
+ /* Adjust the parent's number of dirty children */
+ entry->flush_dep_parent[i]->flush_dep_ndirty_children--;
- /* If the parent has a 'notify' callback, send a 'child entry cleaned' notice */
- if (entry->flush_dep_parent[i]->type->notify &&
- (entry->flush_dep_parent[i]->type->notify)(H5C_NOTIFY_ACTION_CHILD_CLEANED,
- entry->flush_dep_parent[i]) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify parent about child entry dirty flag reset")
- } /* end for */
+ /* If the parent has a 'notify' callback, send a 'child entry cleaned' notice */
+ if (entry->flush_dep_parent[i]->type->notify &&
+ (entry->flush_dep_parent[i]->type->notify)(H5C_NOTIFY_ACTION_CHILD_CLEANED,
+ entry->flush_dep_parent[i]) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify parent about child entry dirty flag reset")
+ } /* end for */
done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__mark_flush_dep_clean() */
-
- /*-------------------------------------------------------------------------
- * Function: H5C__mark_flush_dep_serialized()
- *
- * Purpose: Decrement the flush_dep_nunser_children fields of all the
- * target entry's flush dependency parents in response to
- * the target entry becoming serialized.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer
- * 8/30/16
- *
- *-------------------------------------------------------------------------
- */
- herr_t H5C__mark_flush_dep_serialized(H5C_cache_entry_t * entry_ptr)
- {
- int i; /* Local index variable */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_STATIC
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__mark_flush_dep_clean() */
- /* Sanity checks */
- HDassert(entry_ptr);
+/*-------------------------------------------------------------------------
+ * Function: H5C__mark_flush_dep_serialized()
+ *
+ * Purpose: Decrement the flush_dep_nunser_children fields of all the
+ * target entry's flush dependency parents in response to
+ * the target entry becoming serialized.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 8/30/16
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__mark_flush_dep_serialized(H5C_cache_entry_t *entry_ptr)
+{
+ int i; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
- /* Iterate over the parent entries, if any */
- /* Note reverse iteration order, in case the callback removes the flush
- * dependency - QAK, 2017/08/12
- */
- for (i = ((int)entry_ptr->flush_dep_nparents) - 1; i >= 0; i--) {
- /* Sanity checks */
- HDassert(entry_ptr->flush_dep_parent);
- HDassert(entry_ptr->flush_dep_parent[i]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->flush_dep_parent[i]->flush_dep_nunser_children > 0);
+ FUNC_ENTER_STATIC
- /* decrement the parents number of unserialized children */
- entry_ptr->flush_dep_parent[i]->flush_dep_nunser_children--;
+ /* Sanity checks */
+ HDassert(entry_ptr);
- /* If the parent has a 'notify' callback, send a 'child entry serialized' notice */
- if (entry_ptr->flush_dep_parent[i]->type->notify &&
- (entry_ptr->flush_dep_parent[i]->type->notify)(H5C_NOTIFY_ACTION_CHILD_SERIALIZED,
- entry_ptr->flush_dep_parent[i]) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify parent about child entry serialized flag set")
- } /* end for */
+ /* Iterate over the parent entries, if any */
+ /* Note reverse iteration order, in case the callback removes the flush
+ * dependency - QAK, 2017/08/12
+ */
+ for (i = ((int)entry_ptr->flush_dep_nparents) - 1; i >= 0; i--) {
+ /* Sanity checks */
+ HDassert(entry_ptr->flush_dep_parent);
+ HDassert(entry_ptr->flush_dep_parent[i]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->flush_dep_parent[i]->flush_dep_nunser_children > 0);
+
+ /* decrement the parents number of unserialized children */
+ entry_ptr->flush_dep_parent[i]->flush_dep_nunser_children--;
+
+ /* If the parent has a 'notify' callback, send a 'child entry serialized' notice */
+ if (entry_ptr->flush_dep_parent[i]->type->notify &&
+ (entry_ptr->flush_dep_parent[i]->type->notify)(H5C_NOTIFY_ACTION_CHILD_SERIALIZED,
+ entry_ptr->flush_dep_parent[i]) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify parent about child entry serialized flag set")
+ } /* end for */
done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__mark_flush_dep_serialized() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__mark_flush_dep_serialized() */
- /*-------------------------------------------------------------------------
- * Function: H5C__mark_flush_dep_unserialized()
- *
- * Purpose: Increment the flush_dep_nunser_children fields of all the
- * target entry's flush dependency parents in response to
- * the target entry becoming unserialized.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer
- * 8/30/16
- *
- *-------------------------------------------------------------------------
- */
- herr_t H5C__mark_flush_dep_unserialized(H5C_cache_entry_t * entry_ptr)
- {
- unsigned u; /* Local index variable */
- herr_t ret_value = SUCCEED; /* Return value */
+/*-------------------------------------------------------------------------
+ * Function: H5C__mark_flush_dep_unserialized()
+ *
+ * Purpose: Increment the flush_dep_nunser_children fields of all the
+ * target entry's flush dependency parents in response to
+ * the target entry becoming unserialized.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 8/30/16
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__mark_flush_dep_unserialized(H5C_cache_entry_t *entry_ptr)
+{
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_STATIC
+ FUNC_ENTER_STATIC
- /* Sanity checks */
- HDassert(entry_ptr);
+ /* Sanity checks */
+ HDassert(entry_ptr);
- /* Iterate over the parent entries, if any */
- for (u = 0; u < entry_ptr->flush_dep_nparents; u++) {
- /* Sanity check */
- HDassert(entry_ptr->flush_dep_parent);
- HDassert(entry_ptr->flush_dep_parent[u]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children <
- entry_ptr->flush_dep_parent[u]->flush_dep_nchildren);
-
- /* increment parents number of usserialized children */
- entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children++;
-
- /* If the parent has a 'notify' callback, send a 'child entry unserialized' notice */
- if (entry_ptr->flush_dep_parent[u]->type->notify &&
- (entry_ptr->flush_dep_parent[u]->type->notify)(H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED,
- entry_ptr->flush_dep_parent[u]) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "can't notify parent about child entry serialized flag reset")
- } /* end for */
+ /* Iterate over the parent entries, if any */
+ for (u = 0; u < entry_ptr->flush_dep_nparents; u++) {
+ /* Sanity check */
+ HDassert(entry_ptr->flush_dep_parent);
+ HDassert(entry_ptr->flush_dep_parent[u]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children <
+ entry_ptr->flush_dep_parent[u]->flush_dep_nchildren);
+
+ /* increment the parent's number of unserialized children */
+ entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children++;
+
+ /* If the parent has a 'notify' callback, send a 'child entry unserialized' notice */
+ if (entry_ptr->flush_dep_parent[u]->type->notify &&
+ (entry_ptr->flush_dep_parent[u]->type->notify)(H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED,
+ entry_ptr->flush_dep_parent[u]) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
+ "can't notify parent about child entry serialized flag reset")
+ } /* end for */
done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__mark_flush_dep_unserialized() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__mark_flush_dep_unserialized() */
#ifndef NDEBUG
- /*-------------------------------------------------------------------------
- * Function: H5C__assert_flush_dep_nocycle()
- *
- * Purpose: Assert recursively that base_entry is not the same as
- * entry, and perform the same assertion on all of entry's
- * flush dependency parents. This is used to detect cycles
- * created by flush dependencies.
- *
- * Return: void
- *
- * Programmer: Neil Fortner
- * 12/10/12
- *
- *-------------------------------------------------------------------------
- */
- static void H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t *entry,
- const H5C_cache_entry_t *base_entry)
- {
- unsigned u; /* Local index variable */
+/*-------------------------------------------------------------------------
+ * Function: H5C__assert_flush_dep_nocycle()
+ *
+ * Purpose: Assert recursively that base_entry is not the same as
+ * entry, and perform the same assertion on all of entry's
+ * flush dependency parents. This is used to detect cycles
+ * created by flush dependencies.
+ *
+ * Return: void
+ *
+ * Programmer: Neil Fortner
+ * 12/10/12
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t *entry, const H5C_cache_entry_t *base_entry)
+{
+ unsigned u; /* Local index variable */
- FUNC_ENTER_STATIC_NOERR
+ FUNC_ENTER_STATIC_NOERR
- /* Sanity checks */
- HDassert(entry);
- HDassert(base_entry);
+ /* Sanity checks */
+ HDassert(entry);
+ HDassert(base_entry);
- /* Make sure the entries are not the same */
- HDassert(base_entry != entry);
+ /* Make sure the entries are not the same */
+ HDassert(base_entry != entry);
- /* Iterate over entry's parents (if any) */
- for (u = 0; u < entry->flush_dep_nparents; u++)
- H5C__assert_flush_dep_nocycle(entry->flush_dep_parent[u], base_entry);
+ /* Iterate over entry's parents (if any) */
+ for (u = 0; u < entry->flush_dep_nparents; u++)
+ H5C__assert_flush_dep_nocycle(entry->flush_dep_parent[u], base_entry);
- FUNC_LEAVE_NOAPI_VOID
- } /* H5C__assert_flush_dep_nocycle() */
+ FUNC_LEAVE_NOAPI_VOID
+} /* H5C__assert_flush_dep_nocycle() */
#endif /* NDEBUG */
- /*-------------------------------------------------------------------------
- * Function: H5C__serialize_cache
- *
- * Purpose: Serialize (i.e. construct an on disk image) for all entries
- * in the metadata cache including clean entries.
- *
- * Note that flush dependencies and "flush me last" flags
- * must be observed in the serialization process.
- *
- * Note also that entries may be loaded, flushed, evicted,
- * expunged, relocated, resized, or removed from the cache
- * during this process, just as these actions may occur during
- * a regular flush.
- *
- * However, we are given that the cache will contain no protected
- * entries on entry to this routine (although entries may be
- * briefly protected and then unprotected during the serialize
- * process).
- *
- * The objective of this routine is serialize all entries and
- * to force all entries into their actual locations on disk.
- *
- * The initial need for this routine is to settle all entries
- * in the cache prior to construction of the metadata cache
- * image so that the size of the cache image can be calculated.
- * However, I gather that other uses for the routine are
- * under consideration.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * a request to flush all items and something was protected.
- *
- * Programmer: John Mainzer
- * 7/22/15
- *
- *-------------------------------------------------------------------------
- */
- herr_t H5C__serialize_cache(H5F_t * f)
- {
+/*-------------------------------------------------------------------------
+ * Function: H5C__serialize_cache
+ *
+ * Purpose: Serialize (i.e. construct an on disk image) for all entries
+ * in the metadata cache including clean entries.
+ *
+ * Note that flush dependencies and "flush me last" flags
+ * must be observed in the serialization process.
+ *
+ * Note also that entries may be loaded, flushed, evicted,
+ * expunged, relocated, resized, or removed from the cache
+ * during this process, just as these actions may occur during
+ * a regular flush.
+ *
+ * However, we are given that the cache will contain no protected
+ * entries on entry to this routine (although entries may be
+ * briefly protected and then unprotected during the serialize
+ * process).
+ *
+ * The objective of this routine is to serialize all entries and
+ * to force all entries into their actual locations on disk.
+ *
+ * The initial need for this routine is to settle all entries
+ * in the cache prior to construction of the metadata cache
+ * image so that the size of the cache image can be calculated.
+ * However, I gather that other uses for the routine are
+ * under consideration.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 7/22/15
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__serialize_cache(H5F_t *f)
+{
#if H5C_DO_SANITY_CHECKS
- int i;
- uint32_t index_len = 0;
- size_t index_size = (size_t)0;
- size_t clean_index_size = (size_t)0;
- size_t dirty_index_size = (size_t)0;
- size_t slist_size = (size_t)0;
- uint32_t slist_len = 0;
+ int i;
+ uint32_t index_len = 0;
+ size_t index_size = (size_t)0;
+ size_t clean_index_size = (size_t)0;
+ size_t dirty_index_size = (size_t)0;
+ size_t slist_size = (size_t)0;
+ uint32_t slist_len = 0;
#endif /* H5C_DO_SANITY_CHECKS */
- H5C_ring_t ring;
- H5C_t * cache_ptr;
- herr_t ret_value = SUCCEED;
+ H5C_ring_t ring;
+ H5C_t * cache_ptr;
+ herr_t ret_value = SUCCEED;
- FUNC_ENTER_PACKAGE
+ FUNC_ENTER_PACKAGE
- /* Sanity checks */
- HDassert(f);
- HDassert(f->shared);
- cache_ptr = f->shared->cache;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(cache_ptr->slist_ptr);
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_ptr);
#if H5C_DO_SANITY_CHECKS
- HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
- HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
- HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
- for (i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
- index_len += cache_ptr->index_ring_len[i];
- index_size += cache_ptr->index_ring_size[i];
- clean_index_size += cache_ptr->clean_index_ring_size[i];
- dirty_index_size += cache_ptr->dirty_index_ring_size[i];
+ for (i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
+ index_len += cache_ptr->index_ring_len[i];
+ index_size += cache_ptr->index_ring_size[i];
+ clean_index_size += cache_ptr->clean_index_ring_size[i];
+ dirty_index_size += cache_ptr->dirty_index_ring_size[i];
- slist_len += cache_ptr->slist_ring_len[i];
- slist_size += cache_ptr->slist_ring_size[i];
- } /* end for */
+ slist_len += cache_ptr->slist_ring_len[i];
+ slist_size += cache_ptr->slist_ring_size[i];
+ } /* end for */
- HDassert(cache_ptr->index_len == index_len);
- HDassert(cache_ptr->index_size == index_size);
- HDassert(cache_ptr->clean_index_size == clean_index_size);
- HDassert(cache_ptr->dirty_index_size == dirty_index_size);
- HDassert(cache_ptr->slist_len == slist_len);
- HDassert(cache_ptr->slist_size == slist_size);
+ HDassert(cache_ptr->index_len == index_len);
+ HDassert(cache_ptr->index_size == index_size);
+ HDassert(cache_ptr->clean_index_size == clean_index_size);
+ HDassert(cache_ptr->dirty_index_size == dirty_index_size);
+ HDassert(cache_ptr->slist_len == slist_len);
+ HDassert(cache_ptr->slist_size == slist_size);
#endif /* H5C_DO_SANITY_CHECKS */
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+ if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
#ifndef NDEBUG
- /* if this is a debug build, set the serialization_count field of
- * each entry in the cache to zero before we start the serialization.
- * This allows us to detect the case in which any entry is serialized
- * more than once (a performance issues), and more importantly, the
- * case is which any flush depencency parent is serializes more than
- * once (a correctness issue).
- */
- {
- H5C_cache_entry_t *scan_ptr = NULL;
-
- scan_ptr = cache_ptr->il_head;
- while (scan_ptr != NULL) {
- HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- scan_ptr->serialization_count = 0;
- scan_ptr = scan_ptr->il_next;
- } /* end while */
- } /* end block */
-#endif /* NDEBUG */
-
- /* set cache_ptr->serialization_in_progress to TRUE, and back
- * to FALSE at the end of the function. Must maintain this flag
- * to support H5C_get_serialization_in_progress(), which is in
- * turn required to support sanity checking in some cache
- * clients.
- */
- HDassert(!cache_ptr->serialization_in_progress);
- cache_ptr->serialization_in_progress = TRUE;
+ /* if this is a debug build, set the serialization_count field of
+ * each entry in the cache to zero before we start the serialization.
+ * This allows us to detect the case in which any entry is serialized
+ * more than once (a performance issue), and more importantly, the
+ * case in which any flush dependency parent is serialized more than
+ * once (a correctness issue).
+ */
+ {
+ H5C_cache_entry_t *scan_ptr = NULL;
- /* Serialize each ring, starting from the outermost ring and
- * working inward.
- */
- ring = H5C_RING_USER;
- while (ring < H5C_RING_NTYPES) {
- HDassert(cache_ptr->close_warning_received);
- switch (ring) {
- case H5C_RING_USER:
- break;
+ scan_ptr = cache_ptr->il_head;
+ while (scan_ptr != NULL) {
+ HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ scan_ptr->serialization_count = 0;
+ scan_ptr = scan_ptr->il_next;
+ } /* end while */
+ } /* end block */
+#endif /* NDEBUG */
- case H5C_RING_RDFSM:
- /* Settle raw data FSM */
- if (!cache_ptr->rdfsm_settled)
- if (H5MF_settle_raw_data_fsm(f, &cache_ptr->rdfsm_settled) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "RD FSM settle failed")
- break;
+ /* set cache_ptr->serialization_in_progress to TRUE, and back
+ * to FALSE at the end of the function. Must maintain this flag
+ * to support H5C_get_serialization_in_progress(), which is in
+ * turn required to support sanity checking in some cache
+ * clients.
+ */
+ HDassert(!cache_ptr->serialization_in_progress);
+ cache_ptr->serialization_in_progress = TRUE;
- case H5C_RING_MDFSM:
- /* Settle metadata FSM */
- if (!cache_ptr->mdfsm_settled)
- if (H5MF_settle_meta_data_fsm(f, &cache_ptr->mdfsm_settled) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "MD FSM settle failed")
- break;
+ /* Serialize each ring, starting from the outermost ring and
+ * working inward.
+ */
+ ring = H5C_RING_USER;
+ while (ring < H5C_RING_NTYPES) {
+ HDassert(cache_ptr->close_warning_received);
+ switch (ring) {
+ case H5C_RING_USER:
+ break;
- case H5C_RING_SBE:
- case H5C_RING_SB:
- break;
+ case H5C_RING_RDFSM:
+ /* Settle raw data FSM */
+ if (!cache_ptr->rdfsm_settled)
+ if (H5MF_settle_raw_data_fsm(f, &cache_ptr->rdfsm_settled) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "RD FSM settle failed")
+ break;
- default:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown ring?!?!")
- break;
- } /* end switch */
+ case H5C_RING_MDFSM:
+ /* Settle metadata FSM */
+ if (!cache_ptr->mdfsm_settled)
+ if (H5MF_settle_meta_data_fsm(f, &cache_ptr->mdfsm_settled) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "MD FSM settle failed")
+ break;
+
+ case H5C_RING_SBE:
+ case H5C_RING_SB:
+ break;
- if (H5C__serialize_ring(f, ring) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "serialize ring failed")
+ default:
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown ring?!?!")
+ break;
+ } /* end switch */
- ring++;
- } /* end while */
+ if (H5C__serialize_ring(f, ring) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "serialize ring failed")
+
+ ring++;
+ } /* end while */
#ifndef NDEBUG
- /* Verify that no entry has been serialized more than once.
- * FD parents with multiple serializations should have been caught
- * elsewhere, so no specific check for them here.
- */
- {
- H5C_cache_entry_t *scan_ptr = NULL;
+ /* Verify that no entry has been serialized more than once.
+ * FD parents with multiple serializations should have been caught
+ * elsewhere, so no specific check for them here.
+ */
+ {
+ H5C_cache_entry_t *scan_ptr = NULL;
- scan_ptr = cache_ptr->il_head;
- while (scan_ptr != NULL) {
- HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(scan_ptr->serialization_count <= 1);
+ scan_ptr = cache_ptr->il_head;
+ while (scan_ptr != NULL) {
+ HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(scan_ptr->serialization_count <= 1);
- scan_ptr = scan_ptr->il_next;
- } /* end while */
- } /* end block */
-#endif /* NDEBUG */
+ scan_ptr = scan_ptr->il_next;
+ } /* end while */
+ } /* end block */
+#endif /* NDEBUG */
done:
- cache_ptr->serialization_in_progress = FALSE;
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__serialize_cache() */
+ cache_ptr->serialization_in_progress = FALSE;
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__serialize_cache() */
- /*-------------------------------------------------------------------------
- * Function: H5C__serialize_ring
- *
- * Purpose: Serialize the entries contained in the specified cache and
- * ring. All entries in rings outside the specified ring
- * must have been serialized on entry.
- *
- * If the cache contains protected entries in the specified
- * ring, the function will fail, as protected entries cannot
- * be serialized. However all unprotected entries in the
- * target ring should be serialized before the function
- * returns failure.
- *
- * If flush dependencies appear in the target ring, the
- * function makes repeated passes through the index list
- * serializing entries in flush dependency order.
- *
- * All entries outside the H5C_RING_SBE are marked for
- * inclusion in the cache image. Entries in H5C_RING_SBE
- * and below are marked for exclusion from the image.
- *
- * Return: Non-negative on success/Negative on failure or if there was
- * a request to flush all items and something was protected.
- *
- * Programmer: John Mainzer
- * 9/11/15
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__serialize_ring(H5F_t * f, H5C_ring_t ring)
- {
- hbool_t done = FALSE;
- H5C_t * cache_ptr;
- H5C_cache_entry_t *entry_ptr;
- herr_t ret_value = SUCCEED;
+/*-------------------------------------------------------------------------
+ * Function: H5C__serialize_ring
+ *
+ * Purpose: Serialize the entries contained in the specified cache and
+ * ring. All entries in rings outside the specified ring
+ * must have been serialized on entry.
+ *
+ * If the cache contains protected entries in the specified
+ * ring, the function will fail, as protected entries cannot
+ * be serialized. However all unprotected entries in the
+ * target ring should be serialized before the function
+ * returns failure.
+ *
+ * If flush dependencies appear in the target ring, the
+ * function makes repeated passes through the index list
+ * serializing entries in flush dependency order.
+ *
+ * All entries outside the H5C_RING_SBE are marked for
+ * inclusion in the cache image. Entries in H5C_RING_SBE
+ * and below are marked for exclusion from the image.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 9/11/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__serialize_ring(H5F_t *f, H5C_ring_t ring)
+{
+ hbool_t done = FALSE;
+ H5C_t * cache_ptr;
+ H5C_cache_entry_t *entry_ptr;
+ herr_t ret_value = SUCCEED;
- FUNC_ENTER_STATIC
+ FUNC_ENTER_STATIC
- /* Sanity checks */
- HDassert(f);
- HDassert(f->shared);
- cache_ptr = f->shared->cache;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(ring > H5C_RING_UNDEFINED);
- HDassert(ring < H5C_RING_NTYPES);
-
- HDassert(cache_ptr->serialization_in_progress);
-
- /* The objective here is to serialize all entries in the cache ring
- * in flush dependency order.
- *
- * The basic algorithm is to scan the cache index list looking for
- * unserialized entries that are either not in a flush dependency
- * relationship, or which have no unserialized children. Any such
- * entry is serialized and its flush dependency parents (if any) are
- * informed -- allowing them to decrement their userialized child counts.
- *
- * However, this algorithm is complicated by the ability
- * of client serialization callbacks to perform operations on
- * on the cache which can result in the insertion, deletion,
- * relocation, resize, dirty, flush, eviction, or removal (via the
- * take ownership flag) of entries. Changes in the flush dependency
- * structure are also possible.
- *
- * On the other hand, the algorithm is simplified by the fact that
- * we are serializing, not flushing. Thus, as long as all entries
- * are serialized correctly, it doesn't matter if we have to go back
- * and serialize an entry a second time.
- *
- * These possible actions result in the following modfications to
- * tha basic algorithm:
- *
- * 1) In the event of an entry expunge, eviction or removal, we must
- * restart the scan as it is possible that the next entry in our
- * scan is no longer in the cache. Were we to examine this entry,
- * we would be accessing deallocated memory.
- *
- * 2) A resize, dirty, or insertion of an entry may result in the
- * the increment of a flush dependency parent's dirty and/or
- * unserialized child count. In the context of serializing the
- * the cache, this is a non-issue, as even if we have already
- * serialized the parent, it will be marked dirty and its image
- * marked out of date if appropriate when the child is serialized.
- *
- * However, this is a major issue for a flush, as were this to happen
- * in a flush, it would violate the invariant that the flush dependency
- * feature is intended to enforce. As the metadata cache has no
- * control over the behavior of cache clients, it has no way of
- * preventing this behaviour. However, it should detect it if at all
- * possible.
- *
- * Do this by maintaining a count of the number of times each entry is
- * serialized during a cache serialization. If any flush dependency
- * parent is serialized more than once, throw an assertion failure.
- *
- * 3) An entry relocation will typically change the location of the
- * entry in the index list. This shouldn't cause problems as we
- * will scan the index list until we make a complete pass without
- * finding anything to serialize -- making relocations of either
- * the current or next entries irrelevant.
- *
- * Note that since a relocation may result in our skipping part of
- * the index list, we must always do at least one more pass through
- * the index list after an entry relocation.
- *
- * 4) Changes in the flush dependency structure are possible on
- * entry insertion, load, expunge, evict, or remove. Destruction
- * of a flush dependency has no effect, as it can only relax the
- * flush dependencies. Creation of a flush dependency can create
- * an unserialized child of a flush dependency parent where all
- * flush dependency children were previously serialized. Should
- * this child dirty the flush dependency parent when it is serialized,
- * the parent will be re-serialized.
- *
- * Per the discussion of 2) above, this is a non issue for cache
- * serialization, and a major problem for cache flush. Using the
- * same detection mechanism, throw an assertion failure if this
- * condition appears.
- *
- * Observe that either eviction or removal of entries as a result of
- * a serialization is not a problem as long as the flush depencency
- * tree does not change beyond the removal of a leaf.
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(ring > H5C_RING_UNDEFINED);
+ HDassert(ring < H5C_RING_NTYPES);
+
+ HDassert(cache_ptr->serialization_in_progress);
+
+ /* The objective here is to serialize all entries in the cache ring
+ * in flush dependency order.
+ *
+ * The basic algorithm is to scan the cache index list looking for
+ * unserialized entries that are either not in a flush dependency
+ * relationship, or which have no unserialized children. Any such
+ * entry is serialized and its flush dependency parents (if any) are
+ * informed -- allowing them to decrement their unserialized child counts.
+ *
+ * However, this algorithm is complicated by the ability
+ * of client serialization callbacks to perform operations on
+ * on the cache which can result in the insertion, deletion,
+ * relocation, resize, dirty, flush, eviction, or removal (via the
+ * take ownership flag) of entries. Changes in the flush dependency
+ * structure are also possible.
+ *
+ * On the other hand, the algorithm is simplified by the fact that
+ * we are serializing, not flushing. Thus, as long as all entries
+ * are serialized correctly, it doesn't matter if we have to go back
+ * and serialize an entry a second time.
+ *
+ * These possible actions result in the following modifications to
+ * the basic algorithm:
+ *
+ * 1) In the event of an entry expunge, eviction or removal, we must
+ * restart the scan as it is possible that the next entry in our
+ * scan is no longer in the cache. Were we to examine this entry,
+ * we would be accessing deallocated memory.
+ *
+ * 2) A resize, dirty, or insertion of an entry may result in the
+ * the increment of a flush dependency parent's dirty and/or
+ * unserialized child count. In the context of serializing the
+ * the cache, this is a non-issue, as even if we have already
+ * serialized the parent, it will be marked dirty and its image
+ * marked out of date if appropriate when the child is serialized.
+ *
+ * However, this is a major issue for a flush, as were this to happen
+ * in a flush, it would violate the invariant that the flush dependency
+ * feature is intended to enforce. As the metadata cache has no
+ * control over the behavior of cache clients, it has no way of
+ * preventing this behaviour. However, it should detect it if at all
+ * possible.
+ *
+ * Do this by maintaining a count of the number of times each entry is
+ * serialized during a cache serialization. If any flush dependency
+ * parent is serialized more than once, throw an assertion failure.
+ *
+ * 3) An entry relocation will typically change the location of the
+ * entry in the index list. This shouldn't cause problems as we
+ * will scan the index list until we make a complete pass without
+ * finding anything to serialize -- making relocations of either
+ * the current or next entries irrelevant.
+ *
+ * Note that since a relocation may result in our skipping part of
+ * the index list, we must always do at least one more pass through
+ * the index list after an entry relocation.
+ *
+ * 4) Changes in the flush dependency structure are possible on
+ * entry insertion, load, expunge, evict, or remove. Destruction
+ * of a flush dependency has no effect, as it can only relax the
+ * flush dependencies. Creation of a flush dependency can create
+ * an unserialized child of a flush dependency parent where all
+ * flush dependency children were previously serialized. Should
+ * this child dirty the flush dependency parent when it is serialized,
+ * the parent will be re-serialized.
+ *
+ * Per the discussion of 2) above, this is a non issue for cache
+ * serialization, and a major problem for cache flush. Using the
+ * same detection mechanism, throw an assertion failure if this
+ * condition appears.
+ *
+ * Observe that either eviction or removal of entries as a result of
+ * a serialization is not a problem as long as the flush dependency
+ * tree does not change beyond the removal of a leaf.
+ */
+ while (!done) {
+ /* Reset the counters so that we can detect insertions, loads,
+ * moves, and flush dependency height changes caused by the pre_serialize
+ * and serialize callbacks.
*/
- while (!done) {
- /* Reset the counters so that we can detect insertions, loads,
- * moves, and flush dependency height changes caused by the pre_serialize
- * and serialize callbacks.
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
+
+ done = TRUE; /* set to FALSE if any activity in inner loop */
+ entry_ptr = cache_ptr->il_head;
+ while (entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+
+ /* Verify that either the entry is already serialized, or
+ * that it is assigned to either the target or an inner
+ * ring.
*/
- cache_ptr->entries_loaded_counter = 0;
- cache_ptr->entries_inserted_counter = 0;
- cache_ptr->entries_relocated_counter = 0;
-
- done = TRUE; /* set to FALSE if any activity in inner loop */
- entry_ptr = cache_ptr->il_head;
- while (entry_ptr != NULL) {
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
-
- /* Verify that either the entry is already serialized, or
- * that it is assigned to either the target or an inner
- * ring.
- */
- HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));
+ HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));
- /* Skip flush me last entries or inner ring entries */
- if (!entry_ptr->flush_me_last && entry_ptr->ring == ring) {
+ /* Skip flush me last entries or inner ring entries */
+ if (!entry_ptr->flush_me_last && entry_ptr->ring == ring) {
- /* if we encounter an unserialized entry in the current
- * ring that is not marked flush me last, we are not done.
- */
- if (!entry_ptr->image_up_to_date)
- done = FALSE;
+ /* if we encounter an unserialized entry in the current
+ * ring that is not marked flush me last, we are not done.
+ */
+ if (!entry_ptr->image_up_to_date)
+ done = FALSE;
- /* Serialize the entry if its image is not up to date
- * and it has no unserialized flush dependency children.
- */
- if (!entry_ptr->image_up_to_date && entry_ptr->flush_dep_nunser_children == 0) {
- HDassert(entry_ptr->serialization_count == 0);
+ /* Serialize the entry if its image is not up to date
+ * and it has no unserialized flush dependency children.
+ */
+ if (!entry_ptr->image_up_to_date && entry_ptr->flush_dep_nunser_children == 0) {
+ HDassert(entry_ptr->serialization_count == 0);
- /* Serialize the entry */
- if (H5C__serialize_single_entry(f, cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")
+ /* Serialize the entry */
+ if (H5C__serialize_single_entry(f, cache_ptr, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")
- HDassert(entry_ptr->flush_dep_nunser_children == 0);
- HDassert(entry_ptr->serialization_count == 0);
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
+ HDassert(entry_ptr->serialization_count == 0);
#ifndef NDEBUG
- /* Increment serialization counter (to detect multiple serializations) */
- entry_ptr->serialization_count++;
-#endif /* NDEBUG */
- } /* end if */
- } /* end if */
+ /* Increment serialization counter (to detect multiple serializations) */
+ entry_ptr->serialization_count++;
+#endif /* NDEBUG */
+ } /* end if */
+ } /* end if */
- /* Check for the cache being perturbed during the entry serialize */
- if ((cache_ptr->entries_loaded_counter > 0) || (cache_ptr->entries_inserted_counter > 0) ||
- (cache_ptr->entries_relocated_counter > 0)) {
+ /* Check for the cache being perturbed during the entry serialize */
+ if ((cache_ptr->entries_loaded_counter > 0) || (cache_ptr->entries_inserted_counter > 0) ||
+ (cache_ptr->entries_relocated_counter > 0)) {
#if H5C_COLLECT_CACHE_STATS
- H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr);
+ H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr);
#endif /* H5C_COLLECT_CACHE_STATS */
- /* Reset the counters */
- cache_ptr->entries_loaded_counter = 0;
- cache_ptr->entries_inserted_counter = 0;
- cache_ptr->entries_relocated_counter = 0;
+ /* Reset the counters */
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
- /* Restart scan */
- entry_ptr = cache_ptr->il_head;
- } /* end if */
- else
- /* Advance to next entry */
- entry_ptr = entry_ptr->il_next;
- } /* while ( entry_ptr != NULL ) */
- } /* while ( ! done ) */
+ /* Restart scan */
+ entry_ptr = cache_ptr->il_head;
+ } /* end if */
+ else
+ /* Advance to next entry */
+ entry_ptr = entry_ptr->il_next;
+ } /* while ( entry_ptr != NULL ) */
+ } /* while ( ! done ) */
- /* Reset the counters so that we can detect insertions, loads,
- * moves, and flush dependency height changes caused by the pre_serialize
- * and serialize callbacks.
- */
- cache_ptr->entries_loaded_counter = 0;
- cache_ptr->entries_inserted_counter = 0;
- cache_ptr->entries_relocated_counter = 0;
+ /* Reset the counters so that we can detect insertions, loads,
+ * moves, and flush dependency height changes caused by the pre_serialize
+ * and serialize callbacks.
+ */
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
+
+ /* At this point, all entries not marked "flush me last" and in
+ * the current ring or outside it should be serialized and have up
+ * to date images. Scan the index list again to serialize the
+ * "flush me last" entries (if they are in the current ring) and to
+ * verify that all other entries have up to date images.
+ */
+ entry_ptr = cache_ptr->il_head;
+ while (entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring > H5C_RING_UNDEFINED);
+ HDassert(entry_ptr->ring < H5C_RING_NTYPES);
+ HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));
+
+ if (entry_ptr->ring == ring) {
+ if (entry_ptr->flush_me_last) {
+ if (!entry_ptr->image_up_to_date) {
+ HDassert(entry_ptr->serialization_count == 0);
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
- /* At this point, all entries not marked "flush me last" and in
- * the current ring or outside it should be serialized and have up
- * to date images. Scan the index list again to serialize the
- * "flush me last" entries (if they are in the current ring) and to
- * verify that all other entries have up to date images.
- */
- entry_ptr = cache_ptr->il_head;
- while (entry_ptr != NULL) {
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->ring > H5C_RING_UNDEFINED);
- HDassert(entry_ptr->ring < H5C_RING_NTYPES);
- HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));
+ /* Serialize the entry */
+ if (H5C__serialize_single_entry(f, cache_ptr, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")
+
+ /* Check for the cache changing */
+ if ((cache_ptr->entries_loaded_counter > 0) ||
+ (cache_ptr->entries_inserted_counter > 0) ||
+ (cache_ptr->entries_relocated_counter > 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
+ "flush_me_last entry serialization triggered restart")
- if (entry_ptr->ring == ring) {
- if (entry_ptr->flush_me_last) {
- if (!entry_ptr->image_up_to_date) {
- HDassert(entry_ptr->serialization_count == 0);
- HDassert(entry_ptr->flush_dep_nunser_children == 0);
-
- /* Serialize the entry */
- if (H5C__serialize_single_entry(f, cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")
-
- /* Check for the cache changing */
- if ((cache_ptr->entries_loaded_counter > 0) ||
- (cache_ptr->entries_inserted_counter > 0) ||
- (cache_ptr->entries_relocated_counter > 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL,
- "flush_me_last entry serialization triggered restart")
-
- HDassert(entry_ptr->flush_dep_nunser_children == 0);
- HDassert(entry_ptr->serialization_count == 0);
-#ifndef NDEBUG
- /* Increment serialization counter (to detect multiple serializations) */
- entry_ptr->serialization_count++;
-#endif /* NDEBUG */
- } /* end if */
- } /* end if */
- else {
- HDassert(entry_ptr->image_up_to_date);
- HDassert(entry_ptr->serialization_count <= 1);
HDassert(entry_ptr->flush_dep_nunser_children == 0);
- } /* end else */
- } /* if ( entry_ptr->ring == ring ) */
+ HDassert(entry_ptr->serialization_count == 0);
+#ifndef NDEBUG
+ /* Increment serialization counter (to detect multiple serializations) */
+ entry_ptr->serialization_count++;
+#endif /* NDEBUG */
+ } /* end if */
+ } /* end if */
+ else {
+ HDassert(entry_ptr->image_up_to_date);
+ HDassert(entry_ptr->serialization_count <= 1);
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
+ } /* end else */
+ } /* if ( entry_ptr->ring == ring ) */
- entry_ptr = entry_ptr->il_next;
- } /* while ( entry_ptr != NULL ) */
+ entry_ptr = entry_ptr->il_next;
+ } /* while ( entry_ptr != NULL ) */
done:
- HDassert(cache_ptr->serialization_in_progress);
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__serialize_ring() */
+ HDassert(cache_ptr->serialization_in_progress);
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__serialize_ring() */
- /*-------------------------------------------------------------------------
- * Function: H5C__serialize_single_entry
- *
- * Purpose: Serialize the cache entry pointed to by the entry_ptr
- * parameter.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer, 7/24/15
- *
- *-------------------------------------------------------------------------
- */
- static herr_t H5C__serialize_single_entry(H5F_t * f, H5C_t * cache_ptr, H5C_cache_entry_t * entry_ptr)
- {
- herr_t ret_value = SUCCEED; /* Return value */
+/*-------------------------------------------------------------------------
+ * Function: H5C__serialize_single_entry
+ *
+ * Purpose: Serialize the cache entry pointed to by the entry_ptr
+ * parameter.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer, 7/24/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__serialize_single_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_STATIC
+ FUNC_ENTER_STATIC
- /* Sanity checks */
- HDassert(f);
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- HDassert(entry_ptr);
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(!entry_ptr->prefetched);
- HDassert(!entry_ptr->image_up_to_date);
- HDassert(entry_ptr->is_dirty);
- HDassert(!entry_ptr->is_protected);
- HDassert(!entry_ptr->flush_in_progress);
- HDassert(entry_ptr->type);
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(entry_ptr);
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(!entry_ptr->prefetched);
+ HDassert(!entry_ptr->image_up_to_date);
+ HDassert(entry_ptr->is_dirty);
+ HDassert(!entry_ptr->is_protected);
+ HDassert(!entry_ptr->flush_in_progress);
+ HDassert(entry_ptr->type);
- /* Set entry_ptr->flush_in_progress to TRUE so the the target entry
- * will not be evicted out from under us. Must set it back to FALSE
- * when we are done.
- */
- entry_ptr->flush_in_progress = TRUE;
+ /* Set entry_ptr->flush_in_progress to TRUE so the the target entry
+ * will not be evicted out from under us. Must set it back to FALSE
+ * when we are done.
+ */
+ entry_ptr->flush_in_progress = TRUE;
- /* Allocate buffer for the entry image if required. */
- if (NULL == entry_ptr->image_ptr) {
- HDassert(entry_ptr->size > 0);
- if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL,
- "memory allocation failed for on disk image buffer")
+ /* Allocate buffer for the entry image if required. */
+ if (NULL == entry_ptr->image_ptr) {
+ HDassert(entry_ptr->size > 0);
+ if (NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer")
#if H5C_DO_MEMORY_SANITY_CHECKS
- H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + image_size, H5C_IMAGE_SANITY_VALUE,
- H5C_IMAGE_EXTRA_SPACE);
-#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
- } /* end if */
+ H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + image_size, H5C_IMAGE_SANITY_VALUE,
+ H5C_IMAGE_EXTRA_SPACE);
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+ } /* end if */
- /* Generate image for entry */
- if (H5C__generate_image(f, cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "Can't generate image for cache entry")
+ /* Generate image for entry */
+ if (H5C__generate_image(f, cache_ptr, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "Can't generate image for cache entry")
- /* Reset the flush_in progress flag */
- entry_ptr->flush_in_progress = FALSE;
+ /* Reset the flush_in progress flag */
+ entry_ptr->flush_in_progress = FALSE;
done:
- HDassert((ret_value != SUCCEED) || (!entry_ptr->flush_in_progress));
- HDassert((ret_value != SUCCEED) || (entry_ptr->image_up_to_date));
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__serialize_single_entry() */
+ HDassert((ret_value != SUCCEED) || (!entry_ptr->flush_in_progress));
+ HDassert((ret_value != SUCCEED) || (entry_ptr->image_up_to_date));
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__serialize_single_entry() */
- /*-------------------------------------------------------------------------
- * Function: H5C__generate_image
- *
- * Purpose: Serialize an entry and generate its image.
- *
- * Note: This may cause the entry to be re-sized and/or moved in
- * the cache.
- *
- * As we will not update the metadata cache's data structures
- * until we we finish the write, we must touch up these
- * data structures for size and location changes even if we
- * are about to delete the entry from the cache (i.e. on a
- * flush destroy).
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Mohamad Chaarawi
- * 2/10/16
- *
- * Changes: Added code to update the page field in the VFD SWMR reader
- * case.
- *
- * JRM -- 12/14/18
- *
- *-------------------------------------------------------------------------
- */
- herr_t H5C__generate_image(H5F_t * f, H5C_t * cache_ptr, H5C_cache_entry_t * entry_ptr)
- {
- haddr_t new_addr = HADDR_UNDEF;
- haddr_t old_addr = HADDR_UNDEF;
- size_t new_len = 0;
- unsigned serialize_flags = H5C__SERIALIZE_NO_FLAGS_SET;
- herr_t ret_value = SUCCEED;
+/*-------------------------------------------------------------------------
+ * Function: H5C__generate_image
+ *
+ * Purpose: Serialize an entry and generate its image.
+ *
+ * Note: This may cause the entry to be re-sized and/or moved in
+ * the cache.
+ *
+ * As we will not update the metadata cache's data structures
+ * until we we finish the write, we must touch up these
+ * data structures for size and location changes even if we
+ * are about to delete the entry from the cache (i.e. on a
+ * flush destroy).
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Mohamad Chaarawi
+ * 2/10/16
+ *
+ * Changes: Added code to update the page field in the VFD SWMR reader
+ * case.
+ *
+ * JRM -- 12/14/18
+ *
+ * Updated sanity checks for the possibility that the skip
+ * list is disabled.
+ * JRM 5/16/20
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
+{
+ haddr_t new_addr = HADDR_UNDEF;
+ haddr_t old_addr = HADDR_UNDEF;
+ size_t new_len = 0;
+ unsigned serialize_flags = H5C__SERIALIZE_NO_FLAGS_SET;
+ herr_t ret_value = SUCCEED;
- FUNC_ENTER_PACKAGE
+ FUNC_ENTER_PACKAGE
- /* Sanity check */
- HDassert(f);
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ /* Sanity check */
+ HDassert(f);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- /* if this is a VFD SWMR reader, verify that the page size is defined */
- HDassert((!cache_ptr->vfd_swmr_reader) || (cache_ptr->page_size > 0));
+ /* if this is a VFD SWMR reader, verify that the page size is defined */
+ HDassert((!cache_ptr->vfd_swmr_reader) || (cache_ptr->page_size > 0));
- HDassert(entry_ptr);
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(!entry_ptr->image_up_to_date);
- HDassert(entry_ptr->is_dirty);
- HDassert(!entry_ptr->is_protected);
- HDassert(entry_ptr->type);
+ HDassert(entry_ptr);
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(!entry_ptr->image_up_to_date);
+ HDassert(entry_ptr->is_dirty);
+ HDassert(!entry_ptr->is_protected);
+ HDassert(entry_ptr->type);
- /* make note of the entry's current address */
- old_addr = entry_ptr->addr;
+ /* make note of the entry's current address */
+ old_addr = entry_ptr->addr;
- /* Call client's pre-serialize callback, if there's one */
- if (entry_ptr->type->pre_serialize &&
- (entry_ptr->type->pre_serialize)(f, (void *)entry_ptr, entry_ptr->addr, entry_ptr->size,
- &new_addr, &new_len, &serialize_flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to pre-serialize entry")
+ /* Call client's pre-serialize callback, if there's one */
+ if ((entry_ptr->type->pre_serialize) &&
+ ((entry_ptr->type->pre_serialize)(f, (void *)entry_ptr, entry_ptr->addr, entry_ptr->size, &new_addr,
+ &new_len, &serialize_flags) < 0))
- /* Check for any flags set in the pre-serialize callback */
- if (serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to pre-serialize entry")
- /* Check for unexpected flags from serialize callback */
- if (serialize_flags & ~(H5C__SERIALIZE_RESIZED_FLAG | H5C__SERIALIZE_MOVED_FLAG))
+ /* Check for any flags set in the pre-serialize callback */
+ if (serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) {
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unknown serialize flag(s)")
+ /* Check for unexpected flags from serialize callback */
+ if (serialize_flags & ~(H5C__SERIALIZE_RESIZED_FLAG | H5C__SERIALIZE_MOVED_FLAG))
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unknown serialize flag(s)")
#ifdef H5_HAVE_PARALLEL
- /* In the parallel case, resizes and moves in
- * the serialize operation can cause problems.
- * If they occur, scream and die.
- *
- * At present, in the parallel case, the aux_ptr
- * will only be set if there is more than one
- * process. Thus we can use this to detect
- * the parallel case.
- *
- * This works for now, but if we start using the
- * aux_ptr for other purposes, we will have to
- * change this test accordingly.
- *
- * NB: While this test detects entryies that attempt
- * to resize or move themselves during a flush
- * in the parallel case, it will not detect an
- * entry that dirties, resizes, and/or moves
- * other entries during its flush.
- *
- * From what Quincey tells me, this test is
- * sufficient for now, as any flush routine that
- * does the latter will also do the former.
- *
- * If that ceases to be the case, further
- * tests will be necessary.
- */
- if (cache_ptr->aux_ptr != NULL)
+ /* In the parallel case, resizes and moves in
+ * the serialize operation can cause problems.
+ * If they occur, scream and die.
+ *
+ * At present, in the parallel case, the aux_ptr
+ * will only be set if there is more than one
+ * process. Thus we can use this to detect
+ * the parallel case.
+ *
+ * This works for now, but if we start using the
+ * aux_ptr for other purposes, we will have to
+ * change this test accordingly.
+ *
+ * NB: While this test detects entryies that attempt
+ * to resize or move themselves during a flush
+ * in the parallel case, it will not detect an
+ * entry that dirties, resizes, and/or moves
+ * other entries during its flush.
+ *
+ * From what Quincey tells me, this test is
+ * sufficient for now, as any flush routine that
+ * does the latter will also do the former.
+ *
+ * If that ceases to be the case, further
+ * tests will be necessary.
+ */
+ if (cache_ptr->aux_ptr != NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "resize/move in serialize occurred in parallel case")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "resize/move in serialize occurred in parallel case")
#endif
- /* If required, resize the buffer and update the entry and the cache
- * data structures
- */
- if (serialize_flags & H5C__SERIALIZE_RESIZED_FLAG) {
+ /* If required, resize the buffer and update the entry and the cache
+ * data structures
+ */
+ if (serialize_flags & H5C__SERIALIZE_RESIZED_FLAG) {
- /* Sanity check */
- HDassert(new_len > 0);
+ /* Sanity check */
+ HDassert(new_len > 0);
- /* Allocate a new image buffer */
- if (NULL == (entry_ptr->image_ptr =
- H5MM_realloc(entry_ptr->image_ptr, new_len + H5C_IMAGE_EXTRA_SPACE)))
+ /* Allocate a new image buffer */
+ if (NULL ==
+ (entry_ptr->image_ptr = H5MM_realloc(entry_ptr->image_ptr, new_len + H5C_IMAGE_EXTRA_SPACE)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL,
- "memory allocation failed for on disk image buffer")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL,
+ "memory allocation failed for on disk image buffer")
#if H5C_DO_MEMORY_SANITY_CHECKS
- H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + new_len, H5C_IMAGE_SANITY_VALUE,
- H5C_IMAGE_EXTRA_SPACE);
+ H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + new_len, H5C_IMAGE_SANITY_VALUE,
+ H5C_IMAGE_EXTRA_SPACE);
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
- /* Update statistics for resizing the entry */
- H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_len);
+ /* Update statistics for resizing the entry */
+ H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_len);
- /* Update the hash table for the size change */
- H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len, entry_ptr,
- !(entry_ptr->is_dirty));
+ /* Update the hash table for the size change */
+ H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len, entry_ptr,
+ !(entry_ptr->is_dirty));
- /* The entry can't be protected since we are in the process of
- * flushing it. Thus we must update the replacement policy data
- * structures for the size change. The macro deals with the pinned
- * case.
- */
- H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_len);
+ /* The entry can't be protected since we are in the process of
+ * flushing it. Thus we must update the replacement policy data
+ * structures for the size change. The macro deals with the pinned
+ * case.
+ */
+ H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_len);
- /* As we haven't updated the cache data structures for
- * for the flush or flush destroy yet, the entry should
- * be in the slist. Thus update it for the size change.
- */
- HDassert(entry_ptr->is_dirty);
- HDassert(entry_ptr->in_slist);
- H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len);
+ /* As we haven't updated the cache data structures for
+ * for the flush or flush destroy yet, the entry should
+ * be in the slist if the slist is enabled. Since
+ * H5C__UPDATE_SLIST_FOR_SIZE_CHANGE() is a no-op if the
+ * slist is enabled, call it un-conditionally.
+ */
+ HDassert(entry_ptr->is_dirty);
+ HDassert((entry_ptr->in_slist) || (!cache_ptr->slist_enabled));
- /* Finally, update the entry for its new size */
- entry_ptr->size = new_len;
+ H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len);
- } /* end if */
+ /* Finally, update the entry for its new size */
+ entry_ptr->size = new_len;
- /* If required, udate the entry and the cache data structures
- * for a move
- */
- if (serialize_flags & H5C__SERIALIZE_MOVED_FLAG) {
+ } /* end if */
- /* Update stats and entries relocated counter */
- H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr)
+ /* If required, udate the entry and the cache data structures
+ * for a move
+ */
+ if (serialize_flags & H5C__SERIALIZE_MOVED_FLAG) {
- /* We must update cache data structures for the change in address */
- if (entry_ptr->addr == old_addr) {
+ /* Update stats and entries relocated counter */
+ H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr)
- /* Delete the entry from the hash table and the slist */
- H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL);
- H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE);
+ /* We must update cache data structures for the change in address */
+ if (entry_ptr->addr == old_addr) {
- /* Update the entry for its new address */
- entry_ptr->addr = new_addr;
+ /* Delete the entry from the hash table and the slist */
+ H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL);
+ H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE);
- /* In the VFD SWMR reader case, update the entry page field */
- if (cache_ptr->vfd_swmr_reader) {
+ /* Update the entry for its new address */
+ entry_ptr->addr = new_addr;
- entry_ptr->page = (new_addr / cache_ptr->page_size);
- }
+ /* In the VFD SWMR reader case, update the entry page field */
+ if (cache_ptr->vfd_swmr_reader) {
- /* And then reinsert in the index and slist */
- H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL);
- H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL);
+ entry_ptr->page = (new_addr / cache_ptr->page_size);
}
- else { /* move is already done for us -- just do sanity checks */
- HDassert(entry_ptr->addr == new_addr);
- HDassert((!cache_ptr->vfd_swmr_reader) ||
- (entry_ptr->page == (entry_ptr->addr / cache_ptr->page_size)));
- }
- } /* end if */
- } /* end if(serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) */
+ /* And then reinsert in the index and slist */
+ H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL);
+ H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL);
+
+ } /* end if */
+ else { /* move is already done for us -- just do sanity checks */
+
+ HDassert(entry_ptr->addr == new_addr);
+ HDassert((!cache_ptr->vfd_swmr_reader) ||
+ (entry_ptr->page == (entry_ptr->addr / cache_ptr->page_size)));
+ }
+ } /* end if */
+ } /* end if(serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) */
- /* Serialize object into buffer */
- if (entry_ptr->type->serialize(f, entry_ptr->image_ptr, entry_ptr->size, (void *)entry_ptr) < 0)
+ /* Serialize object into buffer */
+ if (entry_ptr->type->serialize(f, entry_ptr->image_ptr, entry_ptr->size, (void *)entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to serialize entry")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to serialize entry")
#if H5C_DO_MEMORY_SANITY_CHECKS
- HDassert(0 == HDmemcmp(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE,
- H5C_IMAGE_EXTRA_SPACE));
+ HDassert(0 == HDmemcmp(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE,
+ H5C_IMAGE_EXTRA_SPACE));
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
- entry_ptr->image_up_to_date = TRUE;
+ entry_ptr->image_up_to_date = TRUE;
- /* Propagate the fact that the entry is serialized up the
- * flush dependency chain if appropriate. Since the image must
- * have been out of date for this function to have been called
- * (see assertion on entry), no need to check that -- only check
- * for flush dependency parents.
- */
- HDassert(entry_ptr->flush_dep_nunser_children == 0);
+ /* Propagate the fact that the entry is serialized up the
+ * flush dependency chain if appropriate. Since the image must
+ * have been out of date for this function to have been called
+ * (see assertion on entry), no need to check that -- only check
+ * for flush dependency parents.
+ */
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
- if (entry_ptr->flush_dep_nparents > 0) {
+ if (entry_ptr->flush_dep_nparents > 0) {
- if (H5C__mark_flush_dep_serialized(entry_ptr) < 0)
+ if (H5C__mark_flush_dep_serialized(entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL,
- "Can't propagate serialization status to fd parents")
- }
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "Can't propagate serialization status to fd parents")
+ }
done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__generate_image */
- FUNC_LEAVE_NOAPI(ret_value)
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_remove_entry
+ *
+ * Purpose: Remove an entry from the cache. Must be not protected, pinned,
+ * dirty, involved in flush dependencies, etc.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * September 17, 2016
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_remove_entry(void *_entry)
+{
+ H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_entry; /* Entry to remove */
+ H5C_t * cache; /* Cache for file */
+ herr_t ret_value = SUCCEED; /* Return value */
- } /* H5C__generate_image */
+ FUNC_ENTER_NOAPI(FAIL)
- /*-------------------------------------------------------------------------
- *
- * Function: H5C_remove_entry
- *
- * Purpose: Remove an entry from the cache. Must be not protected, pinned,
- * dirty, involved in flush dependencies, etc.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * September 17, 2016
- *
- *-------------------------------------------------------------------------
+ /* Sanity checks */
+ HDassert(entry);
+ HDassert(entry->ring != H5C_RING_UNDEFINED);
+ cache = entry->cache_ptr;
+ HDassert(cache);
+ HDassert(cache->magic == H5C__H5C_T_MAGIC);
+
+ /* Check for error conditions */
+ if (entry->is_dirty)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove dirty entry from cache")
+ if (entry->is_protected)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove protected entry from cache")
+ if (entry->is_pinned)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove pinned entry from cache")
+ /* NOTE: If these two errors are getting tripped because the entry is
+ * in a flush dependency with a freedspace entry, move the checks
+ * after the "before evict" message is sent, and add the
+ * "child being evicted" message to the "before evict" notify
+ * section below. QAK - 2017/08/03
+ */
+ if (entry->flush_dep_nparents > 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL,
+ "can't remove entry with flush dependency parents from cache")
+ if (entry->flush_dep_nchildren > 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL,
+ "can't remove entry with flush dependency children from cache")
+
+ /* Additional internal cache consistency checks */
+ HDassert(!entry->in_slist);
+ HDassert(!entry->flush_marker);
+ HDassert(!entry->flush_in_progress);
+
+ /* Note that the algorithm below is (very) similar to the set of operations
+ * in H5C__flush_single_entry() and should be kept in sync with changes
+ * to that code. - QAK, 2016/11/30
*/
- herr_t H5C_remove_entry(void *_entry)
- {
- H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_entry; /* Entry to remove */
- H5C_t * cache; /* Cache for file */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- /* Sanity checks */
- HDassert(entry);
- HDassert(entry->ring != H5C_RING_UNDEFINED);
- cache = entry->cache_ptr;
- HDassert(cache);
- HDassert(cache->magic == H5C__H5C_T_MAGIC);
-
- /* Check for error conditions */
- if (entry->is_dirty)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove dirty entry from cache")
- if (entry->is_protected)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove protected entry from cache")
- if (entry->is_pinned)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove pinned entry from cache")
- /* NOTE: If these two errors are getting tripped because the entry is
- * in a flush dependency with a freedspace entry, move the checks
- * after the "before evict" message is sent, and add the
- * "child being evicted" message to the "before evict" notify
- * section below. QAK - 2017/08/03
- */
- if (entry->flush_dep_nparents > 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL,
- "can't remove entry with flush dependency parents from cache")
- if (entry->flush_dep_nchildren > 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL,
- "can't remove entry with flush dependency children from cache")
-
- /* Additional internal cache consistency checks */
- HDassert(!entry->in_slist);
- HDassert(!entry->flush_marker);
- HDassert(!entry->flush_in_progress);
-
- /* Note that the algorithm below is (very) similar to the set of operations
- * in H5C__flush_single_entry() and should be kept in sync with changes
- * to that code. - QAK, 2016/11/30
- */
-
- /* Update stats, as if we are "destroying" and taking ownership of the entry */
- H5C__UPDATE_STATS_FOR_EVICTION(cache, entry, TRUE)
- /* If the entry's type has a 'notify' callback, send a 'before eviction'
- * notice while the entry is still fully integrated in the cache.
- */
- if (entry->type->notify && (entry->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT, entry) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry to evict")
+ /* Update stats, as if we are "destroying" and taking ownership of the entry */
+ H5C__UPDATE_STATS_FOR_EVICTION(cache, entry, TRUE)
- /* Update the cache internal data structures as appropriate for a destroy.
- * Specifically:
- * 1) Delete it from the index
- * 2) Delete it from the collective read access list
- * 3) Update the replacement policy for eviction
- * 4) Remove it from the tag list for this object
- */
+ /* If the entry's type has a 'notify' callback, send a 'before eviction'
+ * notice while the entry is still fully integrated in the cache.
+ */
+ if (entry->type->notify && (entry->type->notify)(H5C_NOTIFY_ACTION_BEFORE_EVICT, entry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry to evict")
+
+ /* Update the cache internal data structures as appropriate for a destroy.
+ * Specifically:
+ * 1) Delete it from the index
+ * 2) Delete it from the collective read access list
+ * 3) Update the replacement policy for eviction
+ * 4) Remove it from the tag list for this object
+ */
- H5C__DELETE_FROM_INDEX(cache, entry, FAIL)
+ H5C__DELETE_FROM_INDEX(cache, entry, FAIL)
#ifdef H5_HAVE_PARALLEL
- /* Check for collective read access flag */
- if (entry->coll_access) {
- entry->coll_access = FALSE;
- H5C__REMOVE_FROM_COLL_LIST(cache, entry, FAIL)
- } /* end if */
-#endif /* H5_HAVE_PARALLEL */
+ /* Check for collective read access flag */
+ if (entry->coll_access) {
+ entry->coll_access = FALSE;
+ H5C__REMOVE_FROM_COLL_LIST(cache, entry, FAIL)
+ } /* end if */
+#endif /* H5_HAVE_PARALLEL */
- H5C__UPDATE_RP_FOR_EVICTION(cache, entry, FAIL)
+ H5C__UPDATE_RP_FOR_EVICTION(cache, entry, FAIL)
- /* Remove entry from tag list */
- if (H5C__untag_entry(cache, entry) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list")
+ /* Remove entry from tag list */
+ if (H5C__untag_entry(cache, entry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list")
- /* Increment entries_removed_counter and set last_entry_removed_ptr.
- * As we me be about to free the entry, recall that last_entry_removed_ptr
- * must NEVER be dereferenced.
- *
- * Recall that these fields are maintained to allow functions that perform
- * scans of lists of entries to detect the unexpected removal of entries
- * (via expunge, eviction, or take ownership at present), so that they can
- * re-start their scans if necessary.
- *
- * Also check if the entry we are watching for removal is being
- * removed (usually the 'next' entry for an iteration) and reset
- * it to indicate that it was removed.
- */
- cache->entries_removed_counter++;
- cache->last_entry_removed_ptr = entry;
- if (entry == cache->entry_watched_for_removal)
- cache->entry_watched_for_removal = NULL;
+ /* Increment entries_removed_counter and set last_entry_removed_ptr.
+ * As we may be about to free the entry, recall that last_entry_removed_ptr
+ * must NEVER be dereferenced.
+ *
+ * Recall that these fields are maintained to allow functions that perform
+ * scans of lists of entries to detect the unexpected removal of entries
+ * (via expunge, eviction, or take ownership at present), so that they can
+ * re-start their scans if necessary.
+ *
+ * Also check if the entry we are watching for removal is being
+ * removed (usually the 'next' entry for an iteration) and reset
+ * it to indicate that it was removed.
+ */
+ cache->entries_removed_counter++;
+ cache->last_entry_removed_ptr = entry;
+ if (entry == cache->entry_watched_for_removal)
+ cache->entry_watched_for_removal = NULL;
- /* Internal cache data structures should now be up to date, and
- * consistent with the status of the entry.
- *
- * Now clean up internal cache fields if appropriate.
- */
+ /* Internal cache data structures should now be up to date, and
+ * consistent with the status of the entry.
+ *
+ * Now clean up internal cache fields if appropriate.
+ */
- /* Free the buffer for the on disk image */
- if (entry->image_ptr != NULL)
- entry->image_ptr = H5MM_xfree(entry->image_ptr);
+ /* Free the buffer for the on disk image */
+ if (entry->image_ptr != NULL)
+ entry->image_ptr = H5MM_xfree(entry->image_ptr);
- /* Reset the pointer to the cache the entry is within */
- entry->cache_ptr = NULL;
+ /* Reset the pointer to the cache the entry is within */
+ entry->cache_ptr = NULL;
- /* Client is taking ownership of the entry. Set bad magic here so the
- * cache will choke unless the entry is re-inserted properly
- */
- entry->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
+ /* Client is taking ownership of the entry. Set bad magic here so the
+ * cache will choke unless the entry is re-inserted properly
+ */
+ entry->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
done:
- FUNC_LEAVE_NOAPI(ret_value)
- } /* H5C__remove_entry() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__remove_entry() */
+
+/* TEMPORARY (during VFD SWMR sync with develop - reduces churn) */
+/* clang-format on */
diff --git a/src/H5Cdbg.c b/src/H5Cdbg.c
index 4835727..7784c3b 100644
--- a/src/H5Cdbg.c
+++ b/src/H5Cdbg.c
@@ -262,6 +262,12 @@ H5C_dump_cache_LRU(H5C_t *cache_ptr, const char *cache_name)
* Programmer: John Mainzer
* 11/15/14
*
+ * Changes: Updated function for the slist_enabled field in H5C_t.
+ * Recall that to minimize slist overhead, the slist is
+ * empty and not maintained if cache_ptr->slist_enabled is
+ * false.
+ * JRM -- 5/6/20
+ *
*-------------------------------------------------------------------------
*/
#ifndef NDEBUG
@@ -280,6 +286,7 @@ H5C_dump_cache_skip_list(H5C_t *cache_ptr, char *calling_fcn)
HDassert(calling_fcn != NULL);
HDfprintf(stdout, "\n\nDumping metadata cache skip list from %s.\n", calling_fcn);
+ HDfprintf(stdout, " slist %s.\n", cache_ptr->slist_enabled ? "enabled" : "disabled");
HDfprintf(stdout, " slist len = %" PRIu32 ".\n", cache_ptr->slist_len);
HDfprintf(stdout, " slist size = %zu.\n", cache_ptr->slist_size);
@@ -404,7 +411,7 @@ H5C_dump_coll_write_list(H5C_t *cache_ptr, char *calling_fcn)
HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDfprintf(stdout, "%s%d 0x%016" PRIxHADDR " %4%zu %d/%d %d %s\n",
+ HDfprintf(stdout, "%s%d 0x%016" PRIxHADDR " %4zu %d/%d %d %s\n",
cache_ptr->prefix, i, entry_ptr->addr, entry_ptr->size, (int)(entry_ptr->is_protected),
(int)(entry_ptr->is_pinned), (int)(entry_ptr->is_dirty), entry_ptr->type->name);
diff --git a/src/H5Cimage.c b/src/H5Cimage.c
index 5776c31..659382b 100644
--- a/src/H5Cimage.c
+++ b/src/H5Cimage.c
@@ -453,6 +453,10 @@ done:
*
* Programmer: John Mainzer, 8/10/15
*
+ * Changes: Updated sanity checks for possibility that the slist
+ * is disabled.
+ * JRM -- 5/17/20
+ *
*-------------------------------------------------------------------------
*/
herr_t
@@ -693,7 +697,10 @@ H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t
pf_entry_ptr->image_ptr = NULL;
if (pf_entry_ptr->is_dirty) {
- HDassert(pf_entry_ptr->in_slist);
+
+ HDassert(((cache_ptr->slist_enabled) && (pf_entry_ptr->in_slist)) ||
+ ((!cache_ptr->slist_enabled) && (!pf_entry_ptr->in_slist)));
+
flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG;
} /* end if */
diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c
index 6543ae5..d6da9b7 100644
--- a/src/H5Cmpio.c
+++ b/src/H5Cmpio.c
@@ -185,7 +185,7 @@ H5C_apply_candidate_list(H5F_t *f, H5C_t *cache_ptr, unsigned num_candidates, ha
HDassert(cache_ptr != NULL);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
HDassert(num_candidates > 0);
- HDassert(num_candidates <= cache_ptr->slist_len);
+ HDassert((!cache_ptr->slist_enabled) || (num_candidates <= cache_ptr->slist_len));
HDassert(candidates_list_ptr != NULL);
HDassert(0 <= mpi_rank);
HDassert(mpi_rank < mpi_size);
@@ -393,6 +393,7 @@ done:
} /* H5C_apply_candidate_list() */
/*-------------------------------------------------------------------------
+ *
* Function: H5C_construct_candidate_list__clean_cache
*
* Purpose: Construct the list of entries that should be flushed to
@@ -408,6 +409,16 @@ done:
* Programmer: John Mainzer
* 3/17/10
*
+ * Changes: With the slist optimization, the slist is not maintained
+ * unless a flush is in progress. Thus we can no longer use
+ * cache_ptr->slist_size to determine the total size of
+ * the entries we must insert in the candidate list.
+ *
+ * To address this, we now use cache_ptr->dirty_index_size
+ * instead.
+ *
+ * JRM -- 7/27/20
+ *
*-------------------------------------------------------------------------
*/
herr_t
@@ -421,18 +432,22 @@ H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr)
HDassert(cache_ptr != NULL);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- /* As a sanity check, set space needed to the size of the skip list.
- * This should be the sum total of the sizes of all the dirty entries
- * in the metadata cache.
+ /* As a sanity check, set space needed to the dirty_index_size. This
+ * should be the sum total of the sizes of all the dirty entries
+ * in the metadata cache. Note that if the slist is enabled,
+ * cache_ptr->slist_size should equal cache_ptr->dirty_index_size.
*/
- space_needed = cache_ptr->slist_size;
+ space_needed = cache_ptr->dirty_index_size;
+
+ HDassert((!cache_ptr->slist_enabled) || (space_needed == cache_ptr->slist_size));
/* Recall that while we shouldn't have any protected entries at this
* point, it is possible that some dirty entries may reside on the
* pinned list at this point.
*/
- HDassert(cache_ptr->slist_size <= (cache_ptr->dLRU_list_size + cache_ptr->pel_size));
- HDassert(cache_ptr->slist_len <= (cache_ptr->dLRU_list_len + cache_ptr->pel_len));
+ HDassert(cache_ptr->dirty_index_size <= (cache_ptr->dLRU_list_size + cache_ptr->pel_size));
+ HDassert((!cache_ptr->slist_enabled) ||
+ (cache_ptr->slist_len <= (cache_ptr->dLRU_list_len + cache_ptr->pel_len)));
if (space_needed > 0) { /* we have work to do */
@@ -441,21 +456,22 @@ H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr)
size_t nominated_entries_size = 0;
haddr_t nominated_addr;
- HDassert(cache_ptr->slist_len > 0);
+ HDassert((!cache_ptr->slist_enabled) || (cache_ptr->slist_len > 0));
/* Scan the dirty LRU list from tail forward and nominate sufficient
* entries to free up the necessary space.
*/
entry_ptr = cache_ptr->dLRU_tail_ptr;
- while ((nominated_entries_size < space_needed) && (nominated_entries_count < cache_ptr->slist_len) &&
+ while ((nominated_entries_size < space_needed) &&
+ ((!cache_ptr->slist_enabled) || (nominated_entries_count < cache_ptr->slist_len)) &&
(entry_ptr != NULL)) {
HDassert(!(entry_ptr->is_protected));
HDassert(!(entry_ptr->is_read_only));
HDassert(entry_ptr->ro_ref_count == 0);
HDassert(entry_ptr->is_dirty);
- HDassert(entry_ptr->in_slist);
+ HDassert((!cache_ptr->slist_enabled) || (entry_ptr->in_slist));
nominated_addr = entry_ptr->addr;
@@ -476,7 +492,8 @@ H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr)
*/
entry_ptr = cache_ptr->pel_head_ptr;
- while ((nominated_entries_size < space_needed) && (nominated_entries_count < cache_ptr->slist_len) &&
+ while ((nominated_entries_size < space_needed) &&
+ ((!cache_ptr->slist_enabled) || (nominated_entries_count < cache_ptr->slist_len)) &&
(entry_ptr != NULL)) {
if (entry_ptr->is_dirty) {
@@ -502,7 +519,7 @@ H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr)
} /* end while */
- HDassert(nominated_entries_count == cache_ptr->slist_len);
+ HDassert((!cache_ptr->slist_enabled) || (nominated_entries_count == cache_ptr->slist_len));
HDassert(nominated_entries_size == space_needed);
} /* end if */
@@ -529,6 +546,12 @@ done:
* Programmer: John Mainzer
* 3/17/10
*
+ * Changes: With the slist optimization, the slist is not maintained
+ * unless a flush is in progress. Updated sanity checks to
+ * reflect this.
+ *
+ * JRM -- 7/27/20
+ *
*-------------------------------------------------------------------------
*/
herr_t
@@ -576,14 +599,15 @@ H5C_construct_candidate_list__min_clean(H5C_t *cache_ptr)
unsigned nominated_entries_count = 0;
size_t nominated_entries_size = 0;
- HDassert(cache_ptr->slist_len > 0);
+ HDassert((!cache_ptr->slist_enabled) || (cache_ptr->slist_len > 0));
/* Scan the dirty LRU list from tail forward and nominate sufficient
* entries to free up the necessary space.
*/
entry_ptr = cache_ptr->dLRU_tail_ptr;
- while ((nominated_entries_size < space_needed) && (nominated_entries_count < cache_ptr->slist_len) &&
+ while ((nominated_entries_size < space_needed) &&
+ ((!cache_ptr->slist_enabled) || (nominated_entries_count < cache_ptr->slist_len)) &&
(entry_ptr != NULL) && (!entry_ptr->flush_me_last)) {
haddr_t nominated_addr;
@@ -592,7 +616,7 @@ H5C_construct_candidate_list__min_clean(H5C_t *cache_ptr)
HDassert(!(entry_ptr->is_read_only));
HDassert(entry_ptr->ro_ref_count == 0);
HDassert(entry_ptr->is_dirty);
- HDassert(entry_ptr->in_slist);
+ HDassert((!cache_ptr->slist_enabled) || (entry_ptr->in_slist));
nominated_addr = entry_ptr->addr;
@@ -605,7 +629,9 @@ H5C_construct_candidate_list__min_clean(H5C_t *cache_ptr)
entry_ptr = entry_ptr->aux_prev;
} /* end while */
- HDassert(nominated_entries_count <= cache_ptr->slist_len);
+
+ HDassert((!cache_ptr->slist_enabled) || (nominated_entries_count <= cache_ptr->slist_len));
+ HDassert(nominated_entries_size <= cache_ptr->dirty_index_size);
HDassert(nominated_entries_size >= space_needed);
} /* end if */
@@ -893,7 +919,9 @@ H5C_clear_coll_entries(H5C_t *cache_ptr, hbool_t partial)
entry_ptr = prev_ptr;
} /* end while */
+#ifdef H5C_DO_SANITY_CHECKS
done:
+#endif /* H5C_DO_SANITY_CHECKS */
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_clear_coll_entries */
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index 1cb4550..5fc255f 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -59,6 +59,13 @@
/* Initial allocated size of the "flush_dep_parent" array */
#define H5C_FLUSH_DEP_PARENT_INIT 8
+
+/* Set to TRUE to enable the slist optimization. If this field is TRUE,
+ * the slist is disabled whenever a flush is not in progress.
+ */
+#define H5C__SLIST_OPT_ENABLED TRUE
+
+
/****************************************************************************
*
* We maintain doubly linked lists of instances of H5C_cache_entry_t for a
@@ -1007,234 +1014,327 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
#if H5C_DO_SANITY_CHECKS
-#define H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
-if (((cache_ptr) == NULL) || ((cache_ptr)->magic != H5C__H5C_T_MAGIC) || ((entry_ptr) == NULL) || \
- (!H5F_addr_defined((entry_ptr)->addr)) || ((entry_ptr)->ht_next != NULL) || \
- ((entry_ptr)->ht_prev != NULL) || ((entry_ptr)->pi_next != NULL) || \
- ((entry_ptr)->pi_prev != NULL) || ((entry_ptr)->size <= 0) || \
- (H5C__HASH_FCN((entry_ptr)->addr) < 0) || \
- (H5C__HASH_FCN((entry_ptr)->addr) >= H5C__HASH_TABLE_LEN) || \
- ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size)) || \
- ((cache_ptr)->index_size < ((cache_ptr)->clean_index_size)) || \
- ((cache_ptr)->index_size < ((cache_ptr)->dirty_index_size)) || \
- ((entry_ptr)->ring <= H5C_RING_UNDEFINED) || ((entry_ptr)->ring >= H5C_RING_NTYPES) || \
- ((cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len) || \
- ((cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size) || \
- ((cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
- ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
- (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring])) || \
- ((cache_ptr)->index_len != (cache_ptr)->il_len) || \
- ((cache_ptr)->index_size != (cache_ptr)->il_size)) { \
- HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "pre HT insert SC failed") \
+#define H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
+if ( ( (cache_ptr) == NULL ) || \
+ ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
+ ( (entry_ptr) == NULL ) || \
+ ( ! H5F_addr_defined((entry_ptr)->addr) ) || \
+ ( (entry_ptr)->ht_next != NULL ) || \
+ ( (entry_ptr)->ht_prev != NULL ) || \
+ ( (entry_ptr)->pi_next != NULL ) || \
+ ( (entry_ptr)->pi_prev != NULL ) || \
+ ( (entry_ptr)->size <= 0 ) || \
+ ( H5C__HASH_FCN((entry_ptr)->addr) < 0 ) || \
+ ( H5C__HASH_FCN((entry_ptr)->addr) >= H5C__HASH_TABLE_LEN ) || \
+ ( (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + \
+ (cache_ptr)->dirty_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
+ ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) || \
+ ( (entry_ptr)->ring >= H5C_RING_NTYPES ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
+ (cache_ptr)->index_len ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
+ (cache_ptr)->index_size ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \
+ ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
+ ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
+ HDassert(FALSE); \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "pre HT insert SC failed") \
}
-#define H5C__POST_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
-if (((cache_ptr) == NULL) || ((cache_ptr)->magic != H5C__H5C_T_MAGIC) || \
- ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size)) || \
- ((cache_ptr)->index_size < ((cache_ptr)->clean_index_size)) || \
- ((cache_ptr)->index_size < ((cache_ptr)->dirty_index_size)) || \
- ((cache_ptr)->index_ring_len[(entry_ptr)->ring] == 0) || \
- ((cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len) || \
- ((cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size) || \
- ((cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
- ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
- (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring])) || \
- ((cache_ptr)->index_len != (cache_ptr)->il_len) || \
- ((cache_ptr)->index_size != (cache_ptr)->il_size)) { \
- HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "post HT insert SC failed") \
+#define H5C__POST_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
+if ( ( (cache_ptr) == NULL ) || \
+ ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
+ ( (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + \
+ (cache_ptr)->dirty_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] == 0 ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
+ (cache_ptr)->index_len ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
+ (cache_ptr)->index_size ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \
+ ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
+ ( (cache_ptr)->index_size != (cache_ptr)->il_size) ) { \
+ HDassert(FALSE); \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "post HT insert SC failed") \
}
-#define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr) \
-if (((cache_ptr) == NULL) || ((cache_ptr)->magic != H5C__H5C_T_MAGIC) || ((cache_ptr)->index_len < 1) || \
- ((entry_ptr) == NULL) || ((cache_ptr)->index_size < (entry_ptr)->size) || \
- (!H5F_addr_defined((entry_ptr)->addr)) || ((entry_ptr)->size <= 0) || \
- (H5C__HASH_FCN((entry_ptr)->addr) < 0) || \
- (H5C__HASH_FCN((entry_ptr)->addr) >= H5C__HASH_TABLE_LEN) || \
- (((cache_ptr)->index)[(H5C__HASH_FCN((entry_ptr)->addr))] == NULL) || \
- ((((cache_ptr)->index)[(H5C__HASH_FCN((entry_ptr)->addr))] != (entry_ptr)) && \
- ((entry_ptr)->ht_prev == NULL)) || \
- ((((cache_ptr)->index)[(H5C__HASH_FCN((entry_ptr)->addr))] == (entry_ptr)) && \
- ((entry_ptr)->ht_prev != NULL)) || \
- ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size)) || \
- (((cache_ptr)->vfd_swmr_reader) && \
- ((((cache_ptr)->page_index[(H5C__PI_HASH_FCN((entry_ptr)->page))] != (entry_ptr)) && \
- ((entry_ptr)->pi_prev == NULL)) || \
- (((cache_ptr)->page_index[(H5C__PI_HASH_FCN((entry_ptr)->page))] == (entry_ptr)) && \
- ((entry_ptr)->pi_prev != NULL)))) || \
- ((cache_ptr)->index_size < ((cache_ptr)->clean_index_size)) || \
- ((cache_ptr)->index_size < ((cache_ptr)->dirty_index_size)) || \
- ((entry_ptr)->ring <= H5C_RING_UNDEFINED) || ((entry_ptr)->ring >= H5C_RING_NTYPES) || \
- ((cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0) || \
- ((cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len) || \
- ((cache_ptr)->index_ring_size[(entry_ptr)->ring] < (entry_ptr)->size) || \
- ((cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size) || \
- ((cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
- ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
- (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring])) || \
- ((cache_ptr)->index_len != (cache_ptr)->il_len) || \
- ((cache_ptr)->index_size != (cache_ptr)->il_size)) { \
- HDassert(FALSE && "pre HT remove SC failed"); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT remove SC failed") \
+#define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr) \
+if ( ( (cache_ptr) == NULL ) || \
+ ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
+ ( (cache_ptr)->index_len < 1 ) || \
+ ( (entry_ptr) == NULL ) || \
+ ( (cache_ptr)->index_size < (entry_ptr)->size ) || \
+ ( ! H5F_addr_defined((entry_ptr)->addr) ) || \
+ ( (entry_ptr)->size <= 0 ) || \
+ ( H5C__HASH_FCN((entry_ptr)->addr) < 0 ) || \
+ ( H5C__HASH_FCN((entry_ptr)->addr) >= H5C__HASH_TABLE_LEN ) || \
+ ( ((cache_ptr)->index)[(H5C__HASH_FCN((entry_ptr)->addr))] \
+ == NULL ) || \
+ ( ( ((cache_ptr)->index)[(H5C__HASH_FCN((entry_ptr)->addr))] \
+ != (entry_ptr) ) && \
+ ( (entry_ptr)->ht_prev == NULL ) ) || \
+ ( ( ((cache_ptr)->index)[(H5C__HASH_FCN((entry_ptr)->addr))] == \
+ (entry_ptr) ) && \
+ ( (entry_ptr)->ht_prev != NULL ) ) || \
+ ( (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + \
+ (cache_ptr)->dirty_index_size) ) || \
+ (((cache_ptr)->vfd_swmr_reader) && \
+ ((((cache_ptr)->page_index[(H5C__PI_HASH_FCN((entry_ptr)->page))] \
+ != (entry_ptr)) && \
+ ((entry_ptr)->pi_prev == NULL)) || \
+ (((cache_ptr)->page_index[(H5C__PI_HASH_FCN((entry_ptr)->page))] \
+ == (entry_ptr)) && \
+ ((entry_ptr)->pi_prev != NULL)))) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
+ ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) || \
+ ( (entry_ptr)->ring >= H5C_RING_NTYPES ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
+ (cache_ptr)->index_len ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] < \
+ (entry_ptr)->size ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
+ (cache_ptr)->index_size ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \
+ ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
+ ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
+ HDassert(FALSE && "pre HT remove SC failed"); \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT remove SC failed") \
}
-#define H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr) \
-if (((cache_ptr) == NULL) || ((cache_ptr)->magic != H5C__H5C_T_MAGIC) || ((entry_ptr) == NULL) || \
- (!H5F_addr_defined((entry_ptr)->addr)) || ((entry_ptr)->size <= 0) || \
- ((entry_ptr)->ht_prev != NULL) || ((entry_ptr)->ht_next != NULL) || \
- ((entry_ptr)->pi_prev != NULL) || ((entry_ptr)->pi_next != NULL) || \
- ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size)) || \
- ((cache_ptr)->index_size < ((cache_ptr)->clean_index_size)) || \
- ((cache_ptr)->index_size < ((cache_ptr)->dirty_index_size)) || \
- ((cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len) || \
- ((cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size) || \
- ((cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
- ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
- (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring])) || \
- ((cache_ptr)->index_len != (cache_ptr)->il_len) || \
- ((cache_ptr)->index_size != (cache_ptr)->il_size)) { \
- HDassert(FALSE && "post HT remove SC failed"); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT remove SC failed") \
+#define H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr) \
+if ( ( (cache_ptr) == NULL ) || \
+ ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
+ ( (entry_ptr) == NULL ) || \
+ ( ! H5F_addr_defined((entry_ptr)->addr) ) || \
+ ( (entry_ptr)->size <= 0 ) || \
+ ( (entry_ptr)->ht_prev != NULL ) || \
+ ( (entry_ptr)->ht_next != NULL ) || \
+ ( (entry_ptr)->pi_prev != NULL ) || \
+ ( (entry_ptr)->pi_next != NULL ) || \
+ ( (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + \
+ (cache_ptr)->dirty_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
+ (cache_ptr)->index_len ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
+ (cache_ptr)->index_size ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \
+ ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
+ ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
+ HDassert(FALSE && "post HT remove SC failed"); \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT remove SC failed") \
}
/* (Keep in sync w/H5C_TEST__PRE_HT_SEARCH_SC macro in test/cache_common.h -QAK) */
-#define H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \
-if (((cache_ptr) == NULL) || ((cache_ptr)->magic != H5C__H5C_T_MAGIC) || \
- ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size)) || \
- (!H5F_addr_defined(Addr)) || (H5C__HASH_FCN(Addr) < 0) || \
- (H5C__HASH_FCN(Addr) >= H5C__HASH_TABLE_LEN)) { \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "pre HT search SC failed") \
+#define H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \
+if ( ( (cache_ptr) == NULL ) || \
+ ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
+ ( (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
+ ( ! H5F_addr_defined(Addr) ) || \
+ ( H5C__HASH_FCN(Addr) < 0 ) || \
+ ( H5C__HASH_FCN(Addr) >= H5C__HASH_TABLE_LEN ) ) { \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "pre HT search SC failed") \
}
-/* (Keep in sync w/H5C_TEST__POST_SUC_HT_SEARCH_SC macro in
-* test/cache_common.h -QAK)
-*/
-#define H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val) \
-if (((cache_ptr) == NULL) || ((cache_ptr)->magic != H5C__H5C_T_MAGIC) || ((cache_ptr)->index_len < 1) || \
- ((entry_ptr) == NULL) || ((cache_ptr)->index_size < (entry_ptr)->size) || \
- ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size)) || \
- ((entry_ptr)->size <= 0) || (((cache_ptr)->index)[k] == NULL) || \
- ((((cache_ptr)->index)[k] != (entry_ptr)) && ((entry_ptr)->ht_prev == NULL)) || \
- ((((cache_ptr)->index)[k] == (entry_ptr)) && ((entry_ptr)->ht_prev != NULL)) || \
- (((entry_ptr)->ht_prev != NULL) && ((entry_ptr)->ht_prev->ht_next != (entry_ptr))) || \
- (((entry_ptr)->ht_next != NULL) && ((entry_ptr)->ht_next->ht_prev != (entry_ptr)))) { \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "post successful HT search SC failed") \
+/* (Keep in sync w/H5C_TEST__POST_SUC_HT_SEARCH_SC macro in test/cache_common.h -QAK) */
+#define H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val) \
+if ( ( (cache_ptr) == NULL ) || \
+ ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
+ ( (cache_ptr)->index_len < 1 ) || \
+ ( (entry_ptr) == NULL ) || \
+ ( (cache_ptr)->index_size < (entry_ptr)->size ) || \
+ ( (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
+ ( (entry_ptr)->size <= 0 ) || \
+ ( ((cache_ptr)->index)[k] == NULL ) || \
+ ( ( ((cache_ptr)->index)[k] != (entry_ptr) ) && \
+ ( (entry_ptr)->ht_prev == NULL ) ) || \
+ ( ( ((cache_ptr)->index)[k] == (entry_ptr) ) && \
+ ( (entry_ptr)->ht_prev != NULL ) ) || \
+ ( ( (entry_ptr)->ht_prev != NULL ) && \
+ ( (entry_ptr)->ht_prev->ht_next != (entry_ptr) ) ) || \
+ ( ( (entry_ptr)->ht_next != NULL ) && \
+ ( (entry_ptr)->ht_next->ht_prev != (entry_ptr) ) ) ) { \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "post successful HT search SC failed") \
}
-/* (Keep in sync w/H5C_TEST__POST_HT_SHIFT_TO_FRONT macro in
-* test/cache_common.h -QAK)
-*/
-#define H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \
-if (((cache_ptr) == NULL) || (((cache_ptr)->index)[k] != (entry_ptr)) || \
- ((entry_ptr)->ht_prev != NULL)) { \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "post HT shift to front SC failed") \
+/* (Keep in sync w/H5C_TEST__POST_HT_SHIFT_TO_FRONT macro in test/cache_common.h -QAK) */
+#define H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \
+if ( ( (cache_ptr) == NULL ) || \
+ ( ((cache_ptr)->index)[k] != (entry_ptr) ) || \
+ ( (entry_ptr)->ht_prev != NULL ) ) { \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "post HT shift to front SC failed") \
}
-#define H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, entry_ptr, was_clean) \
-if (((cache_ptr) == NULL) || ((cache_ptr)->index_len <= 0) || ((cache_ptr)->index_size <= 0) || \
- ((new_size) <= 0) || ((old_size) > (cache_ptr)->index_size) || \
- (((cache_ptr)->index_len == 1) && ((cache_ptr)->index_size != (old_size))) || \
- ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size)) || \
- ((cache_ptr)->index_size < ((cache_ptr)->clean_index_size)) || \
- ((cache_ptr)->index_size < ((cache_ptr)->dirty_index_size)) || \
- ((!(was_clean) || ((cache_ptr)->clean_index_size < (old_size))) && \
- (((was_clean)) || ((cache_ptr)->dirty_index_size < (old_size)))) || \
- ((entry_ptr) == NULL) || ((entry_ptr)->ring <= H5C_RING_UNDEFINED) || \
- ((entry_ptr)->ring >= H5C_RING_NTYPES) || ((cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0) || \
- ((cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len) || \
- ((cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size) || \
- ((cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
- ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
- (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring])) || \
- ((cache_ptr)->index_len != (cache_ptr)->il_len) || \
- ((cache_ptr)->index_size != (cache_ptr)->il_size)) { \
- HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT entry size change SC failed") \
+#define H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
+ entry_ptr, was_clean) \
+if ( ( (cache_ptr) == NULL ) || \
+ ( (cache_ptr)->index_len <= 0 ) || \
+ ( (cache_ptr)->index_size <= 0 ) || \
+ ( (new_size) <= 0 ) || \
+ ( (old_size) > (cache_ptr)->index_size ) || \
+ ( ( (cache_ptr)->index_len == 1 ) && \
+ ( (cache_ptr)->index_size != (old_size) ) ) || \
+ ( (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + \
+ (cache_ptr)->dirty_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
+ ( ( !( was_clean ) || \
+ ( (cache_ptr)->clean_index_size < (old_size) ) ) && \
+ ( ( (was_clean) ) || \
+ ( (cache_ptr)->dirty_index_size < (old_size) ) ) ) || \
+ ( (entry_ptr) == NULL ) || \
+ ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) || \
+ ( (entry_ptr)->ring >= H5C_RING_NTYPES ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
+ (cache_ptr)->index_len ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
+ (cache_ptr)->index_size ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \
+ ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
+ ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
+ HDassert(FALSE); \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT entry size change SC failed") \
}
-#define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, entry_ptr) \
-if (((cache_ptr) == NULL) || ((cache_ptr)->index_len <= 0) || ((cache_ptr)->index_size <= 0) || \
- ((new_size) > (cache_ptr)->index_size) || \
- ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size)) || \
- ((cache_ptr)->index_size < ((cache_ptr)->clean_index_size)) || \
- ((cache_ptr)->index_size < ((cache_ptr)->dirty_index_size)) || \
- ((!((entry_ptr)->is_dirty) || ((cache_ptr)->dirty_index_size < (new_size))) && \
- ((((entry_ptr)->is_dirty)) || ((cache_ptr)->clean_index_size < (new_size)))) || \
- (((cache_ptr)->index_len == 1) && ((cache_ptr)->index_size != (new_size))) || \
- ((cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len) || \
- ((cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size) || \
- ((cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
- ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
- (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring])) || \
- ((cache_ptr)->index_len != (cache_ptr)->il_len) || \
- ((cache_ptr)->index_size != (cache_ptr)->il_size)) { \
- HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT entry size change SC failed") \
+#define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
+ entry_ptr) \
+if ( ( (cache_ptr) == NULL ) || \
+ ( (cache_ptr)->index_len <= 0 ) || \
+ ( (cache_ptr)->index_size <= 0 ) || \
+ ( (new_size) > (cache_ptr)->index_size ) || \
+ ( (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + \
+ (cache_ptr)->dirty_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
+ ( ( !((entry_ptr)->is_dirty ) || \
+ ( (cache_ptr)->dirty_index_size < (new_size) ) ) && \
+ ( ( ((entry_ptr)->is_dirty) ) || \
+ ( (cache_ptr)->clean_index_size < (new_size) ) ) ) || \
+ ( ( (cache_ptr)->index_len == 1 ) && \
+ ( (cache_ptr)->index_size != (new_size) ) ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
+ (cache_ptr)->index_len ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
+ (cache_ptr)->index_size ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \
+ ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
+ ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
+ HDassert(FALSE); \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT entry size change SC failed") \
}
-#define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) \
-if (((cache_ptr) == NULL) || ((cache_ptr)->magic != H5C__H5C_T_MAGIC) || \
- ((cache_ptr)->index_len <= 0) || ((entry_ptr) == NULL) || ((entry_ptr)->is_dirty != FALSE) || \
- ((cache_ptr)->index_size < (entry_ptr)->size) || \
- ((cache_ptr)->dirty_index_size < (entry_ptr)->size) || \
- ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size)) || \
- ((cache_ptr)->index_size < ((cache_ptr)->clean_index_size)) || \
- ((cache_ptr)->index_size < ((cache_ptr)->dirty_index_size)) || \
- ((entry_ptr)->ring <= H5C_RING_UNDEFINED) || ((entry_ptr)->ring >= H5C_RING_NTYPES) || \
- ((cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0) || \
- ((cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len) || \
- ((cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size) || \
- ((cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
- ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
- (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]))) { \
- HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT update for entry clean SC failed") \
+#define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) \
+if ( \
+ ( (cache_ptr) == NULL ) || \
+ ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
+ ( (cache_ptr)->index_len <= 0 ) || \
+ ( (entry_ptr) == NULL ) || \
+ ( (entry_ptr)->is_dirty != FALSE ) || \
+ ( (cache_ptr)->index_size < (entry_ptr)->size ) || \
+ ( (cache_ptr)->dirty_index_size < (entry_ptr)->size ) || \
+ ( (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
+ ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) || \
+ ( (entry_ptr)->ring >= H5C_RING_NTYPES ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
+ (cache_ptr)->index_len ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
+ (cache_ptr)->index_size ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
+ HDassert(FALSE); \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT update for entry clean SC failed") \
}
-#define H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) \
-if (((cache_ptr) == NULL) || ((cache_ptr)->magic != H5C__H5C_T_MAGIC) || \
- ((cache_ptr)->index_len <= 0) || ((entry_ptr) == NULL) || ((entry_ptr)->is_dirty != TRUE) || \
- ((cache_ptr)->index_size < (entry_ptr)->size) || \
- ((cache_ptr)->clean_index_size < (entry_ptr)->size) || \
- ((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size)) || \
- ((cache_ptr)->index_size < ((cache_ptr)->clean_index_size)) || \
- ((cache_ptr)->index_size < ((cache_ptr)->dirty_index_size)) || \
- ((entry_ptr)->ring <= H5C_RING_UNDEFINED) || ((entry_ptr)->ring >= H5C_RING_NTYPES) || \
- ((cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0) || \
- ((cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len) || \
- ((cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size) || \
- ((cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
- ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
- (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]))) { \
- HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT update for entry dirty SC failed") \
+#define H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) \
+if ( \
+ ( (cache_ptr) == NULL ) || \
+ ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
+ ( (cache_ptr)->index_len <= 0 ) || \
+ ( (entry_ptr) == NULL ) || \
+ ( (entry_ptr)->is_dirty != TRUE ) || \
+ ( (cache_ptr)->index_size < (entry_ptr)->size ) || \
+ ( (cache_ptr)->clean_index_size < (entry_ptr)->size ) || \
+ ( (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
+ ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) || \
+ ( (entry_ptr)->ring >= H5C_RING_NTYPES ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
+ (cache_ptr)->index_len ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
+ (cache_ptr)->index_size ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
+ HDassert(FALSE); \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT update for entry dirty SC failed") \
}
-#define H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) \
-if (((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size)) || \
- ((cache_ptr)->index_size < ((cache_ptr)->clean_index_size)) || \
- ((cache_ptr)->index_size < ((cache_ptr)->dirty_index_size)) || \
- ((cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len) || \
- ((cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size) || \
- ((cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
- ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
- (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]))) { \
- HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT update for entry clean SC failed") \
+#define H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) \
+if ( ( (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
+ (cache_ptr)->index_len ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
+ (cache_ptr)->index_size ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
+ HDassert(FALSE); \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT update for entry clean SC failed") \
}
-#define H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) \
-if (((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size)) || \
- ((cache_ptr)->index_size < ((cache_ptr)->clean_index_size)) || \
- ((cache_ptr)->index_size < ((cache_ptr)->dirty_index_size)) || \
- ((cache_ptr)->index_ring_len[(entry_ptr)->ring] > (cache_ptr)->index_len) || \
- ((cache_ptr)->index_ring_size[(entry_ptr)->ring] > (cache_ptr)->index_size) || \
- ((cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
- ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
- (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]))) { \
- HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT update for entry dirty SC failed") \
+#define H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) \
+if ( ( (cache_ptr)->index_size != \
+ ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
+ ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
+ ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
+ (cache_ptr)->index_len ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
+ (cache_ptr)->index_size ) || \
+ ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
+ ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
+ HDassert(FALSE); \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT update for entry dirty SC failed") \
}
#else /* H5C_DO_SANITY_CHECKS */
@@ -1257,151 +1357,152 @@ if (((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->di
#endif /* H5C_DO_SANITY_CHECKS */
-#define H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, fail_val) \
- { \
- int k; \
- H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
- if (cache_ptr->vfd_swmr_reader) { \
- k = H5C__PI_HASH_FCN((entry_ptr)->page); \
- if (((cache_ptr)->page_index)[k] != NULL) { \
- (entry_ptr)->pi_next = ((cache_ptr)->page_index)[k]; \
- (entry_ptr)->pi_next->pi_prev = (entry_ptr); \
- } \
- ((cache_ptr)->page_index)[k] = (entry_ptr); \
- } \
- k = H5C__HASH_FCN((entry_ptr)->addr); \
- if (((cache_ptr)->index)[k] != NULL) { \
- (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \
- (entry_ptr)->ht_next->ht_prev = (entry_ptr); \
- } \
- ((cache_ptr)->index)[k] = (entry_ptr); \
- (cache_ptr)->index_len++; \
- (cache_ptr)->index_size += (entry_ptr)->size; \
- ((cache_ptr)->index_ring_len[entry_ptr->ring])++; \
- ((cache_ptr)->index_ring_size[entry_ptr->ring]) += (entry_ptr)->size; \
- if ((entry_ptr)->is_dirty) { \
- (cache_ptr)->dirty_index_size += (entry_ptr)->size; \
- ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring]) += (entry_ptr)->size; \
- } \
- else { \
- (cache_ptr)->clean_index_size += (entry_ptr)->size; \
- ((cache_ptr)->clean_index_ring_size[entry_ptr->ring]) += (entry_ptr)->size; \
- } \
- if ((entry_ptr)->flush_me_last) { \
- (cache_ptr)->num_last_entries++; \
- HDassert((cache_ptr)->num_last_entries <= 2); \
- } \
- H5C__IL_DLL_APPEND((entry_ptr), (cache_ptr)->il_head, (cache_ptr)->il_tail, (cache_ptr)->il_len, \
- (cache_ptr)->il_size, fail_val) \
- H5C__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr) \
- H5C__POST_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
- }
-#define H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, fail_val) \
-{ \
- int k; \
- H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr) \
- if (cache_ptr->vfd_swmr_reader) { \
- k = H5C__PI_HASH_FCN((entry_ptr)->page); \
- if ((entry_ptr)->pi_next) { \
- (entry_ptr)->pi_next->pi_prev = (entry_ptr)->pi_prev; \
- } \
- if ((entry_ptr)->pi_prev) { \
- (entry_ptr)->pi_prev->pi_next = (entry_ptr)->pi_next; \
- } \
- if (((cache_ptr)->page_index)[k] == (entry_ptr)) { \
- ((cache_ptr)->page_index)[k] = (entry_ptr)->pi_next; \
- } \
- (entry_ptr)->pi_next = NULL; \
- (entry_ptr)->pi_prev = NULL; \
- } \
- k = H5C__HASH_FCN((entry_ptr)->addr); \
- if ((entry_ptr)->ht_next) { \
- (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
- } \
- if ((entry_ptr)->ht_prev) { \
- (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
- } \
- if (((cache_ptr)->index)[k] == (entry_ptr)) { \
- ((cache_ptr)->index)[k] = (entry_ptr)->ht_next; \
- } \
- (entry_ptr)->ht_next = NULL; \
- (entry_ptr)->ht_prev = NULL; \
- (cache_ptr)->index_len--; \
- (cache_ptr)->index_size -= (entry_ptr)->size; \
- ((cache_ptr)->index_ring_len[entry_ptr->ring])--; \
- ((cache_ptr)->index_ring_size[entry_ptr->ring]) -= (entry_ptr)->size; \
- if ((entry_ptr)->is_dirty) { \
- (cache_ptr)->dirty_index_size -= (entry_ptr)->size; \
- ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring]) -= (entry_ptr)->size; \
- } \
- else { \
- (cache_ptr)->clean_index_size -= (entry_ptr)->size; \
- ((cache_ptr)->clean_index_ring_size[entry_ptr->ring]) -= (entry_ptr)->size; \
- } \
- if ((entry_ptr)->flush_me_last) { \
- (cache_ptr)->num_last_entries--; \
- HDassert((cache_ptr)->num_last_entries <= 1); \
- } \
- H5C__IL_DLL_REMOVE((entry_ptr), (cache_ptr)->il_head, (cache_ptr)->il_tail, (cache_ptr)->il_len, \
- (cache_ptr)->il_size, fail_val) \
- H5C__UPDATE_STATS_FOR_HT_DELETION(cache_ptr) \
- H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr) \
+#define H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, fail_val) \
+{ \
+ int k; \
+ H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
+ if(cache_ptr->vfd_swmr_reader) { \
+ k = H5C__PI_HASH_FCN((entry_ptr)->page); \
+ if (((cache_ptr)->page_index)[k] != NULL) { \
+ (entry_ptr)->pi_next = ((cache_ptr)->page_index)[k]; \
+ (entry_ptr)->pi_next->pi_prev = (entry_ptr); \
+ } \
+ ((cache_ptr)->page_index)[k] = (entry_ptr); \
+ } \
+ k = H5C__HASH_FCN((entry_ptr)->addr); \
+ if(((cache_ptr)->index)[k] != NULL) { \
+ (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \
+ (entry_ptr)->ht_next->ht_prev = (entry_ptr); \
+ } \
+ ((cache_ptr)->index)[k] = (entry_ptr); \
+ (cache_ptr)->index_len++; \
+ (cache_ptr)->index_size += (entry_ptr)->size; \
+ ((cache_ptr)->index_ring_len[entry_ptr->ring])++; \
+ ((cache_ptr)->index_ring_size[entry_ptr->ring]) \
+ += (entry_ptr)->size; \
+ if((entry_ptr)->is_dirty) { \
+ (cache_ptr)->dirty_index_size += (entry_ptr)->size; \
+ ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring]) \
+ += (entry_ptr)->size; \
+ } else { \
+ (cache_ptr)->clean_index_size += (entry_ptr)->size; \
+ ((cache_ptr)->clean_index_ring_size[entry_ptr->ring]) \
+ += (entry_ptr)->size; \
+ } \
+ if((entry_ptr)->flush_me_last) { \
+ (cache_ptr)->num_last_entries++; \
+ HDassert((cache_ptr)->num_last_entries <= 2); \
+ } \
+ H5C__IL_DLL_APPEND((entry_ptr), (cache_ptr)->il_head, \
+ (cache_ptr)->il_tail, (cache_ptr)->il_len, \
+ (cache_ptr)->il_size, fail_val) \
+ H5C__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr) \
+ H5C__POST_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
}
-#define H5C__SEARCH_INDEX(cache_ptr, Addr, entry_ptr, fail_val) \
-{ \
- int k; \
- int depth = 0; \
- H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \
- k = H5C__HASH_FCN(Addr); \
- entry_ptr = ((cache_ptr)->index)[k]; \
- while (entry_ptr) { \
- if (H5F_addr_eq(Addr, (entry_ptr)->addr)) { \
- H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val) \
- if (entry_ptr != ((cache_ptr)->index)[k]) { \
- if ((entry_ptr)->ht_next) \
- (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
- HDassert((entry_ptr)->ht_prev != NULL); \
- (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
- ((cache_ptr)->index)[k]->ht_prev = (entry_ptr); \
- (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \
- (entry_ptr)->ht_prev = NULL; \
- ((cache_ptr)->index)[k] = (entry_ptr); \
- H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \
- } \
- break; \
- } \
- (entry_ptr) = (entry_ptr)->ht_next; \
- (depth)++; \
- } \
- H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, (entry_ptr != NULL), depth) \
+#define H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, fail_val) \
+{ \
+ int k; \
+ H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr) \
+ if(cache_ptr->vfd_swmr_reader) { \
+ k = H5C__PI_HASH_FCN((entry_ptr)->page); \
+ if ((entry_ptr)->pi_next) \
+ (entry_ptr)->pi_next->pi_prev = (entry_ptr)->pi_prev; \
+ if ((entry_ptr)->pi_prev) \
+ (entry_ptr)->pi_prev->pi_next = (entry_ptr)->pi_next; \
+ if (((cache_ptr)->page_index)[k] == (entry_ptr)) \
+ ((cache_ptr)->page_index)[k] = (entry_ptr)->pi_next; \
+ (entry_ptr)->pi_next = NULL; \
+ (entry_ptr)->pi_prev = NULL; \
+ } \
+ k = H5C__HASH_FCN((entry_ptr)->addr); \
+ if((entry_ptr)->ht_next) \
+ (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
+ if((entry_ptr)->ht_prev) \
+ (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
+ if(((cache_ptr)->index)[k] == (entry_ptr)) \
+ ((cache_ptr)->index)[k] = (entry_ptr)->ht_next; \
+ (entry_ptr)->ht_next = NULL; \
+ (entry_ptr)->ht_prev = NULL; \
+ (cache_ptr)->index_len--; \
+ (cache_ptr)->index_size -= (entry_ptr)->size; \
+ ((cache_ptr)->index_ring_len[entry_ptr->ring])--; \
+ ((cache_ptr)->index_ring_size[entry_ptr->ring]) \
+ -= (entry_ptr)->size; \
+ if((entry_ptr)->is_dirty) { \
+ (cache_ptr)->dirty_index_size -= (entry_ptr)->size; \
+ ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring]) \
+ -= (entry_ptr)->size; \
+ } else { \
+ (cache_ptr)->clean_index_size -= (entry_ptr)->size; \
+ ((cache_ptr)->clean_index_ring_size[entry_ptr->ring]) \
+ -= (entry_ptr)->size; \
+ } \
+ if((entry_ptr)->flush_me_last) { \
+ (cache_ptr)->num_last_entries--; \
+ HDassert((cache_ptr)->num_last_entries <= 1); \
+ } \
+ H5C__IL_DLL_REMOVE((entry_ptr), (cache_ptr)->il_head, \
+ (cache_ptr)->il_tail, (cache_ptr)->il_len, \
+ (cache_ptr)->il_size, fail_val) \
+ H5C__UPDATE_STATS_FOR_HT_DELETION(cache_ptr) \
+ H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr) \
}
-#define H5C__SEARCH_INDEX_NO_STATS(cache_ptr, Addr, entry_ptr, fail_val) \
-{ \
- int k; \
- H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \
- k = H5C__HASH_FCN(Addr); \
- entry_ptr = ((cache_ptr)->index)[k]; \
- while (entry_ptr) { \
- if (H5F_addr_eq(Addr, (entry_ptr)->addr)) { \
- H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val) \
- if (entry_ptr != ((cache_ptr)->index)[k]) { \
- if ((entry_ptr)->ht_next) \
- (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
- HDassert((entry_ptr)->ht_prev != NULL); \
- (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
- ((cache_ptr)->index)[k]->ht_prev = (entry_ptr); \
- (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \
- (entry_ptr)->ht_prev = NULL; \
- ((cache_ptr)->index)[k] = (entry_ptr); \
- H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \
- } \
- break; \
- } \
- (entry_ptr) = (entry_ptr)->ht_next; \
- } \
+#define H5C__SEARCH_INDEX(cache_ptr, Addr, entry_ptr, fail_val) \
+{ \
+ int k; \
+ int depth = 0; \
+ H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \
+ k = H5C__HASH_FCN(Addr); \
+ entry_ptr = ((cache_ptr)->index)[k]; \
+ while(entry_ptr) { \
+ if(H5F_addr_eq(Addr, (entry_ptr)->addr)) { \
+ H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val) \
+ if(entry_ptr != ((cache_ptr)->index)[k]) { \
+ if((entry_ptr)->ht_next) \
+ (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
+ HDassert((entry_ptr)->ht_prev != NULL); \
+ (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
+ ((cache_ptr)->index)[k]->ht_prev = (entry_ptr); \
+ (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \
+ (entry_ptr)->ht_prev = NULL; \
+ ((cache_ptr)->index)[k] = (entry_ptr); \
+ H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \
+ } \
+ break; \
+ } \
+ (entry_ptr) = (entry_ptr)->ht_next; \
+ (depth)++; \
+ } \
+ H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, (entry_ptr != NULL), depth) \
+}
+
+#define H5C__SEARCH_INDEX_NO_STATS(cache_ptr, Addr, entry_ptr, fail_val) \
+{ \
+ int k; \
+ H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \
+ k = H5C__HASH_FCN(Addr); \
+ entry_ptr = ((cache_ptr)->index)[k]; \
+ while(entry_ptr) { \
+ if(H5F_addr_eq(Addr, (entry_ptr)->addr)) { \
+ H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val) \
+ if(entry_ptr != ((cache_ptr)->index)[k]) { \
+ if((entry_ptr)->ht_next) \
+ (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
+ HDassert((entry_ptr)->ht_prev != NULL); \
+ (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
+ ((cache_ptr)->index)[k]->ht_prev = (entry_ptr); \
+ (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \
+ (entry_ptr)->ht_prev = NULL; \
+ ((cache_ptr)->index)[k] = (entry_ptr); \
+ H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \
+ } \
+ break; \
+ } \
+ (entry_ptr) = (entry_ptr)->ht_next; \
+ } \
}
#define H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr) \
@@ -1524,9 +1625,27 @@ if (((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->di
* Added code to maintain the cache_ptr->slist_ring_len
* and cache_ptr->slist_ring_size arrays.
*
+ * JRM -- 4/29/20
+ * Reworked macro to support the slist_enabled field
+ * of H5C_t. If slist_enabled == TRUE, the macro
+ * functions as before. Otherwise, the macro is a no-op,
+ * and the slist must be empty.
+ *
*-------------------------------------------------------------------------
*/
+/* NOTE: The H5C__INSERT_ENTRY_IN_SLIST() macro is set up so that
+ *
+ * H5C_DO_SANITY_CHECKS
+ *
+ * and
+ *
+ * H5C_DO_SLIST_SANITY_CHECKS
+ *
+ * can be selected independantly. This is easy to miss as the
+ * two #defines are easy to confuse.
+ */
+
#if H5C_DO_SLIST_SANITY_CHECKS
#define ENTRY_IN_SLIST(cache_ptr, entry_ptr) \
@@ -1541,66 +1660,91 @@ if (((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->di
#if H5C_DO_SANITY_CHECKS
-#define H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, fail_val) \
-{ \
- HDassert((cache_ptr)); \
- HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \
- HDassert((entry_ptr)); \
- HDassert((entry_ptr)->size > 0); \
- HDassert(H5F_addr_defined((entry_ptr)->addr)); \
- HDassert(!((entry_ptr)->in_slist)); \
- HDassert(!ENTRY_IN_SLIST((cache_ptr), (entry_ptr))); \
- HDassert((entry_ptr)->ring > H5C_RING_UNDEFINED); \
- HDassert((entry_ptr)->ring < H5C_RING_NTYPES); \
- HDassert((cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= (cache_ptr)->slist_len); \
- HDassert((cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= (cache_ptr)->slist_size); \
- \
- if (H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) < 0) \
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), "can't insert entry in skip list") \
- \
- (entry_ptr)->in_slist = TRUE; \
- (cache_ptr)->slist_changed = TRUE; \
- (cache_ptr)->slist_len++; \
- (cache_ptr)->slist_size += (entry_ptr)->size; \
- ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])++; \
- ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (entry_ptr)->size; \
- (cache_ptr)->slist_len_increase++; \
- (cache_ptr)->slist_size_increase += (int64_t)((entry_ptr)->size); \
- \
- HDassert((cache_ptr)->slist_len > 0); \
- HDassert((cache_ptr)->slist_size > 0); \
- \
+#define H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, fail_val) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
+ \
+ if ( (cache_ptr)->slist_enabled ) { \
+ \
+ HDassert( (entry_ptr) ); \
+ HDassert( (entry_ptr)->size > 0 ); \
+ HDassert( H5F_addr_defined((entry_ptr)->addr) ); \
+ HDassert( !((entry_ptr)->in_slist) ); \
+ HDassert( ! ENTRY_IN_SLIST((cache_ptr), (entry_ptr)) ); \
+ HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
+ HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
+ HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_len ); \
+ HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_size ); \
+ \
+ if ( H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, \
+ &((entry_ptr)->addr)) < 0) \
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), \
+ "can't insert entry in skip list") \
+ \
+ (entry_ptr)->in_slist = TRUE; \
+ (cache_ptr)->slist_changed = TRUE; \
+ (cache_ptr)->slist_len++; \
+ (cache_ptr)->slist_size += (entry_ptr)->size; \
+ ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])++; \
+ ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (entry_ptr)->size;\
+ (cache_ptr)->slist_len_increase++; \
+ (cache_ptr)->slist_size_increase += (int64_t)((entry_ptr)->size); \
+ \
+ HDassert( (cache_ptr)->slist_len > 0 ); \
+ HDassert( (cache_ptr)->slist_size > 0 ); \
+ \
+ } else { /* slist disabled */ \
+ \
+ HDassert( (cache_ptr)->slist_len == 0 ); \
+ HDassert( (cache_ptr)->slist_size == 0 ); \
+ } \
} /* H5C__INSERT_ENTRY_IN_SLIST */
#else /* H5C_DO_SANITY_CHECKS */
-#define H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, fail_val) \
-{ \
- HDassert((cache_ptr)); \
- HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \
- HDassert((entry_ptr)); \
- HDassert((entry_ptr)->size > 0); \
- HDassert(H5F_addr_defined((entry_ptr)->addr)); \
- HDassert(!((entry_ptr)->in_slist)); \
- HDassert(!ENTRY_IN_SLIST((cache_ptr), (entry_ptr))); \
- HDassert((entry_ptr)->ring > H5C_RING_UNDEFINED); \
- HDassert((entry_ptr)->ring < H5C_RING_NTYPES); \
- HDassert((cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= (cache_ptr)->slist_len); \
- HDassert((cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= (cache_ptr)->slist_size); \
- \
- if (H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) < 0) \
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), "can't insert entry in skip list") \
- \
- (entry_ptr)->in_slist = TRUE; \
- (cache_ptr)->slist_changed = TRUE; \
- (cache_ptr)->slist_len++; \
- (cache_ptr)->slist_size += (entry_ptr)->size; \
- ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])++; \
- ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (entry_ptr)->size; \
- \
- HDassert((cache_ptr)->slist_len > 0); \
- HDassert((cache_ptr)->slist_size > 0); \
- \
+#define H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, fail_val) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
+ \
+ if ( (cache_ptr)->slist_enabled ) { \
+ \
+ HDassert( (entry_ptr) ); \
+ HDassert( (entry_ptr)->size > 0 ); \
+ HDassert( ! ENTRY_IN_SLIST((cache_ptr), (entry_ptr)) ); \
+ HDassert( H5F_addr_defined((entry_ptr)->addr) ); \
+ HDassert( !((entry_ptr)->in_slist) ); \
+ HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
+ HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
+ HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_len ); \
+ HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_size ); \
+ HDassert( (cache_ptr)->slist_ptr ); \
+ \
+ if ( H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, \
+ &((entry_ptr)->addr)) < 0) \
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), \
+ "can't insert entry in skip list") \
+ \
+ (entry_ptr)->in_slist = TRUE; \
+ (cache_ptr)->slist_changed = TRUE; \
+ (cache_ptr)->slist_len++; \
+ (cache_ptr)->slist_size += (entry_ptr)->size; \
+ ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])++; \
+ ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (entry_ptr)->size;\
+ \
+ HDassert( (cache_ptr)->slist_len > 0 ); \
+ HDassert( (cache_ptr)->slist_size > 0 ); \
+ \
+ } else { /* slist disabled */ \
+ \
+ HDassert( (cache_ptr)->slist_len == 0 ); \
+ HDassert( (cache_ptr)->slist_size == 0 ); \
+ } \
} /* H5C__INSERT_ENTRY_IN_SLIST */
#endif /* H5C_DO_SANITY_CHECKS */
@@ -1618,72 +1762,129 @@ if (((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->di
*
* Programmer: John Mainzer, 5/10/04
*
+ * Modifications:
+ *
+ * JRM -- 7/21/04
+ * Updated function for the addition of the hash table.
+ *
+ * JRM - 7/27/04
+ * Converted from the function H5C_remove_entry_from_tree()
+ * to the macro H5C__REMOVE_ENTRY_FROM_TREE in the hopes of
+ * wringing a little more performance out of the cache.
+ *
+ * QAK -- 11/27/04
+ * Switched over to using skip list routines.
+ *
+ * JRM -- 3/28/07
+ * Updated sanity checks for the new is_read_only and
+ * ro_ref_count fields in H5C_cache_entry_t.
+ *
+ * JRM -- 12/13/14
+ * Added code to set cache_ptr->slist_changed to TRUE
+ * when an entry is removed from the slist.
+ *
+ * JRM -- 4/29/20
+ * Reworked macro to support the slist_enabled field
+ * of H5C_t. If slist_enabled == TRUE, the macro
+ * functions as before. Otherwise, the macro is a no-op,
+ * and the slist must be empty.
+ *
*-------------------------------------------------------------------------
*/
#if H5C_DO_SANITY_CHECKS
-#define H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush) \
-{ \
- HDassert((cache_ptr)); \
- HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \
- HDassert((entry_ptr)); \
- HDassert(!((entry_ptr)->is_read_only)); \
- HDassert(((entry_ptr)->ro_ref_count) == 0); \
- HDassert((entry_ptr)->size > 0); \
- HDassert((entry_ptr)->in_slist); \
- HDassert((cache_ptr)->slist_ptr); \
- HDassert((entry_ptr)->ring > H5C_RING_UNDEFINED); \
- HDassert((entry_ptr)->ring < H5C_RING_NTYPES); \
- HDassert((cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= (cache_ptr)->slist_len); \
- HDassert((cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= (cache_ptr)->slist_size); \
- \
- if (H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr) != (entry_ptr)) \
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "can't delete entry from skip list") \
- \
- HDassert((cache_ptr)->slist_len > 0); \
- if (!(during_flush)) \
- (cache_ptr)->slist_changed = TRUE; \
- (cache_ptr)->slist_len--; \
- HDassert((cache_ptr)->slist_size >= (entry_ptr)->size); \
- (cache_ptr)->slist_size -= (entry_ptr)->size; \
- ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])--; \
- HDassert((cache_ptr)->slist_ring_size[(entry_ptr->ring)] >= (entry_ptr)->size); \
- ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (entry_ptr)->size; \
- (cache_ptr)->slist_len_increase--; \
- (cache_ptr)->slist_size_increase -= (int64_t)((entry_ptr)->size); \
- (entry_ptr)->in_slist = FALSE; \
+#define H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
+ \
+ if ( (cache_ptr)->slist_enabled ) { \
+ \
+ HDassert( (entry_ptr) ); \
+ HDassert( !((entry_ptr)->is_read_only) ); \
+ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
+ HDassert( (entry_ptr)->size > 0 ); \
+ HDassert( (entry_ptr)->in_slist ); \
+ HDassert( (cache_ptr)->slist_ptr ); \
+ HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
+ HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
+ HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_len ); \
+ HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_size ); \
+ HDassert( (cache_ptr)->slist_size >= (entry_ptr)->size ); \
+ \
+ if ( H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr) \
+ != (entry_ptr) ) \
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, \
+ "can't delete entry from skip list") \
+ \
+ HDassert( (cache_ptr)->slist_len > 0 ); \
+ if(!(during_flush)) \
+ (cache_ptr)->slist_changed = TRUE; \
+ (cache_ptr)->slist_len--; \
+ HDassert( (cache_ptr)->slist_size >= (entry_ptr)->size ); \
+ (cache_ptr)->slist_size -= (entry_ptr)->size; \
+ ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])--; \
+ HDassert( (cache_ptr)->slist_ring_size[(entry_ptr->ring)] >= \
+ (entry_ptr)->size ); \
+ ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (entry_ptr)->size;\
+ (cache_ptr)->slist_len_increase--; \
+ (cache_ptr)->slist_size_increase -= (int64_t)((entry_ptr)->size); \
+ (entry_ptr)->in_slist = FALSE; \
+ \
+ } else { /* slist disabled */ \
+ \
+ HDassert( (cache_ptr)->slist_len == 0 ); \
+ HDassert( (cache_ptr)->slist_size == 0 ); \
+ } \
} /* H5C__REMOVE_ENTRY_FROM_SLIST */
#else /* H5C_DO_SANITY_CHECKS */
-#define H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush) \
-{ \
- HDassert((cache_ptr)); \
- HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \
- HDassert((entry_ptr)); \
- HDassert(!((entry_ptr)->is_read_only)); \
- HDassert(((entry_ptr)->ro_ref_count) == 0); \
- HDassert((entry_ptr)->in_slist); \
- HDassert((cache_ptr)->slist_ptr); \
- HDassert((entry_ptr)->ring > H5C_RING_UNDEFINED); \
- HDassert((entry_ptr)->ring < H5C_RING_NTYPES); \
- HDassert((cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= (cache_ptr)->slist_len); \
- HDassert((cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= (cache_ptr)->slist_size); \
- \
- if (H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr) != (entry_ptr)) \
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "can't delete entry from skip list") \
- \
- HDassert((cache_ptr)->slist_len > 0); \
- if (!(during_flush)) \
- (cache_ptr)->slist_changed = TRUE; \
- (cache_ptr)->slist_len--; \
- HDassert((cache_ptr)->slist_size >= (entry_ptr)->size); \
- (cache_ptr)->slist_size -= (entry_ptr)->size; \
- ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])--; \
- HDassert((cache_ptr)->slist_ring_size[(entry_ptr->ring)] >= (entry_ptr)->size); \
- ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (entry_ptr)->size; \
- (entry_ptr)->in_slist = FALSE; \
-} /* H5C__REMOVE_ENTRY_FROM_SLIST */
+#define H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
+ \
+ if ( (cache_ptr)->slist_enabled ) { \
+ \
+ HDassert( (entry_ptr) ); \
+ HDassert( !((entry_ptr)->is_read_only) ); \
+ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
+ HDassert( (entry_ptr)->in_slist ); \
+ HDassert( (cache_ptr)->slist_ptr ); \
+ HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
+ HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
+ HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_len ); \
+ HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_size ); \
+ \
+ if ( H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr) \
+ != (entry_ptr) ) \
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, \
+ "can't delete entry from skip list") \
+ \
+ HDassert( (cache_ptr)->slist_len > 0 ); \
+ if(!(during_flush)) \
+ (cache_ptr)->slist_changed = TRUE; \
+ (cache_ptr)->slist_len--; \
+ HDassert( (cache_ptr)->slist_size >= (entry_ptr)->size ); \
+ (cache_ptr)->slist_size -= (entry_ptr)->size; \
+ ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])--; \
+ HDassert( (cache_ptr)->slist_ring_size[(entry_ptr->ring)] >= \
+ (entry_ptr)->size ); \
+ ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (entry_ptr)->size;\
+ (entry_ptr)->in_slist = FALSE; \
+ \
+ } else { /* slist disabled */ \
+ \
+ HDassert( (cache_ptr)->slist_len == 0 ); \
+ HDassert( (cache_ptr)->slist_size == 0 ); \
+ } \
+} /* H5C__REMOVE_ENTRY_FROM_SLIST */
+
#endif /* H5C_DO_SANITY_CHECKS */
@@ -1730,59 +1931,88 @@ if (((cache_ptr)->index_size != ((cache_ptr)->clean_index_size + (cache_ptr)->di
#if H5C_DO_SANITY_CHECKS
-#define H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size) \
-{ \
- HDassert((cache_ptr)); \
- HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \
- HDassert((old_size) > 0); \
- HDassert((new_size) > 0); \
- HDassert((old_size) <= (cache_ptr)->slist_size); \
- HDassert((cache_ptr)->slist_len > 0); \
- HDassert(((cache_ptr)->slist_len > 1) || ((cache_ptr)->slist_size == (old_size))); \
- HDassert((entry_ptr)->ring > H5C_RING_UNDEFINED); \
- HDassert((entry_ptr)->ring < H5C_RING_NTYPES); \
- HDassert((cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= (cache_ptr)->slist_len); \
- HDassert((cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= (cache_ptr)->slist_size); \
- \
- (cache_ptr)->slist_size -= (old_size); \
- (cache_ptr)->slist_size += (new_size); \
- \
- HDassert((cache_ptr)->slist_ring_size[(entry_ptr->ring)] >= (old_size)); \
- ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (old_size); \
- ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (new_size); \
- \
- (cache_ptr)->slist_size_increase -= (int64_t)(old_size); \
- (cache_ptr)->slist_size_increase += (int64_t)(new_size); \
- \
- HDassert((new_size) <= (cache_ptr)->slist_size); \
- HDassert(((cache_ptr)->slist_len > 1) || ((cache_ptr)->slist_size == (new_size))); \
+#define H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
+ \
+ if ( (cache_ptr)->slist_enabled ) { \
+ \
+ HDassert( (old_size) > 0 ); \
+ HDassert( (new_size) > 0 ); \
+ HDassert( (old_size) <= (cache_ptr)->slist_size ); \
+ HDassert( (cache_ptr)->slist_len > 0 ); \
+ HDassert( ((cache_ptr)->slist_len > 1) || \
+ ( (cache_ptr)->slist_size == (old_size) ) ); \
+ HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
+ HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
+ HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_len ); \
+ HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_size ); \
+ \
+ (cache_ptr)->slist_size -= (old_size); \
+ (cache_ptr)->slist_size += (new_size); \
+ \
+ HDassert( (cache_ptr)->slist_ring_size[(entry_ptr->ring)] \
+ >= (old_size) ); \
+ \
+ ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (old_size); \
+ ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (new_size); \
+ \
+ (cache_ptr)->slist_size_increase -= (int64_t)(old_size); \
+ (cache_ptr)->slist_size_increase += (int64_t)(new_size); \
+ \
+ HDassert( (new_size) <= (cache_ptr)->slist_size ); \
+ HDassert( ( (cache_ptr)->slist_len > 1 ) || \
+ ( (cache_ptr)->slist_size == (new_size) ) ); \
+ \
+ } else { /* slist disabled */ \
+ \
+ HDassert( (cache_ptr)->slist_len == 0 ); \
+ HDassert( (cache_ptr)->slist_size == 0 ); \
+ } \
} /* H5C__UPDATE_SLIST_FOR_SIZE_CHANGE */
#else /* H5C_DO_SANITY_CHECKS */
-#define H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size) \
-{ \
- HDassert((cache_ptr)); \
- HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC); \
- HDassert((old_size) > 0); \
- HDassert((new_size) > 0); \
- HDassert((old_size) <= (cache_ptr)->slist_size); \
- HDassert((cache_ptr)->slist_len > 0); \
- HDassert(((cache_ptr)->slist_len > 1) || ((cache_ptr)->slist_size == (old_size))); \
- HDassert((entry_ptr)->ring > H5C_RING_UNDEFINED); \
- HDassert((entry_ptr)->ring < H5C_RING_NTYPES); \
- HDassert((cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= (cache_ptr)->slist_len); \
- HDassert((cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= (cache_ptr)->slist_size); \
- \
- (cache_ptr)->slist_size -= (old_size); \
- (cache_ptr)->slist_size += (new_size); \
- \
- HDassert((cache_ptr)->slist_ring_size[(entry_ptr->ring)] >= (old_size)); \
- ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (old_size); \
- ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (new_size); \
- \
- HDassert((new_size) <= (cache_ptr)->slist_size); \
- HDassert(((cache_ptr)->slist_len > 1) || ((cache_ptr)->slist_size == (new_size))); \
+#define H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
+ \
+ if ( (cache_ptr)->slist_enabled ) { \
+ \
+ HDassert( (old_size) > 0 ); \
+ HDassert( (new_size) > 0 ); \
+ HDassert( (old_size) <= (cache_ptr)->slist_size ); \
+ HDassert( (cache_ptr)->slist_len > 0 ); \
+ HDassert( ((cache_ptr)->slist_len > 1) || \
+ ( (cache_ptr)->slist_size == (old_size) ) ); \
+ HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED ); \
+ HDassert( (entry_ptr)->ring < H5C_RING_NTYPES ); \
+ HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_len ); \
+ HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <= \
+ (cache_ptr)->slist_size ); \
+ \
+ (cache_ptr)->slist_size -= (old_size); \
+ (cache_ptr)->slist_size += (new_size); \
+ \
+ HDassert( (cache_ptr)->slist_ring_size[(entry_ptr->ring)] >= \
+ (old_size) ); \
+ ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (old_size); \
+ ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (new_size); \
+ \
+ HDassert( (new_size) <= (cache_ptr)->slist_size ); \
+ HDassert( ( (cache_ptr)->slist_len > 1 ) || \
+ ( (cache_ptr)->slist_size == (new_size) ) ); \
+ \
+ } else { /* slist disabled */ \
+ \
+ HDassert( (cache_ptr)->slist_len == 0 ); \
+ HDassert( (cache_ptr)->slist_size == 0 ); \
+ } \
} /* H5C__UPDATE_SLIST_FOR_SIZE_CHANGE */
#endif /* H5C_DO_SANITY_CHECKS */
@@ -3811,6 +4041,36 @@ typedef struct H5C_tag_info_t {
* are flushed. (this has been changed -- dirty entries are now removed from
* the skip list as they are flushed. JRM - 10/25/05)
*
+ * Update 4/21/20:
+ *
+ * Profiling indicates that the cost of maintaining the skip list is
+ * significant. As it is only used on flush and close, maintaining it
+ * only when needed is an obvious optimization.
+ *
+ * To do this, we add a flag to control maintenance of the skip list.
+ * This flag is initially set to FALSE, which disables all operations
+ * on the skip list.
+ *
+ * At the beginning of either flush or close, we scan the index list,
+ * insert all dirty entries in the skip list, and enable operations
+ * on the skip list by setting the above control flag to TRUE.
+ *
+ * At the end of a complete flush, we verify that the skip list is empty,
+ * and set the control flag back to false, so as to avoid skip list
+ * maintenance overhead until the next flush or close.
+ *
+ * In the case of a partial flush (i.e. flush marked entries), we remove
+ * all remaining entries from the skip list, and then set the control flag
+ * back to false -- again avoiding skip list maintenance overhead until
+ * the next flush or close.
+ *
+ * slist_enabled: Boolean flag used to control operation of the skip
+ * list. If this field is FALSE, operations on the
+ * slist are no-ops, and the slist must be empty. If
+ * it is TRUE, operations on the slist proceed as usual,
+ * and all dirty entries in the metadata cache must be
+ * listed in the slist.
+ *
* slist_changed: Boolean flag used to indicate whether the contents of
* the slist has changed since the last time this flag was
* reset. This is used in the cache flush code to detect
@@ -4825,6 +5085,7 @@ struct H5C_t {
H5C_cache_entry_t * entry_watched_for_removal;
/* Fields for maintaining list of in-order entries, for flushing */
+ hbool_t slist_enabled;
hbool_t slist_changed;
uint32_t slist_len;
size_t slist_size;
diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h
index 8ac78ae..cbc34b2 100644
--- a/src/H5Cprivate.h
+++ b/src/H5Cprivate.h
@@ -99,9 +99,15 @@
#define H5C_RESIZE_CFG__VALIDATE_INCREMENT 0x2
#define H5C_RESIZE_CFG__VALIDATE_DECREMENT 0x4
#define H5C_RESIZE_CFG__VALIDATE_INTERACTIONS 0x8
-#define H5C_RESIZE_CFG__VALIDATE_ALL \
- (H5C_RESIZE_CFG__VALIDATE_GENERAL | H5C_RESIZE_CFG__VALIDATE_INCREMENT | \
- H5C_RESIZE_CFG__VALIDATE_DECREMENT | H5C_RESIZE_CFG__VALIDATE_INTERACTIONS)
+/* clang-format off */
+#define H5C_RESIZE_CFG__VALIDATE_ALL \
+( \
+ H5C_RESIZE_CFG__VALIDATE_GENERAL | \
+ H5C_RESIZE_CFG__VALIDATE_INCREMENT | \
+ H5C_RESIZE_CFG__VALIDATE_DECREMENT | \
+ H5C_RESIZE_CFG__VALIDATE_INTERACTIONS \
+)
+/* clang-format on */
/* Cache configuration versions */
#define H5C__CURR_AUTO_SIZE_CTL_VER 1
@@ -133,51 +139,51 @@
* function are ignored in that function.
*
* These flags apply to all function calls:
- * H5C__NO_FLAGS_SET (generic "no flags set" for all fcn calls)
+ * H5C__NO_FLAGS_SET (generic "no flags set" for all fcn calls)
*
*
* These flags apply to H5C_insert_entry():
- * H5C__SET_FLUSH_MARKER_FLAG
- * H5C__PIN_ENTRY_FLAG
- * H5C__FLUSH_LAST_FLAG ; super block only
- * H5C__FLUSH_COLLECTIVELY_FLAG ; super block only
+ * H5C__SET_FLUSH_MARKER_FLAG
+ * H5C__PIN_ENTRY_FLAG
+ * H5C__FLUSH_LAST_FLAG ; super block only
+ * H5C__FLUSH_COLLECTIVELY_FLAG ; super block only
*
* These flags apply to H5C_protect()
- * H5C__READ_ONLY_FLAG
- * H5C__FLUSH_LAST_FLAG ; super block only
- * H5C__FLUSH_COLLECTIVELY_FLAG ; super block only
+ * H5C__READ_ONLY_FLAG
+ * H5C__FLUSH_LAST_FLAG ; super block only
+ * H5C__FLUSH_COLLECTIVELY_FLAG ; super block only
*
* These flags apply to H5C_unprotect():
- * H5C__SET_FLUSH_MARKER_FLAG
- * H5C__DELETED_FLAG
- * H5C__DIRTIED_FLAG
- * H5C__PIN_ENTRY_FLAG
- * H5C__UNPIN_ENTRY_FLAG
- * H5C__FREE_FILE_SPACE_FLAG
- * H5C__TAKE_OWNERSHIP_FLAG
+ * H5C__SET_FLUSH_MARKER_FLAG
+ * H5C__DELETED_FLAG
+ * H5C__DIRTIED_FLAG
+ * H5C__PIN_ENTRY_FLAG
+ * H5C__UNPIN_ENTRY_FLAG
+ * H5C__FREE_FILE_SPACE_FLAG
+ * H5C__TAKE_OWNERSHIP_FLAG
*
* These flags apply to H5C_expunge_entry():
- * H5C__FREE_FILE_SPACE_FLAG
+ * H5C__FREE_FILE_SPACE_FLAG
*
* These flags apply to H5C_evict():
- * H5C__EVICT_ALLOW_LAST_PINS_FLAG
+ * H5C__EVICT_ALLOW_LAST_PINS_FLAG
*
* These flags apply to H5C_flush_cache():
- * H5C__FLUSH_INVALIDATE_FLAG
- * H5C__FLUSH_CLEAR_ONLY_FLAG
- * H5C__FLUSH_MARKED_ENTRIES_FLAG
- * H5C__FLUSH_IGNORE_PROTECTED_FLAG (can't use this flag in combination
- * with H5C__FLUSH_INVALIDATE_FLAG)
- * H5C__DURING_FLUSH_FLAG
+ * H5C__FLUSH_INVALIDATE_FLAG
+ * H5C__FLUSH_CLEAR_ONLY_FLAG
+ * H5C__FLUSH_MARKED_ENTRIES_FLAG
+ * H5C__FLUSH_IGNORE_PROTECTED_FLAG (can't use this flag in combination
+ * with H5C__FLUSH_INVALIDATE_FLAG)
+ * H5C__DURING_FLUSH_FLAG
*
* These flags apply to H5C_flush_single_entry():
- * H5C__FLUSH_INVALIDATE_FLAG
- * H5C__FLUSH_CLEAR_ONLY_FLAG
- * H5C__FLUSH_MARKED_ENTRIES_FLAG
- * H5C__TAKE_OWNERSHIP_FLAG
- * H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG
- * H5C__GENERATE_IMAGE_FLAG
- * H5C__UPDATE_PAGE_BUFFER_FLAG
+ * H5C__FLUSH_INVALIDATE_FLAG
+ * H5C__FLUSH_CLEAR_ONLY_FLAG
+ * H5C__FLUSH_MARKED_ENTRIES_FLAG
+ * H5C__TAKE_OWNERSHIP_FLAG
+ * H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG
+ * H5C__GENERATE_IMAGE_FLAG
+ * H5C__UPDATE_PAGE_BUFFER_FLAG
*/
#define H5C__NO_FLAGS_SET 0x00000
#define H5C__SET_FLUSH_MARKER_FLAG 0x00001
@@ -269,206 +275,204 @@ typedef struct H5C_t H5C_t;
* used by the metadata cache for each class of metadata cache entry.
* The fields of the structure are discussed below:
*
- * id: Integer field containing the unique ID of the class of metadata
- * cache entries.
+ * id: Integer field containing the unique ID of the class of metadata
+ * cache entries.
*
* name: Pointer to a string containing the name of the class of metadata
- * cache entries.
+ * cache entries.
*
* mem_type: Instance of H5FD_mem_t, that is used to supply the
- * mem type passed into H5F_block_read().
+ * mem type passed into H5F_block_read().
*
* flags: Flags indicating class-specific behavior.
*
- * Possible flags are:
+ * Possible flags are:
*
- * H5C__CLASS_NO_FLAGS_SET: No special processing.
+ * H5C__CLASS_NO_FLAGS_SET: No special processing.
*
- * H5C__CLASS_SPECULATIVE_LOAD_FLAG: This flag is used only in
+ * H5C__CLASS_SPECULATIVE_LOAD_FLAG: This flag is used only in
* H5C_load_entry(). When it is set, entries are
- * permitted to change their sizes on the first attempt
- * to load.
- *
- * If the new size is larger than the old, the read buffer
- * is reallocated to the new size, loaded from file, and the
- * deserialize routine is called a second time on the
- * new buffer. The entry returned by the first call to
- * the deserialize routine is discarded (via the free_icr
- * call) after the new size is retrieved (via the image_len
- * call). Note that the new size is used as the size of the
- * entry in the cache.
- *
- * If the new size is smaller than the old, no new loads
- * or deserializes are performed, but the new size becomes
- * the size of the entry in the cache.
- *
- * When this flag is set, an attempt to read past the
- * end of file could occur. In this case, if the size
- * returned get_load_size callback would result in a
- * read past the end of file, the size is truncated to
- * avoid this, and processing proceeds as normal.
+ * permitted to change their sizes on the first attempt
+ * to load.
+ *
+ * If the new size is larger than the old, the read buffer
+ * is reallocated to the new size, loaded from file, and the
+ * deserialize routine is called a second time on the
+ * new buffer. The entry returned by the first call to
+ * the deserialize routine is discarded (via the free_icr
+ * call) after the new size is retrieved (via the image_len
+ * call). Note that the new size is used as the size of the
+ * entry in the cache.
+ *
+ * If the new size is smaller than the old, no new loads
+ * or deserializes are performed, but the new size becomes
+ * the size of the entry in the cache.
+ *
+ * When this flag is set, an attempt to read past the
+ * end of file could occur. In this case, if the size
+ * returned by the get_load_size callback would result in a
+ * read past the end of file, the size is truncated to
+ * avoid this, and processing proceeds as normal.
*
* The following flags may only appear in test code.
*
- * H5C__CLASS_SKIP_READS: This flags is intended only for use in test
- * code. When it is set, reads on load will be skipped,
- * and an uninitialize buffer will be passed to the
- * deserialize function.
+ * H5C__CLASS_SKIP_READS: This flag is intended only for use in test
+ * code. When it is set, reads on load will be skipped,
+ * and an uninitialized buffer will be passed to the
+ * deserialize function.
*
- * H5C__CLASS_SKIP_WRITES: This flags is intended only for use in test
- * code. When it is set, writes of buffers prepared by the
- * serialize callback will be skipped.
+ * H5C__CLASS_SKIP_WRITES: This flag is intended only for use in test
+ * code. When it is set, writes of buffers prepared by the
+ * serialize callback will be skipped.
*
* GET_INITIAL_LOAD_SIZE: Pointer to the 'get initial load size' function.
*
- * This function determines the size based on the information in the
- * parameter "udata" or an initial speculative guess. The size is
- * returned in the parameter "image_len_ptr".
+ * This function determines the size based on the information in the
+ * parameter "udata" or an initial speculative guess. The size is
+ * returned in the parameter "image_len_ptr".
*
- * For an entry with H5C__CLASS_NO_FLAGS_SET:
- * This function returns in "image_len_ptr" the on disk size of the
+ * For an entry with H5C__CLASS_NO_FLAGS_SET:
+ * This function returns in "image_len_ptr" the on disk size of the
* entry.
*
- * For an entry with H5C__CLASS_SPECULATIVE_LOAD_FLAG:
- * This function returns in "image_len_ptr" an initial guess of the
+ * For an entry with H5C__CLASS_SPECULATIVE_LOAD_FLAG:
+ * This function returns in "image_len_ptr" an initial guess of the
* entry's on disk size. This many bytes will be loaded from
* the file and then passed to 'get_final_load_size' callback
* for the actual (final) image length to be determined.
*
- * The typedef for the get_initial_load_size callback is as follows:
+ * The typedef for the get_initial_load_size callback is as follows:
*
- * typedef herr_t (*H5C_get_initial_load_size_func_t)(void *udata_ptr,
- * size_t *image_len_ptr);
+ * typedef herr_t (*H5C_get_initial_load_size_func_t)(void *udata_ptr,
+ * size_t *image_len_ptr);
*
- * The parameters of the get_initial_load_size callback are as follows:
+ * The parameters of the get_initial_load_size callback are as follows:
*
- * udata_ptr: Pointer to user data provided in the protect call, which
- * will also be passed through to the 'get_final_load_size',
+ * udata_ptr: Pointer to user data provided in the protect call, which
+ * will also be passed through to the 'get_final_load_size',
* 'verify_chksum', and 'deserialize' callbacks.
*
- * image_len_ptr: Pointer to the length in bytes of the in-file image to
+ * image_len_ptr: Pointer to the length in bytes of the in-file image to
* be deserialized is to be returned.
*
* This value is used by the cache to determine the size of
* the disk image for the metadata, in order to read the disk
* image from the file.
*
- * Processing in the get_load_size function should proceed as follows:
+ * Processing in the get_load_size function should proceed as follows:
*
- * If successful, the function will place the length in the *image_len_ptr
+ * If successful, the function will place the length in the *image_len_ptr
* associated with supplied user data and then return SUCCEED.
*
- * On failure, the function must return FAIL and push error information
- * onto the error stack with the error API routines, without modifying
+ * On failure, the function must return FAIL and push error information
+ * onto the error stack with the error API routines, without modifying
* the value pointed to by image_len_ptr.
*
*
* GET_FINAL_LOAD_SIZE: Pointer to the 'get final load size' function.
*
- * This function determines the final size of a speculatively loaded
+ * This function determines the final size of a speculatively loaded
* metadata cache entry based on the parameter "image" and the "udata"
* parameters. This callback _must_ be implemented for cache clients
* which set the H5C__CLASS_SPECULATIVE_LOAD_FLAG and must return the
* actual length of on-disk image after being called once.
*
- * This function might deserialize the needed metadata information to
- * determine the actual size. The size is returned in the parameter
+ * This function might deserialize the needed metadata information to
+ * determine the actual size. The size is returned in the parameter
* "actual_len_ptr".
*
- * The typedef for the get_load_size callback is as follows:
+ * The typedef for the get_load_size callback is as follows:
*
- * typedef
- * herr_t (*H5C_get_final_load_size_func_t)(const void *image_ptr,
- * size_t image_len,
- * void *udata_ptr,
- * size_t *actual_len_ptr);
+ * typedef herr_t (*H5C_get_final_load_size_func_t)(const void *image_ptr,
+ * size_t image_len,
+ * void *udata_ptr,
+ * size_t *actual_len_ptr);
*
- * The parameters of the get_load_size callback are as follows:
+ * The parameters of the get_load_size callback are as follows:
*
- * image_ptr: Pointer to a buffer containing the (possibly partial)
+ * image_ptr: Pointer to a buffer containing the (possibly partial)
* metadata read in.
*
- * image_len: The length in bytes of the (possibly partial) in-file image
+ * image_len: The length in bytes of the (possibly partial) in-file image
* to be queried for an actual length.
*
- * udata_ptr: Pointer to user data provided in the protect call, which
- * will also be passed through to the 'verify_chksum' and
+ * udata_ptr: Pointer to user data provided in the protect call, which
+ * will also be passed through to the 'verify_chksum' and
* 'deserialize' callbacks.
*
- * actual_len_ptr: Pointer to the location containing the actual length
- * of the metadata entry on disk.
+ * actual_len_ptr: Pointer to the location containing the actual length
+ * of the metadata entry on disk.
*
- * Processing in the get_final_load_size function should proceed as
- * follows:
+ * Processing in the get_final_load_size function should proceed as follows:
*
- * If successful, the function will place the length in the *actual_len_ptr
+ * If successful, the function will place the length in the *actual_len_ptr
* associated with supplied image and/or user data and then return SUCCEED.
*
- * On failure, the function must return FAIL and push error information
- * onto the error stack with the error API routines, without modifying
+ * On failure, the function must return FAIL and push error information
+ * onto the error stack with the error API routines, without modifying
* the value pointed to by actual_len_ptr.
*
*
* VERIFY_CHKSUM: Pointer to the verify_chksum function.
*
- * This function verifies the checksum computed for the metadata is
- * the same as the checksum stored in the metadata.
+ * This function verifies the checksum computed for the metadata is
+ * the same as the checksum stored in the metadata.
*
- * It computes the checksum based on the metadata stored in the
- * parameter "image_ptr" and the actual length of the metadata in the
- * parameter "len" which is obtained from the "get_load_size" callback.
+ * It computes the checksum based on the metadata stored in the
+ * parameter "image_ptr" and the actual length of the metadata in the
+ * parameter "len" which is obtained from the "get_load_size" callback.
*
- * The typedef for the verify_chksum callback is as follows:
+ * The typedef for the verify_chksum callback is as follows:
*
- * typedef htri_t (*H5C_verify_chksum_func_t)(const void *image_ptr,
- * size_t len,
- * void *udata_ptr);
+ * typedef htri_t (*H5C_verify_chksum_func_t)(const void *image_ptr,
+ * size_t len,
+ * void *udata_ptr);
*
- * The parameters of the verify_chksum callback are as follows:
+ * The parameters of the verify_chksum callback are as follows:
*
- * image_ptr: Pointer to a buffer containing the metadata read in.
+ * image_ptr: Pointer to a buffer containing the metadata read in.
*
- * len: The actual length of the metadata.
+ * len: The actual length of the metadata.
*
- * udata_ptr: Pointer to user data.
+ * udata_ptr: Pointer to user data.
*
*
* DESERIALIZE: Pointer to the deserialize function.
*
- * This function must be able to deserialize a buffer containing the
+ * This function must be able to deserialize a buffer containing the
* on-disk image of a metadata cache entry, allocate and initialize the
* equivalent in core representation, and return a pointer to that
* representation.
*
- * The typedef for the deserialize callback is as follows:
+ * The typedef for the deserialize callback is as follows:
*
- * typedef void *(*H5C_deserialize_func_t)(const void * image_ptr,
- * size_t len,
+ * typedef void *(*H5C_deserialize_func_t)(const void * image_ptr,
+ * size_t len,
* void * udata_ptr,
* boolean * dirty_ptr);
*
- * The parameters of the deserialize callback are as follows:
+ * The parameters of the deserialize callback are as follows:
*
- * image_ptr: Pointer to a buffer of length len containing the
- * contents of the file starting at addr and continuing
- * for len bytes.
+ * image_ptr: Pointer to a buffer of length len containing the
+ * contents of the file starting at addr and continuing
+ * for len bytes.
*
- * len: Length in bytes of the in file image to be deserialized.
+ * len: Length in bytes of the in file image to be deserialized.
*
* This parameter is supplied mainly for sanity checking.
* Sanity checks should be performed when compiled in debug
* mode, but the parameter may be unused when compiled in
* production mode.
*
- * udata_ptr: Pointer to user data provided in the protect call, which
- * must be passed through to the deserialize callback.
+ * udata_ptr: Pointer to user data provided in the protect call, which
+ * must be passed through to the deserialize callback.
*
* dirty_ptr: Pointer to boolean which the deserialize function
- * must use to mark the entry dirty if it has to modify
- * the entry to clean up file corruption left over from
- * an old bug in the HDF5 library.
+ * must use to mark the entry dirty if it has to modify
+ * the entry to clean up file corruption left over from
+ * an old bug in the HDF5 library.
*
- * Processing in the deserialize function should proceed as follows:
+ * Processing in the deserialize function should proceed as follows:
*
* If the image contains valid data, and is of the correct length,
* the deserialize function must allocate space for an in-core
@@ -496,7 +500,7 @@ typedef struct H5C_t H5C_t;
*
* IMAGE_LEN: Pointer to the image length callback.
*
- * The image_len callback is used to obtain the size of newly inserted
+ * The image_len callback is used to obtain the size of newly inserted
* entries and assert verification.
*
* The typedef for the image_len callback is as follows:
@@ -504,58 +508,58 @@ typedef struct H5C_t H5C_t;
* typedef herr_t (*H5C_image_len_func_t)(void *thing,
* size_t *image_len_ptr);
*
- * The parameters of the image_len callback are as follows:
+ * The parameters of the image_len callback are as follows:
*
- * thing: Pointer to the in core representation of the entry.
+ * thing: Pointer to the in core representation of the entry.
*
- * image_len_ptr: Pointer to size_t in which the callback will return
- * the length (in bytes) of the cache entry.
+ * image_len_ptr: Pointer to size_t in which the callback will return
+ * the length (in bytes) of the cache entry.
*
- * Processing in the image_len function should proceed as follows:
+ * Processing in the image_len function should proceed as follows:
*
- * If successful, the function will place the length of the on disk
- * image associated with the in core representation provided in the
- * thing parameter in *image_len_ptr, and then return SUCCEED.
+ * If successful, the function will place the length of the on disk
+ * image associated with the in core representation provided in the
+ * thing parameter in *image_len_ptr, and then return SUCCEED.
*
- * If the function fails, it must return FAIL and push error information
+ * If the function fails, it must return FAIL and push error information
* onto the error stack with the error API routines, and return without
* modifying the values pointed to by the image_len_ptr parameter.
*
*
* PRE_SERIALIZE: Pointer to the pre-serialize callback.
*
- * The pre-serialize callback is invoked by the metadata cache before
- * it needs a current on-disk image of the metadata entry for purposes
- * either constructing a journal or flushing the entry to disk.
- *
- * If the client needs to change the address or length of the entry prior
- * to flush, the pre-serialize callback is responsible for these actions,
- * so that the actual serialize callback (described below) is only
- * responsible for serializing the data structure, not moving it on disk
- * or resizing it.
- *
- * In addition, the client may use the pre-serialize callback to
- * ensure that the entry is ready to be flushed -- in particular,
- * if the entry contains references to other entries that are in
- * temporary file space, the pre-serialize callback must move those
- * entries into real file space so that the serialzed entry will
- * contain no invalid data.
- *
- * One would think that the base address and length of
- * the length of the entry's image on disk would be well known.
- * However, that need not be the case as free space section info
- * entries will change size (and possibly location) depending on the
- * number of blocks of free space being manages, and fractal heap
- * direct blocks can change compressed size (and possibly location)
- * on serialization if compression is enabled. Similarly, it may
- * be necessary to move entries from temporary to real file space.
- *
- * The pre-serialize callback must report any such changes to the
- * cache, which must then update its internal structures as needed.
- *
- * The typedef for the pre-serialize callback is as follows:
- *
- * typedef herr_t (*H5C_pre_serialize_func_t)(H5F_t *f,
+ * The pre-serialize callback is invoked by the metadata cache before
+ * it needs a current on-disk image of the metadata entry for purposes
+ * either constructing a journal or flushing the entry to disk.
+ *
+ * If the client needs to change the address or length of the entry prior
+ * to flush, the pre-serialize callback is responsible for these actions,
+ * so that the actual serialize callback (described below) is only
+ * responsible for serializing the data structure, not moving it on disk
+ * or resizing it.
+ *
+ * In addition, the client may use the pre-serialize callback to
+ * ensure that the entry is ready to be flushed -- in particular,
+ * if the entry contains references to other entries that are in
+ * temporary file space, the pre-serialize callback must move those
+ * entries into real file space so that the serialized entry will
+ * contain no invalid data.
+ *
+ * One would think that the base address and length of
+ * the entry's image on disk would be well known.
+ * However, that need not be the case as free space section info
+ * entries will change size (and possibly location) depending on the
+ * number of blocks of free space being managed, and fractal heap
+ * direct blocks can change compressed size (and possibly location)
+ * on serialization if compression is enabled. Similarly, it may
+ * be necessary to move entries from temporary to real file space.
+ *
+ * The pre-serialize callback must report any such changes to the
+ * cache, which must then update its internal structures as needed.
+ *
+ * The typedef for the pre-serialize callback is as follows:
+ *
+ * typedef herr_t (*H5C_pre_serialize_func_t)(H5F_t *f,
* void * thing,
* haddr_t addr,
* size_t len,
@@ -563,154 +567,154 @@ typedef struct H5C_t H5C_t;
* size_t * new_len_ptr,
* unsigned * flags_ptr);
*
- * The parameters of the pre-serialize callback are as follows:
+ * The parameters of the pre-serialize callback are as follows:
*
- * f: File pointer -- needed if other metadata cache entries
- * must be modified in the process of serializing the
- * target entry.
+ * f: File pointer -- needed if other metadata cache entries
+ * must be modified in the process of serializing the
+ * target entry.
*
- * thing: Pointer to void containing the address of the in core
- * representation of the target metadata cache entry.
- * This is the same pointer returned by a protect of the
- * addr and len given above.
+ * thing: Pointer to void containing the address of the in core
+ * representation of the target metadata cache entry.
+ * This is the same pointer returned by a protect of the
+ * addr and len given above.
*
- * addr: Base address in file of the entry to be serialized.
+ * addr: Base address in file of the entry to be serialized.
*
- * This parameter is supplied mainly for sanity checking.
- * Sanity checks should be performed when compiled in debug
- * mode, but the parameter may be unused when compiled in
- * production mode.
+ * This parameter is supplied mainly for sanity checking.
+ * Sanity checks should be performed when compiled in debug
+ * mode, but the parameter may be unused when compiled in
+ * production mode.
*
- * len: Length in bytes of the in file image of the entry to be
- * serialized. Also the size the image passed to the
- * serialize callback (discussed below) unless that
- * value is altered by this function.
+ * len: Length in bytes of the in file image of the entry to be
+ * serialized. Also the size the image passed to the
+ * serialize callback (discussed below) unless that
+ * value is altered by this function.
*
- * This parameter is supplied mainly for sanity checking.
- * Sanity checks should be performed when compiled in debug
- * mode, but the parameter may be unused when compiled in
- * production mode.
+ * This parameter is supplied mainly for sanity checking.
+ * Sanity checks should be performed when compiled in debug
+ * mode, but the parameter may be unused when compiled in
+ * production mode.
*
- * new_addr_ptr: Pointer to haddr_t. If the entry is moved by
- * the serialize function, the new on disk base address must
- * be stored in *new_addr_ptr, and the appropriate flag set
- * in *flags_ptr.
+ * new_addr_ptr: Pointer to haddr_t. If the entry is moved by
+ * the serialize function, the new on disk base address must
+ * be stored in *new_addr_ptr, and the appropriate flag set
+ * in *flags_ptr.
*
- * If the entry is not moved by the serialize function,
- * *new_addr_ptr is undefined on pre-serialize callback
- * return.
+ * If the entry is not moved by the serialize function,
+ * *new_addr_ptr is undefined on pre-serialize callback
+ * return.
*
- * new_len_ptr: Pointer to size_t. If the entry is resized by the
- * serialize function, the new length of the on disk image
- * must be stored in *new_len_ptr, and the appropriate flag set
+ * new_len_ptr: Pointer to size_t. If the entry is resized by the
+ * serialize function, the new length of the on disk image
+ * must be stored in *new_len_ptr, and the appropriate flag set
* in *flags_ptr.
*
- * If the entry is not resized by the pre-serialize function,
- * *new_len_ptr is undefined on pre-serialize callback
- * return.
+ * If the entry is not resized by the pre-serialize function,
+ * *new_len_ptr is undefined on pre-serialize callback
+ * return.
*
- * flags_ptr: Pointer to an unsigned integer used to return flags
- * indicating whether the preserialize function resized or moved
- * the entry. If the entry was neither resized or moved, the
+ * flags_ptr: Pointer to an unsigned integer used to return flags
+ * indicating whether the pre-serialize function resized or moved
+ * the entry. If the entry was neither resized or moved, the
* serialize function must set *flags_ptr to zero. The
* H5C__SERIALIZE_RESIZED_FLAG or H5C__SERIALIZE_MOVED_FLAG must
* be set to indicate a resize or move respectively.
*
- * If the H5C__SERIALIZE_RESIZED_FLAG is set, the new length
- * must be stored in *new_len_ptr.
+ * If the H5C__SERIALIZE_RESIZED_FLAG is set, the new length
+ * must be stored in *new_len_ptr.
*
- * If the H5C__SERIALIZE_MOVED_FLAG flag is set, the
- * new image base address must be stored in *new_addr_ptr.
+ * If the H5C__SERIALIZE_MOVED_FLAG flag is set, the
+ * new image base address must be stored in *new_addr_ptr.
*
- * Processing in the pre-serialize function should proceed as follows:
+ * Processing in the pre-serialize function should proceed as follows:
*
- * The pre-serialize function must examine the in core representation
- * indicated by the thing parameter, if the pre-serialize function does
+ * The pre-serialize function must examine the in core representation
+ * indicated by the thing parameter, if the pre-serialize function does
* not need to change the size or location of the on-disk image, it must
* set *flags_ptr to zero.
*
- * If the size of the on-disk image must be changed, the pre-serialize
+ * If the size of the on-disk image must be changed, the pre-serialize
* function must load the length of the new image into *new_len_ptr, and
* set the H5C__SERIALIZE_RESIZED_FLAG in *flags_ptr.
*
- * If the base address of the on disk image must be changed, the
+ * If the base address of the on disk image must be changed, the
* pre-serialize function must set *new_addr_ptr to the new base address,
* and set the H5C__SERIALIZE_MOVED_FLAG in *flags_ptr.
*
- * In addition, the pre-serialize callback may perform any other
- * processing required before the entry is written to disk
+ * In addition, the pre-serialize callback may perform any other
+ * processing required before the entry is written to disk
*
- * If it is successful, the function must return SUCCEED.
+ * If it is successful, the function must return SUCCEED.
*
- * If it fails for any reason, the function must return FAIL and
- * push error information on the error stack with the error API
- * routines.
+ * If it fails for any reason, the function must return FAIL and
+ * push error information on the error stack with the error API
+ * routines.
*
*
* SERIALIZE: Pointer to the serialize callback.
*
- * The serialize callback is invoked by the metadata cache whenever
- * it needs a current on disk image of the metadata entry for purposes
- * either constructing a journal entry or flushing the entry to disk.
+ * The serialize callback is invoked by the metadata cache whenever
+ * it needs a current on disk image of the metadata entry for purposes
+ * either constructing a journal entry or flushing the entry to disk.
*
- * At this point, the base address and length of the entry's image on
+ * At this point, the base address and length of the entry's image on
* disk must be well known and not change during the serialization
* process.
*
- * While any size and/or location changes must have been handled
- * by a pre-serialize call, the client may elect to handle any other
- * changes to the entry required to place it in correct form for
- * writing to disk in this call.
+ * While any size and/or location changes must have been handled
+ * by a pre-serialize call, the client may elect to handle any other
+ * changes to the entry required to place it in correct form for
+ * writing to disk in this call.
*
- * The typedef for the serialize callback is as follows:
+ * The typedef for the serialize callback is as follows:
*
- * typedef herr_t (*H5C_serialize_func_t)(const H5F_t *f,
+ * typedef herr_t (*H5C_serialize_func_t)(const H5F_t *f,
* void * image_ptr,
* size_t len,
* void * thing);
*
- * The parameters of the serialize callback are as follows:
+ * The parameters of the serialize callback are as follows:
*
- * f: File pointer -- needed if other metadata cache entries
- * must be modified in the process of serializing the
- * target entry.
+ * f: File pointer -- needed if other metadata cache entries
+ * must be modified in the process of serializing the
+ * target entry.
*
- * image_ptr: Pointer to a buffer of length len bytes into which a
- * serialized image of the target metadata cache entry is
- * to be written.
+ * image_ptr: Pointer to a buffer of length len bytes into which a
+ * serialized image of the target metadata cache entry is
+ * to be written.
*
- * Note that this buffer will not in general be initialized
- * to any particular value. Thus the serialize function may
- * not assume any initial value and must set each byte in
- * the buffer.
+ * Note that this buffer will not in general be initialized
+ * to any particular value. Thus the serialize function may
+ * not assume any initial value and must set each byte in
+ * the buffer.
*
- * len: Length in bytes of the in file image of the entry to be
- * serialized. Also the size of *image_ptr (below).
+ * len: Length in bytes of the in file image of the entry to be
+ * serialized. Also the size of *image_ptr (below).
*
- * This parameter is supplied mainly for sanity checking.
- * Sanity checks should be performed when compiled in debug
- * mode, but the parameter may be unused when compiled in
- * production mode.
+ * This parameter is supplied mainly for sanity checking.
+ * Sanity checks should be performed when compiled in debug
+ * mode, but the parameter may be unused when compiled in
+ * production mode.
*
- * thing: Pointer to void containing the address of the in core
- * representation of the target metadata cache entry.
- * This is the same pointer returned by a protect of the
- * addr and len given above.
+ * thing: Pointer to void containing the address of the in core
+ * representation of the target metadata cache entry.
+ * This is the same pointer returned by a protect of the
+ * addr and len given above.
*
- * Processing in the serialize function should proceed as follows:
+ * Processing in the serialize function should proceed as follows:
*
- * If there are any remaining changes to the entry required before
- * write to disk, they must be dealt with first.
+ * If there are any remaining changes to the entry required before
+ * write to disk, they must be dealt with first.
*
- * The serialize function must then examine the in core
- * representation indicated by the thing parameter, and write a
- * serialized image of its contents into the provided buffer.
+ * The serialize function must then examine the in core
+ * representation indicated by the thing parameter, and write a
+ * serialized image of its contents into the provided buffer.
*
- * If it is successful, the function must return SUCCEED.
+ * If it is successful, the function must return SUCCEED.
*
- * If it fails for any reason, the function must return FAIL and
- * push error information on the error stack with the error API
- * routines.
+ * If it fails for any reason, the function must return FAIL and
+ * push error information on the error stack with the error API
+ * routines.
*
*
* NOTIFY: Pointer to the notify callback.
@@ -719,98 +723,98 @@ typedef struct H5C_t H5C_t;
* action on an entry has taken/will take place and the client indicates
* it wishes to be notified about the action.
*
- * The typedef for the notify callback is as follows:
+ * The typedef for the notify callback is as follows:
*
- * typedef herr_t (*H5C_notify_func_t)(H5C_notify_action_t action,
+ * typedef herr_t (*H5C_notify_func_t)(H5C_notify_action_t action,
* void *thing);
*
- * The parameters of the notify callback are as follows:
+ * The parameters of the notify callback are as follows:
*
- * action: An enum indicating the metadata cache action that has taken/
+ * action: An enum indicating the metadata cache action that has taken/
* will take place.
*
- * thing: Pointer to void containing the address of the in core
- * representation of the target metadata cache entry. This
- * is the same pointer that would be returned by a protect
- * of the addr and len of the entry.
+ * thing: Pointer to void containing the address of the in core
+ * representation of the target metadata cache entry. This
+ * is the same pointer that would be returned by a protect
+ * of the addr and len of the entry.
*
- * Processing in the notify function should proceed as follows:
+ * Processing in the notify function should proceed as follows:
*
- * The notify function may perform any action it would like, including
+ * The notify function may perform any action it would like, including
* metadata cache calls.
*
- * If the function is successful, it must return SUCCEED.
+ * If the function is successful, it must return SUCCEED.
*
- * If it fails for any reason, the function must return FAIL and
- * push error information on the error stack with the error API
- * routines.
+ * If it fails for any reason, the function must return FAIL and
+ * push error information on the error stack with the error API
+ * routines.
*
*
* FREE_ICR: Pointer to the free ICR callback.
*
- * The free ICR callback is invoked by the metadata cache when it
- * wishes to evict an entry, and needs the client to free the memory
- * allocated for the in core representation.
+ * The free ICR callback is invoked by the metadata cache when it
+ * wishes to evict an entry, and needs the client to free the memory
+ * allocated for the in core representation.
*
- * The typedef for the free ICR callback is as follows:
+ * The typedef for the free ICR callback is as follows:
*
- * typedef herr_t (*H5C_free_icr_func_t)(void * thing));
+ * typedef herr_t (*H5C_free_icr_func_t)(void * thing));
*
- * The parameters of the free ICR callback are as follows:
+ * The parameters of the free ICR callback are as follows:
*
- * thing: Pointer to void containing the address of the in core
- * representation of the target metadata cache entry. This
- * is the same pointer that would be returned by a protect
- * of the addr and len of the entry.
+ * thing: Pointer to void containing the address of the in core
+ * representation of the target metadata cache entry. This
+ * is the same pointer that would be returned by a protect
+ * of the addr and len of the entry.
*
- * Processing in the free ICR function should proceed as follows:
+ * Processing in the free ICR function should proceed as follows:
*
- * The free ICR function must free all memory allocated to the
- * in core representation.
+ * The free ICR function must free all memory allocated to the
+ * in core representation.
*
- * If the function is successful, it must return SUCCEED.
+ * If the function is successful, it must return SUCCEED.
*
- * If it fails for any reason, the function must return FAIL and
- * push error information on the error stack with the error API
- * routines.
+ * If it fails for any reason, the function must return FAIL and
+ * push error information on the error stack with the error API
+ * routines.
*
- * At least when compiled with debug, it would be useful if the
- * free ICR call would fail if the in core representation has been
- * modified since the last serialize callback.
+ * At least when compiled with debug, it would be useful if the
+ * free ICR call would fail if the in core representation has been
+ * modified since the last serialize callback.
*
* GET_FSF_SIZE: Pointer to the get file space free size callback.
*
- * In principle, there is no need for the get file space free size
- * callback. However, as an optimization, it is sometimes convenient
- * to allocate and free file space for a number of cache entries
- * simultaneously in a single contiguous block of file space.
+ * In principle, there is no need for the get file space free size
+ * callback. However, as an optimization, it is sometimes convenient
+ * to allocate and free file space for a number of cache entries
+ * simultaneously in a single contiguous block of file space.
*
- * File space allocation is done by the client, so the metadata cache
- * need not be involved. However, since the metadata cache typically
+ * File space allocation is done by the client, so the metadata cache
+ * need not be involved. However, since the metadata cache typically
* handles file space release when an entry is destroyed, some
- * adjustment on the part of the metadata cache is required for this
- * operation.
+ * adjustment on the part of the metadata cache is required for this
+ * operation.
*
* The get file space free size callback exists to support this
- * operation.
+ * operation.
*
- * If a group of cache entries that were allocated as a group are to
- * be discarded and their file space released, the type of the first
- * (i.e. lowest address) entry in the group must implement the
- * get free file space size callback.
+ * If a group of cache entries that were allocated as a group are to
+ * be discarded and their file space released, the type of the first
+ * (i.e. lowest address) entry in the group must implement the
+ * get free file space size callback.
*
- * To free the file space of all entries in the group in a single
- * operation, first expunge all entries other than the first without
- * the free file space flag.
+ * To free the file space of all entries in the group in a single
+ * operation, first expunge all entries other than the first without
+ * the free file space flag.
*
- * Then, to complete the operation, unprotect or expunge the first
- * entry in the block with the free file space flag set. Since
- * the get free file space callback is implemented, the metadata
- * cache will use this callback to get the size of the block to be
- * freed, instead of using the size of the entry as is done otherwise.
+ * Then, to complete the operation, unprotect or expunge the first
+ * entry in the block with the free file space flag set. Since
+ * the get free file space callback is implemented, the metadata
+ * cache will use this callback to get the size of the block to be
+ * freed, instead of using the size of the entry as is done otherwise.
*
- * At present this callback is used only by the H5FA and H5EA dblock
- * and dblock page client classes.
+ * At present this callback is used only by the H5FA and H5EA dblock
+ * and dblock page client classes.
*
* The typedef for the get_fsf_size callback is as follows:
*
@@ -824,15 +828,15 @@ typedef struct H5C_t H5C_t;
* is the same pointer that would be returned by a protect()
* call of the associated addr and len.
*
- * fs_size_ptr: Pointer to hsize_t in which the callback will return
+ * fs_size_ptr: Pointer to hsize_t in which the callback will return
* the size of the piece of file space to be freed. Note
- * that the space to be freed is presumed to have the same
- * base address as the cache entry.
+ * that the space to be freed is presumed to have the same
+ * base address as the cache entry.
*
* The function simply returns the size of the block of file space
- * to be freed in *fsf_size_ptr.
+ * to be freed in *fsf_size_ptr.
*
- * If the function is successful, it must return SUCCEED.
+ * If the function is successful, it must return SUCCEED.
*
* If it fails for any reason, the function must return FAIL and
* push error information on the error stack with the error API
@@ -953,18 +957,10 @@ typedef enum H5C_notify_action_t {
*/
H5C_NOTIFY_ACTION_ENTRY_DIRTIED, /* Entry has been marked dirty. */
H5C_NOTIFY_ACTION_ENTRY_CLEANED, /* Entry has been marked clean. */
- H5C_NOTIFY_ACTION_CHILD_DIRTIED, /* Dependent child has been marked
- * dirty.
- */
- H5C_NOTIFY_ACTION_CHILD_CLEANED, /* Dependent child has been marked
- * clean.
- */
- H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED, /* Dependent child has been marked
- * unserialized.
- */
- H5C_NOTIFY_ACTION_CHILD_SERIALIZED /* Dependent child has been marked
- * serialized.
- */
+ H5C_NOTIFY_ACTION_CHILD_DIRTIED, /* Dependent child has been marked dirty. */
+ H5C_NOTIFY_ACTION_CHILD_CLEANED, /* Dependent child has been marked clean. */
+ H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED, /* Dependent child has been marked unserialized. */
+ H5C_NOTIFY_ACTION_CHILD_SERIALIZED /* Dependent child has been marked serialized. */
} H5C_notify_action_t;
/* Cache client callback function pointers */
@@ -1079,9 +1075,9 @@ typedef int H5C_ring_t;
*
* The fields of this structure are discussed individually below:
*
- * JRM - 4/26/04
+ * JRM - 4/26/04
*
- * magic: Unsigned 32 bit integer that must always be set to
+ * magic: Unsigned 32 bit integer that must always be set to
* H5C__H5C_CACHE_ENTRY_T_MAGIC when the entry is valid.
* The field must be set to H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC
* just before the entry is freed.
@@ -1100,129 +1096,129 @@ typedef int H5C_ring_t;
* detect this case, and re-start its scan from the bottom
* of the LRU when this situation occurs.
*
- * cache_ptr: Pointer to the cache that this entry is contained within.
+ * cache_ptr: Pointer to the cache that this entry is contained within.
*
- * addr: Base address of the cache entry on disk.
+ * addr: Base address of the cache entry on disk.
*
- * size: Length of the cache entry on disk in bytes Note that unlike
+ * size: Length of the cache entry on disk in bytes. Note that unlike
* normal caches, the entries in this cache are of arbitrary size.
*
- * The file space allocations for cache entries implied by the
+ * The file space allocations for cache entries implied by the
* addr and size fields must be disjoint.
*
- * image_ptr: Pointer to void. When not NULL, this field points to a
- * dynamically allocated block of size bytes in which the
- * on disk image of the metadata cache entry is stored.
+ * image_ptr: Pointer to void. When not NULL, this field points to a
+ * dynamically allocated block of size bytes in which the
+ * on disk image of the metadata cache entry is stored.
*
- * If the entry is dirty, the pre-serialize and serialize
- * callbacks must be used to update this image before it is
- * written to disk
+ * If the entry is dirty, the pre-serialize and serialize
+ * callbacks must be used to update this image before it is
+ * written to disk
*
* image_up_to_date: Boolean flag that is set to TRUE when *image_ptr
- * is up to date, and set to false when the entry is dirtied.
+ * is up to date, and set to false when the entry is dirtied.
*
- * type: Pointer to the instance of H5C_class_t containing pointers
- * to the methods for cache entries of the current type. This
- * field should be NULL when the instance of H5C_cache_entry_t
- * is not in use.
+ * type: Pointer to the instance of H5C_class_t containing pointers
+ * to the methods for cache entries of the current type. This
+ * field should be NULL when the instance of H5C_cache_entry_t
+ * is not in use.
*
- * The name is not particularly descriptive, but is retained
- * to avoid changes in existing code.
+ * The name is not particularly descriptive, but is retained
+ * to avoid changes in existing code.
*
- * is_dirty: Boolean flag indicating whether the contents of the cache
- * entry has been modified since the last time it was written
- * to disk.
+ * is_dirty: Boolean flag indicating whether the contents of the cache
+ * entry has been modified since the last time it was written
+ * to disk.
*
- * dirtied: Boolean flag used to indicate that the entry has been
- * dirtied while protected.
+ * dirtied: Boolean flag used to indicate that the entry has been
+ * dirtied while protected.
*
- * This field is set to FALSE in the protect call, and may
- * be set to TRUE by the H5C_mark_entry_dirty() call at any
- * time prior to the unprotect call.
+ * This field is set to FALSE in the protect call, and may
+ * be set to TRUE by the H5C_mark_entry_dirty() call at any
+ * time prior to the unprotect call.
*
- * The H5C_mark_entry_dirty() call exists as a convenience
- * function for the fractal heap code which may not know if
- * an entry is protected or pinned, but knows that is either
- * protected or pinned. The dirtied field was added as in
- * the parallel case, it is necessary to know whether a
- * protected entry is dirty prior to the protect call.
+ * The H5C_mark_entry_dirty() call exists as a convenience
+ * function for the fractal heap code which may not know if
+ * an entry is protected or pinned, but knows that it is either
+ * protected or pinned. The dirtied field was added as in
+ * the parallel case, it is necessary to know whether a
+ * protected entry is dirty prior to the protect call.
*
* is_protected: Boolean flag indicating whether this entry is protected
- * (or locked, to use more conventional terms). When it is
- * protected, the entry cannot be flushed or accessed until
- * it is unprotected (or unlocked -- again to use more
- * conventional terms).
+ * (or locked, to use more conventional terms). When it is
+ * protected, the entry cannot be flushed or accessed until
+ * it is unprotected (or unlocked -- again to use more
+ * conventional terms).
*
- * Note that protected entries are removed from the LRU lists
- * and inserted on the protected list.
+ * Note that protected entries are removed from the LRU lists
+ * and inserted on the protected list.
*
* is_read_only: Boolean flag that is only meaningful if is_protected is
- * TRUE. In this circumstance, it indicates whether the
- * entry has been protected read-only, or read/write.
+ * TRUE. In this circumstance, it indicates whether the
+ * entry has been protected read-only, or read/write.
*
- * If the entry has been protected read-only (i.e. is_protected
- * and is_read_only are both TRUE), we allow the entry to be
- * protected more than once.
+ * If the entry has been protected read-only (i.e. is_protected
+ * and is_read_only are both TRUE), we allow the entry to be
+ * protected more than once.
*
- * In this case, the number of readers is maintained in the
- * ro_ref_count field (see below), and unprotect calls simply
- * decrement that field until it drops to zero, at which point
- * the entry is actually unprotected.
+ * In this case, the number of readers is maintained in the
+ * ro_ref_count field (see below), and unprotect calls simply
+ * decrement that field until it drops to zero, at which point
+ * the entry is actually unprotected.
*
* ro_ref_count: Integer field used to maintain a count of the number of
- * outstanding read-only protects on this entry. This field
- * must be zero whenever either is_protected or is_read_only
- * are TRUE.
+ * outstanding read-only protects on this entry. This field
+ * must be zero whenever either is_protected or is_read_only
+ * are TRUE.
*
- * is_pinned: Boolean flag indicating whether the entry has been pinned
- * in the cache.
+ * is_pinned: Boolean flag indicating whether the entry has been pinned
+ * in the cache.
*
- * For very hot entries, the protect / unprotect overhead
- * can become excessive. Thus the cache has been extended
- * to allow an entry to be "pinned" in the cache.
+ * For very hot entries, the protect / unprotect overhead
+ * can become excessive. Thus the cache has been extended
+ * to allow an entry to be "pinned" in the cache.
*
- * Pinning an entry in the cache has several implications:
+ * Pinning an entry in the cache has several implications:
*
- * 1) A pinned entry cannot be evicted. Thus unprotected
- * pinned entries must be stored in the pinned entry
- * list, instead of being managed by the replacement
- * policy code (LRU at present).
+ * 1) A pinned entry cannot be evicted. Thus unprotected
+ * pinned entries must be stored in the pinned entry
+ * list, instead of being managed by the replacement
+ * policy code (LRU at present).
*
- * 2) A pinned entry can be accessed or modified at any time.
- * This places an extra burden on the pre-serialize and
- * serialize callbacks, which must ensure that a pinned
- * entry is consistent and ready to write to disk before
- * generating an image.
+ * 2) A pinned entry can be accessed or modified at any time.
+ * This places an extra burden on the pre-serialize and
+ * serialize callbacks, which must ensure that a pinned
+ * entry is consistent and ready to write to disk before
+ * generating an image.
*
- * 3) A pinned entry can be marked as dirty (and possibly
- * change size) while it is unprotected.
+ * 3) A pinned entry can be marked as dirty (and possibly
+ * change size) while it is unprotected.
*
- * 4) The flush-destroy code must allow pinned entries to
- * be unpinned (and possibly unprotected) during the
- * flush.
+ * 4) The flush-destroy code must allow pinned entries to
+ * be unpinned (and possibly unprotected) during the
+ * flush.
*
- * JRM -- 3/16/06
+ * JRM -- 3/16/06
*
- * in_slist: Boolean flag indicating whether the entry is in the skip list
- * As a general rule, entries are placed in the list when they
+ * in_slist: Boolean flag indicating whether the entry is in the skip list
+ * As a general rule, entries are placed in the list when they
* are marked dirty. However they may remain in the list after
* being flushed.
*
* Update: Dirty entries are now removed from the skip list
- * when they are flushed.
+ * when they are flushed.
*
* flush_marker: Boolean flag indicating that the entry is to be flushed
- * the next time H5C_flush_cache() is called with the
- * H5C__FLUSH_MARKED_ENTRIES_FLAG. The flag is reset when
- * the entry is flushed for whatever reason.
+ * the next time H5C_flush_cache() is called with the
+ * H5C__FLUSH_MARKED_ENTRIES_FLAG. The flag is reset when
+ * the entry is flushed for whatever reason.
*
* flush_me_last: Boolean flag indicating that this entry should not be
- * flushed from the cache until all other entries without
+ * flushed from the cache until all other entries without
* the flush_me_last flag set have been flushed.
*
- * Note:
+ * Note:
*
- * At this time, the flush_me_last
+ * At this time, the flush_me_last
* flag will only be applied to one entry, the superblock,
* and the code utilizing these flags is protected with HDasserts
* to enforce this. This restraint can certainly be relaxed in
@@ -1231,37 +1227,37 @@ typedef int H5C_ring_t;
* will need to be expanded and tested appropriately if that
* functionality is desired.
*
- * Update: There are now two possible last entries
- * (superblock and file driver info message). This
- * number will probably increase as we add superblock
- * messages. JRM -- 11/18/14
+ * Update: There are now two possible last entries
+ * (superblock and file driver info message). This
+ * number will probably increase as we add superblock
+ * messages. JRM -- 11/18/14
*
* clear_on_unprotect: Boolean flag used only in PHDF5. When H5C is used
- * to implement the metadata cache In the parallel case, only
- * the cache with mpi rank 0 is allowed to actually write to
- * file -- all other caches must retain dirty entries until they
- * are advised that the entry is clean.
+ * to implement the metadata cache. In the parallel case, only
+ * the cache with mpi rank 0 is allowed to actually write to
+ * file -- all other caches must retain dirty entries until they
+ * are advised that the entry is clean.
*
- * This flag is used in the case that such an advisory is
- * received when the entry is protected. If it is set when an
- * entry is unprotected, and the dirtied flag is not set in
- * the unprotect, the entry's is_dirty flag is reset by flushing
- * it with the H5C__FLUSH_CLEAR_ONLY_FLAG.
+ * This flag is used in the case that such an advisory is
+ * received when the entry is protected. If it is set when an
+ * entry is unprotected, and the dirtied flag is not set in
+ * the unprotect, the entry's is_dirty flag is reset by flushing
+ * it with the H5C__FLUSH_CLEAR_ONLY_FLAG.
*
* flush_immediately: Boolean flag used only in Phdf5 -- and then only
- * for H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED.
+ * for H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED.
*
- * When a distributed metadata write is triggered at a
- * sync point, this field is used to mark entries that
- * must be flushed before leaving the sync point. At all
- * other times, this field should be set to FALSE.
+ * When a distributed metadata write is triggered at a
+ * sync point, this field is used to mark entries that
+ * must be flushed before leaving the sync point. At all
+ * other times, this field should be set to FALSE.
*
* flush_in_progress: Boolean flag that is set to true iff the entry
- * is in the process of being flushed. This allows the cache
- * to detect when a call is the result of a flush callback.
+ * is in the process of being flushed. This allows the cache
+ * to detect when a call is the result of a flush callback.
*
* destroy_in_progress: Boolean flag that is set to true iff the entry
- * is in the process of being flushed and destroyed.
+ * is in the process of being flushed and destroyed.
*
*
* Fields supporting rings for flush ordering:
@@ -1279,8 +1275,8 @@ typedef int H5C_ring_t;
* within rings. Unlike rings, flush dependencies are applied to ALL
* writes, not just those triggered by flush or serialize calls.
*
- * ring: Instance of H5C_ring_t indicating the ring to which this
- * entry is assigned.
+ * ring: Instance of H5C_ring_t indicating the ring to which this
+ * entry is assigned.
*
*
* Fields supporting the 'flush dependency' feature:
@@ -1310,18 +1306,18 @@ typedef int H5C_ring_t;
* this field is nonzero, then this entry cannot be flushed.
*
* flush_dep_nunser_children: Number of flush dependency children
- * that are either unserialized, or have a non-zero number of
- * positive number of unserialized children.
+ * that are either unserialized, or have a positive
+ * number of unserialized children.
*
- * Note that since there is no requirement that a clean entry
- * be serialized, it is possible that flush_dep_nunser_children
- * to be greater than flush_dep_ndirty_children.
+ * Note that since there is no requirement that a clean entry
+ * be serialized, it is possible for flush_dep_nunser_children
+ * to be greater than flush_dep_ndirty_children.
*
- * This field exist to facilitate correct ordering of entry
- * serializations when it is necessary to serialize all the
- * entries in the metadata cache. Thus in the cache
- * serialization, no entry can be serialized unless this
- * field contains 0.
+ * This field exists to facilitate correct ordering of entry
+ * serializations when it is necessary to serialize all the
+ * entries in the metadata cache. Thus in the cache
+ * serialization, no entry can be serialized unless this
+ * field contains 0.
*
* Fields supporting the hash table:
*
@@ -1338,25 +1334,25 @@ typedef int H5C_ring_t;
* The il_next and il_prev fields discussed below were added to support
* the index list.
*
- * ht_next: Next pointer used by the hash table to store multiple
- * entries in a single hash bin. This field points to the
- * next entry in the doubly linked list of entries in the
- * hash bin, or NULL if there is no next entry.
+ * ht_next: Next pointer used by the hash table to store multiple
+ * entries in a single hash bin. This field points to the
+ * next entry in the doubly linked list of entries in the
+ * hash bin, or NULL if there is no next entry.
*
* ht_prev: Prev pointer used by the hash table to store multiple
* entries in a single hash bin. This field points to the
* previous entry in the doubly linked list of entries in
- * the hash bin, or NULL if there is no previuos entry.
+ * the hash bin, or NULL if there is no previous entry.
*
- * il_next: Next pointer used by the index to maintain a doubly linked
- * list of all entries in the index (and thus in the cache).
- * This field contains a pointer to the next entry in the
- * index list, or NULL if there is no next entry.
+ * il_next: Next pointer used by the index to maintain a doubly linked
+ * list of all entries in the index (and thus in the cache).
+ * This field contains a pointer to the next entry in the
+ * index list, or NULL if there is no next entry.
*
- * il_prev: Prev pointer used by the index to maintain a doubly linked
- * list of all entries in the index (and thus in the cache).
- * This field contains a pointer to the previous entry in the
- * index list, or NULL if there is no previous entry.
+ * il_prev: Prev pointer used by the index to maintain a doubly linked
+ * list of all entries in the index (and thus in the cache).
+ * This field contains a pointer to the previous entry in the
+ * index list, or NULL if there is no previous entry.
*
*
* Fields supporting replacement policies:
@@ -1393,43 +1389,43 @@ typedef int H5C_ring_t;
* The use of the replacement policy fields under the Modified LRU policy
* is discussed below:
*
- * next: Next pointer in either the LRU, the protected list, or
- * the pinned list depending on the current values of
- * is_protected and is_pinned. If there is no next entry
- * on the list, this field should be set to NULL.
+ * next: Next pointer in either the LRU, the protected list, or
+ * the pinned list depending on the current values of
+ * is_protected and is_pinned. If there is no next entry
+ * on the list, this field should be set to NULL.
*
- * prev: Prev pointer in either the LRU, the protected list,
- * or the pinned list depending on the current values of
- * is_protected and is_pinned. If there is no previous
- * entry on the list, this field should be set to NULL.
+ * prev: Prev pointer in either the LRU, the protected list,
+ * or the pinned list depending on the current values of
+ * is_protected and is_pinned. If there is no previous
+ * entry on the list, this field should be set to NULL.
*
- * aux_next: Next pointer on either the clean or dirty LRU lists.
- * This entry should be NULL when either is_protected or
- * is_pinned is true.
+ * aux_next: Next pointer on either the clean or dirty LRU lists.
+ * This entry should be NULL when either is_protected or
+ * is_pinned is true.
*
- * When is_protected and is_pinned are false, and is_dirty is
- * true, it should point to the next item on the dirty LRU
- * list.
+ * When is_protected and is_pinned are false, and is_dirty is
+ * true, it should point to the next item on the dirty LRU
+ * list.
*
- * When is_protected and is_pinned are false, and is_dirty is
- * false, it should point to the next item on the clean LRU
- * list. In either case, when there is no next item, it
- * should be NULL.
+ * When is_protected and is_pinned are false, and is_dirty is
+ * false, it should point to the next item on the clean LRU
+ * list. In either case, when there is no next item, it
+ * should be NULL.
*
- * aux_prev: Previous pointer on either the clean or dirty LRU lists.
- * This entry should be NULL when either is_protected or
- * is_pinned is true.
+ * aux_prev: Previous pointer on either the clean or dirty LRU lists.
+ * This entry should be NULL when either is_protected or
+ * is_pinned is true.
*
- * When is_protected and is_pinned are false, and is_dirty is
- * true, it should point to the previous item on the dirty
- * LRU list.
+ * When is_protected and is_pinned are false, and is_dirty is
+ * true, it should point to the previous item on the dirty
+ * LRU list.
*
- * When is_protected and is_pinned are false, and is_dirty
- * is false, it should point to the previous item on the
- * clean LRU list.
+ * When is_protected and is_pinned are false, and is_dirty
+ * is false, it should point to the previous item on the
+ * clean LRU list.
*
- * In either case, when there is no previous item, it should
- * be NULL.
+ * In either case, when there is no previous item, it should
+ * be NULL.
*
* Fields supporting the cache image feature:
*
@@ -1445,170 +1441,170 @@ typedef int H5C_ring_t;
* further details.
*
* include_in_image: Boolean flag indicating whether this entry should
- * be included in the metadata cache image. This field should
- * always be false prior to the H5C_prep_for_file_close() call.
- * During that call, it should be set to TRUE for all entries
- * that are to be included in the metadata cache image. At
- * present, only the superblock, the superblock extension
- * object header and its chunks (if any) are omitted from
- * the image.
+ * be included in the metadata cache image. This field should
+ * always be false prior to the H5C_prep_for_file_close() call.
+ * During that call, it should be set to TRUE for all entries
+ * that are to be included in the metadata cache image. At
+ * present, only the superblock, the superblock extension
+ * object header and its chunks (if any) are omitted from
+ * the image.
*
- * lru_rank: Rank of the entry in the LRU just prior to file close.
+ * lru_rank: Rank of the entry in the LRU just prior to file close.
*
- * Note that the first entry on the LRU has lru_rank 1,
- * and that entries not on the LRU at that time will have
- * either lru_rank -1 (if pinned) or 0 (if loaded during
- * the process of flushing the cache.
+ * Note that the first entry on the LRU has lru_rank 1,
+ * and that entries not on the LRU at that time will have
+ * either lru_rank -1 (if pinned) or 0 (if loaded during
+ * the process of flushing the cache).
*
* image_dirty: Boolean flag indicating whether the entry should be marked
- * as dirty in the metadata cache image. The flag is set to
- * TRUE iff the entry is dirty when H5C_prep_for_file_close()
- * is called.
+ * as dirty in the metadata cache image. The flag is set to
+ * TRUE iff the entry is dirty when H5C_prep_for_file_close()
+ * is called.
*
* fd_parent_count: If the entry is a child in one or more flush dependency
- * relationships, this field contains the number of flush
- * dependency parents.
+ * relationships, this field contains the number of flush
+ * dependency parents.
*
- * In all other cases, the field is set to zero.
+ * In all other cases, the field is set to zero.
*
- * Note that while this count is initially taken from the
- * flush dependency fields above, if the entry is in the
- * cache image (i.e. include_in_image is TRUE), any parents
- * that are not in the image are removed from this count and
- * from the fd_parent_addrs array below.
+ * Note that while this count is initially taken from the
+ * flush dependency fields above, if the entry is in the
+ * cache image (i.e. include_in_image is TRUE), any parents
+ * that are not in the image are removed from this count and
+ * from the fd_parent_addrs array below.
*
- * Finally observe that if the entry is dirty and in the
- * cache image, and its parent is dirty and not in the cache
- * image, then the entry must be removed from the cache image
- * to avoid violating the flush dependency flush ordering.
+ * Finally observe that if the entry is dirty and in the
+ * cache image, and its parent is dirty and not in the cache
+ * image, then the entry must be removed from the cache image
+ * to avoid violating the flush dependency flush ordering.
*
* fd_parent_addrs: If the entry is a child in one or more flush dependency
- * relationship when H5C_prep_for_file_close() is called, this
- * field must contain a pointer to an array of size
- * fd_parent_count containing the on disk addresses of the
- * parent.
+ * relationship when H5C_prep_for_file_close() is called, this
+ * field must contain a pointer to an array of size
+ * fd_parent_count containing the on disk addresses of the
+ * parent.
*
- * In all other cases, the field is set to NULL.
+ * In all other cases, the field is set to NULL.
*
- * Note that while this list of addresses is initially taken
- * from the flush dependency fields above, if the entry is in the
- * cache image (i.e. include_in_image is TRUE), any parents
- * that are not in the image are removed from this list, and
- * and from the fd_parent_count above.
+ * Note that while this list of addresses is initially taken
+ * from the flush dependency fields above, if the entry is in the
+ * cache image (i.e. include_in_image is TRUE), any parents
+ * that are not in the image are removed from this list, and
+ * from the fd_parent_count above.
*
- * Finally observe that if the entry is dirty and in the
- * cache image, and its parent is dirty and not in the cache
- * image, then the entry must be removed from the cache image
- * to avoid violating the flush dependency flush ordering.
+ * Finally observe that if the entry is dirty and in the
+ * cache image, and its parent is dirty and not in the cache
+ * image, then the entry must be removed from the cache image
+ * to avoid violating the flush dependency flush ordering.
*
* fd_child_count: If the entry is a parent in a flush dependency
- * relationship, this field contains the number of flush
- * dependency children.
+ * relationship, this field contains the number of flush
+ * dependency children.
*
- * In all other cases, the field is set to zero.
+ * In all other cases, the field is set to zero.
*
- * Note that while this count is initially taken from the
- * flush dependency fields above, if the entry is in the
- * cache image (i.e. include_in_image is TRUE), any children
- * that are not in the image are removed from this count.
+ * Note that while this count is initially taken from the
+ * flush dependency fields above, if the entry is in the
+ * cache image (i.e. include_in_image is TRUE), any children
+ * that are not in the image are removed from this count.
*
* fd_dirty_child_count: If the entry is a parent in a flush dependency
- * relationship, this field contains the number of dirty flush
- * dependency children.
+ * relationship, this field contains the number of dirty flush
+ * dependency children.
*
- * In all other cases, the field is set to zero.
+ * In all other cases, the field is set to zero.
*
- * Note that while this count is initially taken from the
- * flush dependency fields above, if the entry is in the
- * cache image (i.e. include_in_image is TRUE), any dirty
- * children that are not in the image are removed from this
- * count.
+ * Note that while this count is initially taken from the
+ * flush dependency fields above, if the entry is in the
+ * cache image (i.e. include_in_image is TRUE), any dirty
+ * children that are not in the image are removed from this
+ * count.
*
* image_fd_height: Flush dependency height of the entry in the cache image.
*
- * The flush dependency height of any entry involved in a
- * flush dependency relationship is defined to be the
- * longest flush dependency path from that entry to an entry
- * with no flush dependency children.
- *
- * Since the image_fd_height is used to order entries in the
- * cache image so that fd parents preceed fd children, for
- * purposes of this field, and entry is at flush dependency
- * level 0 if it either has no children, or if all of its
- * children are not in the cache image.
- *
- * Note that if a child in a flush dependency relationship is
- * dirty and in the cache image, and its parent is dirty and
- * not in the cache image, then the child must be excluded
- * from the cache image to maintain flush ordering.
- *
- * prefetched: Boolean flag indicating that the on disk image of the entry
- * has been loaded into the cache prior any request for the
- * entry by the rest of the library.
- *
- * As of this writing (8/10/15), this can only happen through
- * the load of a cache image block, although other scenarios
- * are contemplated for the use of this feature. Note that
- * unlike the usual prefetch situation, this means that a
- * prefetched entry can be dirty, and/or can be a party to
- * flush dependency relationship(s). This complicates matters
- * somewhat.
- *
- * The essential feature of a prefetched entry is that it
- * consists only of a buffer containing the on disk image of
- * the entry. Thus it must be deserialized before it can
- * be passed back to the library on a protect call. This
- * task is handled by H5C_deserialized_prefetched_entry().
- * In essence, this routine calls the deserialize callback
- * provided in the protect call with the on disk image,
- * deletes the prefetched entry from the cache, and replaces
- * it with the deserialized entry returned by the deserialize
- * callback.
- *
- * Further, if the prefetched entry is a flush dependency parent,
- * all its flush dependency children (which must also be
- * prefetched entries), must be transferred to the new cache
- * entry returned by the deserialization callback.
- *
- * Finally, if the prefetched entry is a flush dependency child,
- * this flush dependency must be destroyed prior to the
- * deserialize call.
- *
- * In addition to the above special processing on the first
- * protect call on a prefetched entry (after which is no longer
- * a prefetched entry), prefetched entries also require special
- * tretment on flush and evict.
- *
- * On flush, a dirty prefetched entry must simply be written
- * to disk and marked clean without any call to any client
- * callback.
- *
- * On eviction, if a prefetched entry is a flush dependency
- * child, that flush dependency relationship must be destroyed
- * just prior to the eviction. If the flush dependency code
- * is working properly, it should be impossible for any entry
- * that is a flush dependency parent to be evicted.
+ * The flush dependency height of any entry involved in a
+ * flush dependency relationship is defined to be the
+ * longest flush dependency path from that entry to an entry
+ * with no flush dependency children.
+ *
+ * Since the image_fd_height is used to order entries in the
+ * cache image so that fd parents precede fd children, for
+ * purposes of this field, an entry is at flush dependency
+ * level 0 if it either has no children, or if all of its
+ * children are not in the cache image.
+ *
+ * Note that if a child in a flush dependency relationship is
+ * dirty and in the cache image, and its parent is dirty and
+ * not in the cache image, then the child must be excluded
+ * from the cache image to maintain flush ordering.
+ *
+ * prefetched: Boolean flag indicating that the on disk image of the entry
+ * has been loaded into the cache prior to any request for the
+ * entry by the rest of the library.
+ *
+ * As of this writing (8/10/15), this can only happen through
+ * the load of a cache image block, although other scenarios
+ * are contemplated for the use of this feature. Note that
+ * unlike the usual prefetch situation, this means that a
+ * prefetched entry can be dirty, and/or can be a party to
+ * flush dependency relationship(s). This complicates matters
+ * somewhat.
+ *
+ * The essential feature of a prefetched entry is that it
+ * consists only of a buffer containing the on disk image of
+ * the entry. Thus it must be deserialized before it can
+ * be passed back to the library on a protect call. This
+ * task is handled by H5C_deserialized_prefetched_entry().
+ * In essence, this routine calls the deserialize callback
+ * provided in the protect call with the on disk image,
+ * deletes the prefetched entry from the cache, and replaces
+ * it with the deserialized entry returned by the deserialize
+ * callback.
+ *
+ * Further, if the prefetched entry is a flush dependency parent,
+ * all its flush dependency children (which must also be
+ * prefetched entries), must be transferred to the new cache
+ * entry returned by the deserialization callback.
+ *
+ * Finally, if the prefetched entry is a flush dependency child,
+ * this flush dependency must be destroyed prior to the
+ * deserialize call.
+ *
+ * In addition to the above special processing on the first
+ * protect call on a prefetched entry (after which it is no longer
+ * a prefetched entry), prefetched entries also require special
+ * treatment on flush and evict.
+ *
+ * On flush, a dirty prefetched entry must simply be written
+ * to disk and marked clean without any call to any client
+ * callback.
+ *
+ * On eviction, if a prefetched entry is a flush dependency
+ * child, that flush dependency relationship must be destroyed
+ * just prior to the eviction. If the flush dependency code
+ * is working properly, it should be impossible for any entry
+ * that is a flush dependency parent to be evicted.
*
* prefetch_type_id: Integer field containing the type ID of the prefetched
- * entry. This ID must match the ID of the type provided in any
- * protect call on the prefetched entry.
+ * entry. This ID must match the ID of the type provided in any
+ * protect call on the prefetched entry.
*
- * The value of this field is undefined in prefetched is FALSE.
+ * The value of this field is undefined if prefetched is FALSE.
*
- * age: Number of times a prefetched entry has appeared in
- * subsequent cache images. The field exists to allow
- * imposition of a limit on how many times a prefetched
- * entry can appear in subsequent cache images without being
- * converted to a regular entry.
+ * age: Number of times a prefetched entry has appeared in
+ * subsequent cache images. The field exists to allow
+ * imposition of a limit on how many times a prefetched
+ * entry can appear in subsequent cache images without being
+ * converted to a regular entry.
*
- * This field must be zero if prefetched is FALSE.
+ * This field must be zero if prefetched is FALSE.
*
* prefetched_dirty: Boolean field that must be set to FALSE unless the
- * following conditions hold:
+ * following conditions hold:
*
- * 1) The file has been opened R/O.
+ * 1) The file has been opened R/O.
*
- * 2) The entry is either a prefetched entry, or was
+ * 2) The entry is either a prefetched entry, or was
* re-constructed from a prefetched entry.
*
* 3) The base prefetched entry was marked dirty.
@@ -1644,14 +1640,14 @@ typedef int H5C_ring_t;
* we deal with this by disabling EOC in the R/O case.
*
* serialization_count: Integer field used to maintain a count of the
- * number of times each entry is serialized during cache
- * serialization. While no entry should be serialized more than
- * once in any serialization call, throw an assertion if any
- * flush depencency parent is serialized more than once during
- * a single cache serialization.
+ * number of times each entry is serialized during cache
+ * serialization. While no entry should be serialized more than
+ * once in any serialization call, throw an assertion if any
+ * flush dependency parent is serialized more than once during
+ * a single cache serialization.
*
- * This is a debugging field, and thus is maintained only if
- * NDEBUG is undefined.
+ * This is a debugging field, and thus is maintained only if
+ * NDEBUG is undefined.
*
* Fields supporting tagged entries:
*
@@ -1662,15 +1658,15 @@ typedef int H5C_ring_t;
* not tagged. Tagged entries have a pointer to the tag info for the object,
* which is shared state for all the entries for that object.
*
- * tl_next: Pointer to the next entry in the tag list for an object.
- * NULL for the tail entry in the list, as well as untagged
- * entries.
+ * tl_next: Pointer to the next entry in the tag list for an object.
+ * NULL for the tail entry in the list, as well as untagged
+ * entries.
*
- * tl_prev: Pointer to the previous entry in the tag list for an object.
- * NULL for the head entry in the list, as well as untagged
- * entries.
+ * tl_prev: Pointer to the previous entry in the tag list for an object.
+ * NULL for the head entry in the list, as well as untagged
+ * entries.
*
- * tag_info: Pointer to the common tag state for all entries belonging to
+ * tag_info: Pointer to the common tag state for all entries belonging to
* an object. NULL for untagged entries.
*
* Fields supporting VFD SWMR
@@ -1709,17 +1705,17 @@ typedef int H5C_ring_t;
* and H5C_COLLECT_CACHE_ENTRY_STATS are true. When present, they allow
* collection of statistics on individual cache entries.
*
- * accesses: int32_t containing the number of times this cache entry has
- * been referenced in its lifetime.
+ * accesses: int32_t containing the number of times this cache entry has
+ * been referenced in its lifetime.
*
- * clears: int32_t containing the number of times this cache entry has
- * been cleared in its life time.
+ * clears: int32_t containing the number of times this cache entry has
+ * been cleared in its life time.
*
- * flushes: int32_t containing the number of times this cache entry has
- * been flushed to file in its life time.
+ * flushes: int32_t containing the number of times this cache entry has
+ * been flushed to file in its life time.
*
- * pins: int32_t containing the number of times this cache entry has
- * been pinned in cache in its life time.
+ * pins: int32_t containing the number of times this cache entry has
+ * been pinned in cache in its life time.
*
****************************************************************************/
typedef struct H5C_cache_entry_t {
@@ -1827,43 +1823,43 @@ typedef struct H5C_cache_entry_t {
*
* The fields of this structure are discussed individually below:
*
- * JRM - 8/5/15
+ * JRM - 8/5/15
*
- * magic: Unsigned 32 bit integer that must always be set to
+ * magic: Unsigned 32 bit integer that must always be set to
* H5C_IMAGE_ENTRY_T_MAGIC when the entry is valid.
* The field must be set to H5C_IMAGE_ENTRY_T_BAD_MAGIC
* just before the entry is freed.
*
- * addr: Base address of the cache entry on disk.
+ * addr: Base address of the cache entry on disk.
*
- * size: Length of the cache entry on disk in bytes.
+ * size: Length of the cache entry on disk in bytes.
*
- * ring: Instance of H5C_ring_t indicating the flush ordering ring
- * to which this entry is assigned.
+ * ring: Instance of H5C_ring_t indicating the flush ordering ring
+ * to which this entry is assigned.
*
- * age: Number of times this prefetech entry has appeared in
- * the current sequence of cache images. This field is
- * initialized to 0 if the instance of H5C_image_entry_t
- * is constructed from a regular entry.
+ * age: Number of times this prefetch entry has appeared in
+ * the current sequence of cache images. This field is
+ * initialized to 0 if the instance of H5C_image_entry_t
+ * is constructed from a regular entry.
*
- * If the instance is constructed from a prefetched entry
- * currently residing in the metadata cache, the field is
- * set to 1 + the age of the prefetched entry, or to
- * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX if that sum exceeds
- * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX.
+ * If the instance is constructed from a prefetched entry
+ * currently residing in the metadata cache, the field is
+ * set to 1 + the age of the prefetched entry, or to
+ * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX if that sum exceeds
+ * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX.
*
- * type_id: Integer field containing the type ID of the entry.
+ * type_id: Integer field containing the type ID of the entry.
*
- * lru_rank: Rank of the entry in the LRU just prior to file close.
+ * lru_rank: Rank of the entry in the LRU just prior to file close.
*
- * Note that the first entry on the LRU has lru_rank 1,
- * and that entries not on the LRU at that time will have
- * either lru_rank -1 (if pinned) or 0 (if loaded during
- * the process of flushing the cache.
+ * Note that the first entry on the LRU has lru_rank 1,
+ * and that entries not on the LRU at that time will have
+ * either lru_rank -1 (if pinned) or 0 (if loaded during
+ * the process of flushing the cache).
*
- * is_dirty: Boolean flag indicating whether the contents of the cache
- * entry has been modified since the last time it was written
- * to disk as a regular piece of metadata.
+ * is_dirty: Boolean flag indicating whether the contents of the cache
+ * entry has been modified since the last time it was written
+ * to disk as a regular piece of metadata.
*
* image_fd_height: Flush dependency height of the entry in the cache image.
*
@@ -1891,17 +1887,17 @@ typedef struct H5C_cache_entry_t {
*
* Note that while this count is initially taken from the
* flush dependency fields in the associated instance of
- * H5C_cache_entry_t, if the entry is in the cache image
- * (i.e. include_in_image is TRUE), any parents that are
- * not in the image are removed from this count and
+ * H5C_cache_entry_t, if the entry is in the cache image
+ * (i.e. include_in_image is TRUE), any parents that are
+ * not in the image are removed from this count and
* from the fd_parent_addrs array below.
*
* Finally observe that if the entry is dirty and in the
* cache image, and its parent is dirty and not in the cache
* image, then the entry must be removed from the cache image
* to avoid violating the flush dependency flush ordering.
- * This should have happened before the construction of
- * the instance of H5C_image_entry_t.
+ * This should have happened before the construction of
+ * the instance of H5C_image_entry_t.
*
* fd_parent_addrs: If the entry is a child in one or more flush dependency
* relationship when H5C_prep_for_file_close() is called, this
@@ -1914,27 +1910,27 @@ typedef struct H5C_cache_entry_t {
* Note that while this list of addresses is initially taken
* from the flush dependency fields in the associated instance of
* H5C_cache_entry_t, if the entry is in the cache image
- * (i.e. include_in_image is TRUE), any parents that are not
- * in the image are removed from this list, and from the
- * fd_parent_count above.
+ * (i.e. include_in_image is TRUE), any parents that are not
+ * in the image are removed from this list, and from the
+ * fd_parent_count above.
*
* Finally observe that if the entry is dirty and in the
* cache image, and its parent is dirty and not in the cache
* image, then the entry must be removed from the cache image
* to avoid violating the flush dependency flush ordering.
- * This should have happened before the construction of
- * the instance of H5C_image_entry_t.
+ * This should have happened before the construction of
+ * the instance of H5C_image_entry_t.
*
* fd_child_count: If the entry is a parent in a flush dependency
- * relationship, this field contains the number of flush
- * dependency children.
+ * relationship, this field contains the number of flush
+ * dependency children.
*
- * In all other cases, the field is set to zero.
+ * In all other cases, the field is set to zero.
*
* Note that while this count is initially taken from the
* flush dependency fields in the associated instance of
* H5C_cache_entry_t, if the entry is in the cache image
- * (i.e. include_in_image is TRUE), any children
+ * (i.e. include_in_image is TRUE), any children
* that are not in the image are removed from this count.
*
* fd_dirty_child_count: If the entry is a parent in a flush dependency
@@ -1946,16 +1942,16 @@ typedef struct H5C_cache_entry_t {
* Note that while this count is initially taken from the
* flush dependency fields in the associated instance of
* H5C_cache_entry_t, if the entry is in the cache image
- * (i.e. include_in_image is TRUE), any dirty children
- * that are not in the image are removed from this count.
+ * (i.e. include_in_image is TRUE), any dirty children
+ * that are not in the image are removed from this count.
*
- * image_ptr: Pointer to void. When not NULL, this field points to a
- * dynamically allocated block of size bytes in which the
- * on disk image of the metadata cache entry is stored.
+ * image_ptr: Pointer to void. When not NULL, this field points to a
+ * dynamically allocated block of size bytes in which the
+ * on disk image of the metadata cache entry is stored.
*
- * If the entry is dirty, the pre-serialize and serialize
- * callbacks must be used to update this image before it is
- * written to disk
+ * If the entry is dirty, the pre-serialize and serialize
+ * callbacks must be used to update this image before it is
+ * written to disk
*
*
****************************************************************************/
@@ -1987,95 +1983,95 @@ typedef struct H5C_image_entry_t {
* The fields of the structure are discussed individually below:
*
* version: Integer field containing the version number of this version
- * of the H5C_auto_size_ctl_t structure. Any instance of
- * H5C_auto_size_ctl_t passed to the cache must have a known
- * version number, or an error will be flagged.
+ * of the H5C_auto_size_ctl_t structure. Any instance of
+ * H5C_auto_size_ctl_t passed to the cache must have a known
+ * version number, or an error will be flagged.
*
* report_fcn: Pointer to the function that is to be called to report
- * activities each time the auto cache resize code is executed. If the
- * field is NULL, no call is made.
+ * activities each time the auto cache resize code is executed. If the
+ * field is NULL, no call is made.
*
- * If the field is not NULL, it must contain the address of a function
- * of type H5C_auto_resize_report_fcn.
+ * If the field is not NULL, it must contain the address of a function
+ * of type H5C_auto_resize_report_fcn.
*
* set_initial_size: Boolean flag indicating whether the size of the
- * initial size of the cache is to be set to the value given in
- * the initial_size field. If set_initial_size is FALSE, the
- * initial_size field is ignored.
+ * initial size of the cache is to be set to the value given in
+ * the initial_size field. If set_initial_size is FALSE, the
+ * initial_size field is ignored.
*
* initial_size: If enabled, this field contain the size the cache is
- * to be set to upon receipt of this structure. Needless to say,
- * initial_size must lie in the closed interval [min_size, max_size].
+ * to be set to upon receipt of this structure. Needless to say,
+ * initial_size must lie in the closed interval [min_size, max_size].
*
* min_clean_fraction: double in the range 0 to 1 indicating the fraction
- * of the cache that is to be kept clean. This field is only used
- * in parallel mode. Typical values are 0.1 to 0.5.
+ * of the cache that is to be kept clean. This field is only used
+ * in parallel mode. Typical values are 0.1 to 0.5.
*
* max_size: Maximum size to which the cache can be adjusted. The
- * supplied value must fall in the closed interval
- * [MIN_MAX_CACHE_SIZE, MAX_MAX_CACHE_SIZE]. Also, max_size must
- * be greater than or equal to min_size.
+ * supplied value must fall in the closed interval
+ * [MIN_MAX_CACHE_SIZE, MAX_MAX_CACHE_SIZE]. Also, max_size must
+ * be greater than or equal to min_size.
*
* min_size: Minimum size to which the cache can be adjusted. The
- * supplied value must fall in the closed interval
- * [MIN_MAX_CACHE_SIZE, MAX_MAX_CACHE_SIZE]. Also, min_size must
- * be less than or equal to max_size.
+ * supplied value must fall in the closed interval
+ * [MIN_MAX_CACHE_SIZE, MAX_MAX_CACHE_SIZE]. Also, min_size must
+ * be less than or equal to max_size.
*
* epoch_length: Number of accesses on the cache over which to collect
- * hit rate stats before running the automatic cache resize code,
- * if it is enabled.
+ * hit rate stats before running the automatic cache resize code,
+ * if it is enabled.
*
- * At the end of an epoch, we discard prior hit rate data and start
- * collecting afresh. The epoch_length must lie in the closed
- * interval [H5C__MIN_AR_EPOCH_LENGTH, H5C__MAX_AR_EPOCH_LENGTH].
+ * At the end of an epoch, we discard prior hit rate data and start
+ * collecting afresh. The epoch_length must lie in the closed
+ * interval [H5C__MIN_AR_EPOCH_LENGTH, H5C__MAX_AR_EPOCH_LENGTH].
*
*
* Cache size increase control fields:
*
* incr_mode: Instance of the H5C_cache_incr_mode enumerated type whose
- * value indicates how we determine whether the cache size should be
- * increased. At present there are two possible values:
+ * value indicates how we determine whether the cache size should be
+ * increased. At present there are two possible values:
*
- * H5C_incr__off: Don't attempt to increase the size of the cache
- * automatically.
+ * H5C_incr__off: Don't attempt to increase the size of the cache
+ * automatically.
*
- * When this increment mode is selected, the remaining fields
- * in the cache size increase section ar ignored.
+ * When this increment mode is selected, the remaining fields
+ * in the cache size increase section ar ignored.
*
- * H5C_incr__threshold: Attempt to increase the size of the cache
- * whenever the average hit rate over the last epoch drops
- * below the value supplied in the lower_hr_threshold
- * field.
+ * H5C_incr__threshold: Attempt to increase the size of the cache
+ * whenever the average hit rate over the last epoch drops
+ * below the value supplied in the lower_hr_threshold
+ * field.
*
- * Note that this attempt will fail if the cache is already
- * at its maximum size, or if the cache is not already using
- * all available space.
+ * Note that this attempt will fail if the cache is already
+ * at its maximum size, or if the cache is not already using
+ * all available space.
*
* lower_hr_threshold: Lower hit rate threshold. If the increment mode
- * (incr_mode) is H5C_incr__threshold and the hit rate drops below the
- * value supplied in this field in an epoch, increment the cache size by
- * size_increment. Note that cache size may not be incremented above
- * max_size, and that the increment may be further restricted by the
- * max_increment field if it is enabled.
+ * (incr_mode) is H5C_incr__threshold and the hit rate drops below the
+ * value supplied in this field in an epoch, increment the cache size by
+ * size_increment. Note that cache size may not be incremented above
+ * max_size, and that the increment may be further restricted by the
+ * max_increment field if it is enabled.
*
- * When enabled, this field must contain a value in the range [0.0, 1.0].
- * Depending on the incr_mode selected, it may also have to be less than
- * upper_hr_threshold.
+ * When enabled, this field must contain a value in the range [0.0, 1.0].
+ * Depending on the incr_mode selected, it may also have to be less than
+ * upper_hr_threshold.
*
* increment: Double containing the multiplier used to derive the new
- * cache size from the old if a cache size increment is triggered.
- * The increment must be greater than 1.0, and should not exceed 2.0.
+ * cache size from the old if a cache size increment is triggered.
+ * The increment must be greater than 1.0, and should not exceed 2.0.
*
- * The new cache size is obtained by multiplying the current max cache
- * size by the increment, and then clamping to max_size and to stay
- * within the max_increment as necessary.
+ * The new cache size is obtained by multiplying the current max cache
+ * size by the increment, and then clamping to max_size and to stay
+ * within the max_increment as necessary.
*
* apply_max_increment: Boolean flag indicating whether the max_increment
- * field should be used to limit the maximum cache size increment.
+ * field should be used to limit the maximum cache size increment.
*
* max_increment: If enabled by the apply_max_increment field described
- * above, this field contains the maximum number of bytes by which the
- * cache size can be increased in a single re-size.
+ * above, this field contains the maximum number of bytes by which the
+ * cache size can be increased in a single re-size.
*
* flash_incr_mode: Instance of the H5C_cache_flash_incr_mode enumerated
* type whose value indicates whether and by what algorithm we should
@@ -2124,96 +2120,96 @@ typedef struct H5C_image_entry_t {
* is H5C_flash_incr__add_space.
*
* flash_threshold: Double containing the factor by which current max cache
- * size is multiplied to obtain the size threshold for the add_space
- * flash increment algorithm. The field is ignored unless
- * flash_incr_mode is H5C_flash_incr__add_space.
+ * size is multiplied to obtain the size threshold for the add_space
+ * flash increment algorithm. The field is ignored unless
+ * flash_incr_mode is H5C_flash_incr__add_space.
*
*
* Cache size decrease control fields:
*
* decr_mode: Instance of the H5C_cache_decr_mode enumerated type whose
- * value indicates how we determine whether the cache size should be
- * decreased. At present there are four possibilities.
+ * value indicates how we determine whether the cache size should be
+ * decreased. At present there are four possibilities.
*
- * H5C_decr__off: Don't attempt to decrease the size of the cache
- * automatically.
+ * H5C_decr__off: Don't attempt to decrease the size of the cache
+ * automatically.
*
- * When this increment mode is selected, the remaining fields
- * in the cache size decrease section are ignored.
+ * When this increment mode is selected, the remaining fields
+ * in the cache size decrease section are ignored.
*
- * H5C_decr__threshold: Attempt to decrease the size of the cache
- * whenever the average hit rate over the last epoch rises
- * above the value supplied in the upper_hr_threshold
- * field.
+ * H5C_decr__threshold: Attempt to decrease the size of the cache
+ * whenever the average hit rate over the last epoch rises
+ * above the value supplied in the upper_hr_threshold
+ * field.
*
- * H5C_decr__age_out: At the end of each epoch, search the cache for
- * entries that have not been accessed for at least the number
- * of epochs specified in the epochs_before_eviction field, and
- * evict these entries. Conceptually, the maximum cache size
- * is then decreased to match the new actual cache size. However,
- * this reduction may be modified by the min_size, the
- * max_decrement, and/or the empty_reserve.
+ * H5C_decr__age_out: At the end of each epoch, search the cache for
+ * entries that have not been accessed for at least the number
+ * of epochs specified in the epochs_before_eviction field, and
+ * evict these entries. Conceptually, the maximum cache size
+ * is then decreased to match the new actual cache size. However,
+ * this reduction may be modified by the min_size, the
+ * max_decrement, and/or the empty_reserve.
*
- * H5C_decr__age_out_with_threshold: Same as age_out, but we only
- * attempt to reduce the cache size when the hit rate observed
- * over the last epoch exceeds the value provided in the
- * upper_hr_threshold field.
+ * H5C_decr__age_out_with_threshold: Same as age_out, but we only
+ * attempt to reduce the cache size when the hit rate observed
+ * over the last epoch exceeds the value provided in the
+ * upper_hr_threshold field.
*
* upper_hr_threshold: Upper hit rate threshold. The use of this field
- * varies according to the current decr_mode:
+ * varies according to the current decr_mode:
*
- * H5C_decr__off or H5C_decr__age_out: The value of this field is
- * ignored.
+ * H5C_decr__off or H5C_decr__age_out: The value of this field is
+ * ignored.
*
- * H5C_decr__threshold: If the hit rate exceeds this threshold in any
- * epoch, attempt to decrement the cache size by size_decrement.
+ * H5C_decr__threshold: If the hit rate exceeds this threshold in any
+ * epoch, attempt to decrement the cache size by size_decrement.
*
- * Note that cache size may not be decremented below min_size.
+ * Note that cache size may not be decremented below min_size.
*
- * Note also that if the upper_threshold is 1.0, the cache size
- * will never be reduced.
+ * Note also that if the upper_threshold is 1.0, the cache size
+ * will never be reduced.
*
- * H5C_decr__age_out_with_threshold: If the hit rate exceeds this
- * threshold in any epoch, attempt to reduce the cache size
- * by evicting entries that have not been accessed for more
- * than the specified number of epochs.
+ * H5C_decr__age_out_with_threshold: If the hit rate exceeds this
+ * threshold in any epoch, attempt to reduce the cache size
+ * by evicting entries that have not been accessed for more
+ * than the specified number of epochs.
*
* decrement: This field is only used when the decr_mode is
- * H5C_decr__threshold.
+ * H5C_decr__threshold.
*
- * The field is a double containing the multiplier used to derive the
- * new cache size from the old if a cache size decrement is triggered.
- * The decrement must be in the range 0.0 (in which case the cache will
+ * The field is a double containing the multiplier used to derive the
+ * new cache size from the old if a cache size decrement is triggered.
+ * The decrement must be in the range 0.0 (in which case the cache will
* try to contract to its minimum size) to 1.0 (in which case the
* cache will never shrink).
*
* apply_max_decrement: Boolean flag used to determine whether decrements
- * in cache size are to be limited by the max_decrement field.
+ * in cache size are to be limited by the max_decrement field.
*
* max_decrement: Maximum number of bytes by which the cache size can be
- * decreased in a single re-size. Note that decrements may also be
- * restricted by the min_size of the cache, and (in age out modes) by
- * the empty_reserve field.
+ * decreased in a single re-size. Note that decrements may also be
+ * restricted by the min_size of the cache, and (in age out modes) by
+ * the empty_reserve field.
*
* epochs_before_eviction: Integer field used in H5C_decr__age_out and
- * H5C_decr__age_out_with_threshold decrement modes.
+ * H5C_decr__age_out_with_threshold decrement modes.
*
- * This field contains the number of epochs an entry must remain
- * unaccessed before it is evicted in an attempt to reduce the
- * cache size. If applicable, this field must lie in the range
- * [1, H5C__MAX_EPOCH_MARKERS].
+ * This field contains the number of epochs an entry must remain
+ * unaccessed before it is evicted in an attempt to reduce the
+ * cache size. If applicable, this field must lie in the range
+ * [1, H5C__MAX_EPOCH_MARKERS].
*
* apply_empty_reserve: Boolean field controlling whether the empty_reserve
- * field is to be used in computing the new cache size when the
- * decr_mode is H5C_decr__age_out or H5C_decr__age_out_with_threshold.
+ * field is to be used in computing the new cache size when the
+ * decr_mode is H5C_decr__age_out or H5C_decr__age_out_with_threshold.
*
* empty_reserve: To avoid a constant racheting down of cache size by small
- * amounts in the H5C_decr__age_out and H5C_decr__age_out_with_threshold
- * modes, this field allows one to require that any cache size
- * reductions leave the specified fraction of unused space in the cache.
+ * amounts in the H5C_decr__age_out and H5C_decr__age_out_with_threshold
+ * modes, this field allows one to require that any cache size
+ * reductions leave the specified fraction of unused space in the cache.
*
- * The value of this field must be in the range [0.0, 1.0]. I would
- * expect typical values to be in the range of 0.01 to 0.1.
+ * The value of this field must be in the range [0.0, 1.0]. I would
+ * expect typical values to be in the range of 0.01 to 0.1.
*
****************************************************************************/
@@ -2279,12 +2275,12 @@ typedef struct H5C_auto_size_ctl_t {
* The fields of the structure are discussed individually below:
*
* version: Integer field containing the version number of this version
- * of the H5C_image_ctl_t structure. Any instance of
- * H5C_image_ctl_t passed to the cache must have a known
- * version number, or an error will be flagged.
+ * of the H5C_image_ctl_t structure. Any instance of
+ * H5C_image_ctl_t passed to the cache must have a known
+ * version number, or an error will be flagged.
*
* generate_image: Boolean flag indicating whether a cache image should
- * be created on file close.
+ * be created on file close.
*
* save_resize_status: Boolean flag indicating whether the cache image
* should include the adaptive cache resize configuration and status.
@@ -2316,11 +2312,11 @@ typedef struct H5C_auto_size_ctl_t {
* equivalent of H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE.
*
* flags: Unsigned integer containing flags controlling which aspects of the
- * cache image functinality is actually executed. The primary impetus
- * behind this field is to allow development of tests for partial
- * implementations that will require little if any modification to run
- * with the full implementation. In normal operation, all flags should
- * be set.
+ * cache image functinality is actually executed. The primary impetus
+ * behind this field is to allow development of tests for partial
+ * implementations that will require little if any modification to run
+ * with the full implementation. In normal operation, all flags should
+ * be set.
*
****************************************************************************/
@@ -2337,10 +2333,11 @@ typedef struct H5C_auto_size_ctl_t {
#define H5C__DEFAULT_CACHE_IMAGE_CTL \
{ \
- /* version = */ H5C__CURR_CACHE_IMAGE_CTL_VER, /* generate_image = */ FALSE, \
- /* save_resize_status = */ FALSE, \
- /* entry_ageout = */ H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE, \
- /* flags = */ H5C_CI__ALL_FLAGS \
+ H5C__CURR_CACHE_IMAGE_CTL_VER, /* = version */ \
+ FALSE, /* = generate_image */ \
+ FALSE, /* = save_resize_status */ \
+ H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE, /* = entry_ageout */ \
+ H5C_CI__ALL_FLAGS /* = flags */ \
}
typedef struct H5C_cache_image_ctl_t {
@@ -2384,10 +2381,10 @@ herr_t H5C_verify_tag(int id, haddr_t tag);
H5_DLL herr_t H5C_flush_to_min_clean(H5F_t *f);
H5_DLL herr_t H5C_get_cache_auto_resize_config(const H5C_t *cache_ptr, H5C_auto_size_ctl_t *config_ptr);
H5_DLL herr_t H5C_get_cache_image_config(const H5C_t *cache_ptr, H5C_cache_image_ctl_t *config_ptr);
-H5_DLL herr_t H5C_get_cache_size(H5C_t *cache_ptr, size_t *max_size_ptr, size_t *min_clean_size_ptr,
+H5_DLL herr_t H5C_get_cache_size(const H5C_t *cache_ptr, size_t *max_size_ptr, size_t *min_clean_size_ptr,
size_t *cur_size_ptr, uint32_t *cur_num_entries_ptr);
-H5_DLL herr_t H5C_get_cache_flush_in_progress(H5C_t *cache_ptr, hbool_t *flush_in_progress_ptr);
-H5_DLL herr_t H5C_get_cache_hit_rate(H5C_t *cache_ptr, double *hit_rate_ptr);
+H5_DLL herr_t H5C_get_cache_flush_in_progress(const H5C_t *cache_ptr, hbool_t *flush_in_progress_ptr);
+H5_DLL herr_t H5C_get_cache_hit_rate(const H5C_t *cache_ptr, double *hit_rate_ptr);
H5_DLL int H5C_get_curr_io_client_type(H5C_t *cache_ptr);
H5_DLL hbool_t H5C_get_curr_read_speculative(H5C_t *cache_ptr);
H5_DLL herr_t H5C_get_entry_status(const H5F_t *f, haddr_t addr, size_t *size_ptr, hbool_t *in_cache_ptr,
@@ -2414,6 +2411,7 @@ H5_DLL herr_t H5C_resize_entry(void *thing, size_t new_size);
H5_DLL herr_t H5C_set_cache_auto_resize_config(H5C_t *cache_ptr, H5C_auto_size_ctl_t *config_ptr);
H5_DLL herr_t H5C_set_cache_image_config(const H5F_t *f, H5C_t *cache_ptr, H5C_cache_image_ctl_t *config_ptr);
H5_DLL herr_t H5C_set_evictions_enabled(H5C_t *cache_ptr, hbool_t evictions_enabled);
+H5_DLL herr_t H5C_set_slist_enabled(H5C_t *cache_ptr, hbool_t slist_enabled, hbool_t clear_slist);
H5_DLL herr_t H5C_set_vfd_swmr_reader(H5C_t *cache_ptr, hbool_t vfd_swmr_reader, hsize_t page_size);
H5_DLL herr_t H5C_set_prefix(H5C_t *cache_ptr, char *prefix);
H5_DLL herr_t H5C_stats(H5C_t *cache_ptr, const char *cache_name, hbool_t display_detailed_stats);
@@ -2434,7 +2432,7 @@ H5_DLL herr_t H5C_unsettle_ring(H5F_t *f, H5C_ring_t ring);
H5_DLL herr_t H5C_remove_entry(void *thing);
H5_DLL herr_t H5C_cache_image_status(H5F_t *f, hbool_t *load_ci_ptr, hbool_t *write_ci_ptr);
H5_DLL hbool_t H5C_cache_image_pending(const H5C_t *cache_ptr);
-H5_DLL herr_t H5C_get_mdc_image_info(H5C_t *cache_ptr, haddr_t *image_addr, hsize_t *image_len);
+H5_DLL herr_t H5C_get_mdc_image_info(const H5C_t *cache_ptr, haddr_t *image_addr, hsize_t *image_len);
/* Logging functions */
H5_DLL herr_t H5C_start_logging(H5C_t *cache);
diff --git a/src/H5Cquery.c b/src/H5Cquery.c
index 611cf55..d95dd87 100644
--- a/src/H5Cquery.c
+++ b/src/H5Cquery.c
@@ -114,8 +114,8 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5C_get_cache_size(H5C_t *cache_ptr, size_t *max_size_ptr, size_t *min_clean_size_ptr, size_t *cur_size_ptr,
- uint32_t *cur_num_entries_ptr)
+H5C_get_cache_size(const H5C_t *cache_ptr, size_t *max_size_ptr, size_t *min_clean_size_ptr,
+ size_t *cur_size_ptr, uint32_t *cur_num_entries_ptr)
{
herr_t ret_value = SUCCEED; /* Return value */
@@ -151,7 +151,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5C_get_cache_flush_in_progress(H5C_t *cache_ptr, hbool_t *flush_in_progress_ptr)
+H5C_get_cache_flush_in_progress(const H5C_t *cache_ptr, hbool_t *flush_in_progress_ptr)
{
herr_t ret_value = SUCCEED; /* Return value */
@@ -184,7 +184,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5C_get_cache_hit_rate(H5C_t *cache_ptr, double *hit_rate_ptr)
+H5C_get_cache_hit_rate(const H5C_t *cache_ptr, double *hit_rate_ptr)
{
herr_t ret_value = SUCCEED; /* Return value */
@@ -405,7 +405,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5C_get_mdc_image_info(H5C_t *cache_ptr, haddr_t *image_addr, hsize_t *image_len)
+H5C_get_mdc_image_info(const H5C_t *cache_ptr, haddr_t *image_addr, hsize_t *image_len)
{
herr_t ret_value = SUCCEED; /* Return value */
diff --git a/src/H5FDdevelop.h b/src/H5FDdevelop.h
new file mode 100644
index 0000000..5cf30ff
--- /dev/null
+++ b/src/H5FDdevelop.h
@@ -0,0 +1,267 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * This file contains public declarations for the H5FD (file drivers) developer
+ * support routines.
+ */
+
+#ifndef _H5FDdevelop_H
+#define _H5FDdevelop_H
+
+/* Include package's public header */
+#include "H5FDpublic.h"
+
+/*****************/
+/* Public Macros */
+/*****************/
+
+/* Map "fractal heap" header blocks to 'ohdr' type file memory, since its
+ * a fair amount of work to add a new kind of file memory and they are similar
+ * enough to object headers and probably too minor to deserve their own type.
+ *
+ * Map "fractal heap" indirect blocks to 'ohdr' type file memory, since they
+ * are similar to fractal heap header blocks.
+ *
+ * Map "fractal heap" direct blocks to 'lheap' type file memory, since they
+ * will be replacing local heaps.
+ *
+ * Map "fractal heap" 'huge' objects to 'draw' type file memory, since they
+ * represent large objects that are directly stored in the file.
+ *
+ * -QAK
+ */
+#define H5FD_MEM_FHEAP_HDR H5FD_MEM_OHDR
+#define H5FD_MEM_FHEAP_IBLOCK H5FD_MEM_OHDR
+#define H5FD_MEM_FHEAP_DBLOCK H5FD_MEM_LHEAP
+#define H5FD_MEM_FHEAP_HUGE_OBJ H5FD_MEM_DRAW
+
+/* Map "free space" header blocks to 'ohdr' type file memory, since its
+ * a fair amount of work to add a new kind of file memory and they are similar
+ * enough to object headers and probably too minor to deserve their own type.
+ *
+ * Map "free space" serialized sections to 'lheap' type file memory, since they
+ * are similar enough to local heap info.
+ *
+ * -QAK
+ */
+#define H5FD_MEM_FSPACE_HDR H5FD_MEM_OHDR
+#define H5FD_MEM_FSPACE_SINFO H5FD_MEM_LHEAP
+
+/* Map "shared object header message" master table to 'ohdr' type file memory,
+ * since its a fair amount of work to add a new kind of file memory and they are
+ * similar enough to object headers and probably too minor to deserve their own
+ * type.
+ *
+ * Map "shared object header message" indices to 'btree' type file memory,
+ * since they are similar enough to B-tree nodes.
+ *
+ * -QAK
+ */
+#define H5FD_MEM_SOHM_TABLE H5FD_MEM_OHDR
+#define H5FD_MEM_SOHM_INDEX H5FD_MEM_BTREE
+
+/* Map "extensible array" header blocks to 'ohdr' type file memory, since its
+ * a fair amount of work to add a new kind of file memory and they are similar
+ * enough to object headers and probably too minor to deserve their own type.
+ *
+ * Map "extensible array" index blocks to 'ohdr' type file memory, since they
+ * are similar to extensible array header blocks.
+ *
+ * Map "extensible array" super blocks to 'btree' type file memory, since they
+ * are similar enough to B-tree nodes.
+ *
+ * Map "extensible array" data blocks & pages to 'lheap' type file memory, since
+ * they are similar enough to local heap info.
+ *
+ * -QAK
+ */
+#define H5FD_MEM_EARRAY_HDR H5FD_MEM_OHDR
+#define H5FD_MEM_EARRAY_IBLOCK H5FD_MEM_OHDR
+#define H5FD_MEM_EARRAY_SBLOCK H5FD_MEM_BTREE
+#define H5FD_MEM_EARRAY_DBLOCK H5FD_MEM_LHEAP
+#define H5FD_MEM_EARRAY_DBLK_PAGE H5FD_MEM_LHEAP
+
+/* Map "fixed array" header blocks to 'ohdr' type file memory, since its
+ * a fair amount of work to add a new kind of file memory and they are similar
+ * enough to object headers and probably too minor to deserve their own type.
+ *
+ * Map "fixed array" data blocks & pages to 'lheap' type file memory, since
+ * they are similar enough to local heap info.
+ *
+ */
+#define H5FD_MEM_FARRAY_HDR H5FD_MEM_OHDR
+#define H5FD_MEM_FARRAY_DBLOCK H5FD_MEM_LHEAP
+#define H5FD_MEM_FARRAY_DBLK_PAGE H5FD_MEM_LHEAP
+
+/*
+ * A free-list map which maps all types of allocation requests to a single
+ * free list. This is useful for drivers that don't really care about
+ * keeping different requests segregated in the underlying file and which
+ * want to make most efficient reuse of freed memory. The use of the
+ * H5FD_MEM_SUPER free list is arbitrary.
+ */
+#define H5FD_FLMAP_SINGLE \
+ { \
+ H5FD_MEM_SUPER, /*default*/ \
+ H5FD_MEM_SUPER, /*super*/ \
+ H5FD_MEM_SUPER, /*btree*/ \
+ H5FD_MEM_SUPER, /*draw*/ \
+ H5FD_MEM_SUPER, /*gheap*/ \
+ H5FD_MEM_SUPER, /*lheap*/ \
+ H5FD_MEM_SUPER /*ohdr*/ \
+ }
+
+/*
+ * A free-list map which segregates requests into `raw' or `meta' data
+ * pools.
+ */
+#define H5FD_FLMAP_DICHOTOMY \
+ { \
+ H5FD_MEM_SUPER, /*default*/ \
+ H5FD_MEM_SUPER, /*super*/ \
+ H5FD_MEM_SUPER, /*btree*/ \
+ H5FD_MEM_DRAW, /*draw*/ \
+ H5FD_MEM_DRAW, /*gheap*/ \
+ H5FD_MEM_SUPER, /*lheap*/ \
+ H5FD_MEM_SUPER /*ohdr*/ \
+ }
+
+/*
+ * The default free list map which causes each request type to use it's own
+ * free-list.
+ */
+#define H5FD_FLMAP_DEFAULT \
+ { \
+ H5FD_MEM_DEFAULT, /*default*/ \
+ H5FD_MEM_DEFAULT, /*super*/ \
+ H5FD_MEM_DEFAULT, /*btree*/ \
+ H5FD_MEM_DEFAULT, /*draw*/ \
+ H5FD_MEM_DEFAULT, /*gheap*/ \
+ H5FD_MEM_DEFAULT, /*lheap*/ \
+ H5FD_MEM_DEFAULT /*ohdr*/ \
+ }
+
+/*******************/
+/* Public Typedefs */
+/*******************/
+
+/* Forward declaration */
+typedef struct H5FD_t H5FD_t;
+
+/* Class information for each file driver */
+typedef struct H5FD_class_t {
+ const char * name;
+ haddr_t maxaddr;
+ H5F_close_degree_t fc_degree;
+ herr_t (*terminate)(void);
+ hsize_t (*sb_size)(H5FD_t *file);
+ herr_t (*sb_encode)(H5FD_t *file, char *name /*out*/, unsigned char *p /*out*/);
+ herr_t (*sb_decode)(H5FD_t *f, const char *name, const unsigned char *p);
+ size_t fapl_size;
+ void *(*fapl_get)(H5FD_t *file);
+ void *(*fapl_copy)(const void *fapl);
+ herr_t (*fapl_free)(void *fapl);
+ size_t dxpl_size;
+ void *(*dxpl_copy)(const void *dxpl);
+ herr_t (*dxpl_free)(void *dxpl);
+ H5FD_t *(*open)(const char *name, unsigned flags, hid_t fapl, haddr_t maxaddr);
+ herr_t (*close)(H5FD_t *file);
+ int (*cmp)(const H5FD_t *f1, const H5FD_t *f2);
+ herr_t (*query)(const H5FD_t *f1, unsigned long *flags);
+ herr_t (*get_type_map)(const H5FD_t *file, H5FD_mem_t *type_map);
+ haddr_t (*alloc)(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, hsize_t size);
+ herr_t (*free)(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr, hsize_t size);
+ haddr_t (*get_eoa)(const H5FD_t *file, H5FD_mem_t type);
+ herr_t (*set_eoa)(H5FD_t *file, H5FD_mem_t type, haddr_t addr);
+ haddr_t (*get_eof)(const H5FD_t *file, H5FD_mem_t type);
+ herr_t (*get_handle)(H5FD_t *file, hid_t fapl, void **file_handle);
+ herr_t (*read)(H5FD_t *file, H5FD_mem_t type, hid_t dxpl, haddr_t addr, size_t size, void *buffer);
+ herr_t (*write)(H5FD_t *file, H5FD_mem_t type, hid_t dxpl, haddr_t addr, size_t size, const void *buffer);
+ herr_t (*flush)(H5FD_t *file, hid_t dxpl_id, hbool_t closing);
+ herr_t (*truncate)(H5FD_t *file, hid_t dxpl_id, hbool_t closing);
+ herr_t (*lock)(H5FD_t *file, hbool_t rw);
+ herr_t (*unlock)(H5FD_t *file);
+ herr_t (*del)(const char *name, hid_t fapl);
+ H5FD_t *(*dedup)(H5FD_t *, H5FD_t *, hid_t);
+ H5FD_mem_t fl_map[H5FD_MEM_NTYPES];
+} H5FD_class_t;
+
+/* A free list is a singly-linked list of address/size pairs. */
+typedef struct H5FD_free_t {
+ haddr_t addr;
+ hsize_t size;
+ struct H5FD_free_t *next;
+} H5FD_free_t;
+
+/*
+ * The main datatype for each driver. Public fields common to all drivers
+ * are declared here and the driver appends private fields in memory.
+ */
+struct H5FD_t {
+ hid_t driver_id; /*driver ID for this file */
+ const H5FD_class_t *cls; /*constant class info */
+ unsigned long fileno; /* File 'serial' number */
+ unsigned access_flags; /* File access flags (from create or open) */
+ unsigned long feature_flags; /* VFL Driver feature Flags */
+ haddr_t maxaddr; /* For this file, overrides class */
+ haddr_t base_addr; /* Base address for HDF5 data w/in file */
+
+ H5FD_t *exc_owner; /* Pointer to an exclusive owner
+ * or NULL if none.
+ */
+
+ /* Space allocation management fields */
+ hsize_t threshold; /* Threshold for alignment */
+ hsize_t alignment; /* Allocation alignment */
+ hbool_t paged_aggr; /* Paged aggregation for file space is enabled or not */
+};
+
+/********************/
+/* Public Variables */
+/********************/
+
+/*********************/
+/* Public Prototypes */
+/*********************/
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+H5_DLL hid_t H5FDregister(const H5FD_class_t *cls);
+H5_DLL herr_t H5FDunregister(hid_t driver_id);
+H5_DLL H5FD_t *H5FDopen(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxaddr);
+H5_DLL herr_t H5FDclose(H5FD_t *file);
+H5_DLL int H5FDcmp(const H5FD_t *f1, const H5FD_t *f2);
+H5_DLL int H5FDquery(const H5FD_t *f, unsigned long *flags);
+H5_DLL haddr_t H5FDalloc(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, hsize_t size);
+H5_DLL herr_t H5FDfree(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr, hsize_t size);
+H5_DLL haddr_t H5FDget_eoa(H5FD_t *file, H5FD_mem_t type);
+H5_DLL herr_t H5FDset_eoa(H5FD_t *file, H5FD_mem_t type, haddr_t eoa);
+H5_DLL haddr_t H5FDget_eof(H5FD_t *file, H5FD_mem_t type);
+H5_DLL herr_t H5FDget_vfd_handle(H5FD_t *file, hid_t fapl, void **file_handle);
+H5_DLL herr_t H5FDread(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr, size_t size,
+ void *buf /*out*/);
+H5_DLL herr_t H5FDwrite(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr, size_t size,
+ const void *buf);
+H5_DLL herr_t H5FDflush(H5FD_t *file, hid_t dxpl_id, hbool_t closing);
+H5_DLL herr_t H5FDtruncate(H5FD_t *file, hid_t dxpl_id, hbool_t closing);
+H5_DLL herr_t H5FDlock(H5FD_t *file, hbool_t rw);
+H5_DLL herr_t H5FDunlock(H5FD_t *file);
+H5_DLL herr_t H5FDdelete(const char *name, hid_t fapl_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _H5FDdevelop_H */
diff --git a/src/H5FDmulti.c b/src/H5FDmulti.c
index 8b87319..1b5815a 100644
--- a/src/H5FDmulti.c
+++ b/src/H5FDmulti.c
@@ -44,6 +44,29 @@
#define my_strdup strdup
#endif
+/* Macros for enabling/disabling particular GCC warnings
+ *
+ * These are (renamed) duplicates of macros in H5private.h. If you make changes
+ * here, be sure to update those as well.
+ *
+ * (see the following websites for more info:
+ *      http://www.dbp-consulting.com/tutorials/SuppressingGCCWarnings.html
+ *      http://gcc.gnu.org/onlinedocs/gcc/Diagnostic-Pragmas.html#Diagnostic-Pragmas)
+ */
+/* These pragmas are only implemented usefully in gcc 4.6+ */
+#if ((__GNUC__ * 100) + __GNUC_MINOR__) >= 406
+#define H5_MULTI_GCC_DIAG_JOINSTR(x, y) x y
+#define H5_MULTI_GCC_DIAG_DO_PRAGMA(x) _Pragma(#x)
+#define H5_MULTI_GCC_DIAG_PRAGMA(x) H5_MULTI_GCC_DIAG_DO_PRAGMA(GCC diagnostic x)
+
+#define H5_MULTI_GCC_DIAG_OFF(x) \
+ H5_MULTI_GCC_DIAG_PRAGMA(push) H5_MULTI_GCC_DIAG_PRAGMA(ignored H5_MULTI_GCC_DIAG_JOINSTR("-W", x))
+#define H5_MULTI_GCC_DIAG_ON(x) H5_MULTI_GCC_DIAG_PRAGMA(pop)
+#else
+#define H5_MULTI_GCC_DIAG_OFF(x)
+#define H5_MULTI_GCC_DIAG_ON(x)
+#endif
+
/* Loop through all mapped files */
#define UNIQUE_MEMBERS_CORE(MAP, ITER, SEEN, LOOPVAR) \
{ \
@@ -1959,7 +1982,7 @@ compute_next(H5FD_multi_t *file)
* tmp in the code below, but early (4.4.7, at least) gcc only
* allows diagnostic pragmas to be toggled outside of functions.
*/
-H5_GCC_DIAG_OFF("format-nonliteral")
+H5_MULTI_GCC_DIAG_OFF("format-nonliteral")
static int
open_members(H5FD_multi_t *file)
{
@@ -2044,7 +2067,7 @@ H5FD_multi_delete(const char *filename, hid_t fapl_id)
return 0;
} /* end H5FD_multi_delete() */
-H5_GCC_DIAG_ON("format-nonliteral")
+H5_MULTI_GCC_DIAG_ON("format-nonliteral")
#ifdef H5private_H
/*
diff --git a/src/H5FDprivate.h b/src/H5FDprivate.h
index 645436b..2e3d270 100644
--- a/src/H5FDprivate.h
+++ b/src/H5FDprivate.h
@@ -18,8 +18,9 @@
#ifndef H5FDprivate_H
#define H5FDprivate_H
-/* Include package's public header */
+/* Include package's public headers */
#include "H5FDpublic.h"
+#include "H5FDdevelop.h"
/* Private headers needed by this file */
#include "H5Pprivate.h" /* Property lists */
diff --git a/src/H5FDpublic.h b/src/H5FDpublic.h
index 46f8803..0cfb072 100644
--- a/src/H5FDpublic.h
+++ b/src/H5FDpublic.h
@@ -18,140 +18,15 @@
#ifndef H5FDpublic_H
#define H5FDpublic_H
-#include "H5public.h"
-#include "H5Fpublic.h" /*for H5F_close_degree_t */
+/* Public headers needed by this file */
+#include "H5public.h" /* Generic Functions */
+#include "H5Fpublic.h" /* Files */
-#define H5_HAVE_VFL 1 /*define a convenient app feature test*/
-#define H5FD_VFD_DEFAULT 0 /* Default VFL driver value */
-
-/* Types of allocation requests: see H5Fpublic.h */
-typedef enum H5F_mem_t H5FD_mem_t;
-
-/* Map "fractal heap" header blocks to 'ohdr' type file memory, since its
- * a fair amount of work to add a new kind of file memory and they are similar
- * enough to object headers and probably too minor to deserve their own type.
- *
- * Map "fractal heap" indirect blocks to 'ohdr' type file memory, since they
- * are similar to fractal heap header blocks.
- *
- * Map "fractal heap" direct blocks to 'lheap' type file memory, since they
- * will be replacing local heaps.
- *
- * Map "fractal heap" 'huge' objects to 'draw' type file memory, since they
- * represent large objects that are directly stored in the file.
- *
- * -QAK
- */
-#define H5FD_MEM_FHEAP_HDR H5FD_MEM_OHDR
-#define H5FD_MEM_FHEAP_IBLOCK H5FD_MEM_OHDR
-#define H5FD_MEM_FHEAP_DBLOCK H5FD_MEM_LHEAP
-#define H5FD_MEM_FHEAP_HUGE_OBJ H5FD_MEM_DRAW
-
-/* Map "free space" header blocks to 'ohdr' type file memory, since its
- * a fair amount of work to add a new kind of file memory and they are similar
- * enough to object headers and probably too minor to deserve their own type.
- *
- * Map "free space" serialized sections to 'lheap' type file memory, since they
- * are similar enough to local heap info.
- *
- * -QAK
- */
-#define H5FD_MEM_FSPACE_HDR H5FD_MEM_OHDR
-#define H5FD_MEM_FSPACE_SINFO H5FD_MEM_LHEAP
-
-/* Map "shared object header message" master table to 'ohdr' type file memory,
- * since its a fair amount of work to add a new kind of file memory and they are
- * similar enough to object headers and probably too minor to deserve their own
- * type.
- *
- * Map "shared object header message" indices to 'btree' type file memory,
- * since they are similar enough to B-tree nodes.
- *
- * -QAK
- */
-#define H5FD_MEM_SOHM_TABLE H5FD_MEM_OHDR
-#define H5FD_MEM_SOHM_INDEX H5FD_MEM_BTREE
-
-/* Map "extensible array" header blocks to 'ohdr' type file memory, since its
- * a fair amount of work to add a new kind of file memory and they are similar
- * enough to object headers and probably too minor to deserve their own type.
- *
- * Map "extensible array" index blocks to 'ohdr' type file memory, since they
- * are similar to extensible array header blocks.
- *
- * Map "extensible array" super blocks to 'btree' type file memory, since they
- * are similar enough to B-tree nodes.
- *
- * Map "extensible array" data blocks & pages to 'lheap' type file memory, since
- * they are similar enough to local heap info.
- *
- * -QAK
- */
-#define H5FD_MEM_EARRAY_HDR H5FD_MEM_OHDR
-#define H5FD_MEM_EARRAY_IBLOCK H5FD_MEM_OHDR
-#define H5FD_MEM_EARRAY_SBLOCK H5FD_MEM_BTREE
-#define H5FD_MEM_EARRAY_DBLOCK H5FD_MEM_LHEAP
-#define H5FD_MEM_EARRAY_DBLK_PAGE H5FD_MEM_LHEAP
+/*****************/
+/* Public Macros */
+/*****************/
-/* Map "fixed array" header blocks to 'ohdr' type file memory, since its
- * a fair amount of work to add a new kind of file memory and they are similar
- * enough to object headers and probably too minor to deserve their own type.
- *
- * Map "fixed array" data blocks & pages to 'lheap' type file memory, since
- * they are similar enough to local heap info.
- *
- */
-#define H5FD_MEM_FARRAY_HDR H5FD_MEM_OHDR
-#define H5FD_MEM_FARRAY_DBLOCK H5FD_MEM_LHEAP
-#define H5FD_MEM_FARRAY_DBLK_PAGE H5FD_MEM_LHEAP
-
-/*
- * A free-list map which maps all types of allocation requests to a single
- * free list. This is useful for drivers that don't really care about
- * keeping different requests segregated in the underlying file and which
- * want to make most efficient reuse of freed memory. The use of the
- * H5FD_MEM_SUPER free list is arbitrary.
- */
-#define H5FD_FLMAP_SINGLE \
- { \
- H5FD_MEM_SUPER, /*default*/ \
- H5FD_MEM_SUPER, /*super*/ \
- H5FD_MEM_SUPER, /*btree*/ \
- H5FD_MEM_SUPER, /*draw*/ \
- H5FD_MEM_SUPER, /*gheap*/ \
- H5FD_MEM_SUPER, /*lheap*/ \
- H5FD_MEM_SUPER /*ohdr*/ \
- }
-
-/*
- * A free-list map which segregates requests into `raw' or `meta' data
- * pools.
- */
-#define H5FD_FLMAP_DICHOTOMY \
- { \
- H5FD_MEM_SUPER, /*default*/ \
- H5FD_MEM_SUPER, /*super*/ \
- H5FD_MEM_SUPER, /*btree*/ \
- H5FD_MEM_DRAW, /*draw*/ \
- H5FD_MEM_DRAW, /*gheap*/ \
- H5FD_MEM_SUPER, /*lheap*/ \
- H5FD_MEM_SUPER /*ohdr*/ \
- }
-
-/*
- * The default free list map which causes each request type to use it's own
- * free-list.
- */
-#define H5FD_FLMAP_DEFAULT \
- { \
- H5FD_MEM_DEFAULT, /*default*/ \
- H5FD_MEM_DEFAULT, /*super*/ \
- H5FD_MEM_DEFAULT, /*btree*/ \
- H5FD_MEM_DEFAULT, /*draw*/ \
- H5FD_MEM_DEFAULT, /*gheap*/ \
- H5FD_MEM_DEFAULT, /*lheap*/ \
- H5FD_MEM_DEFAULT /*ohdr*/ \
- }
+#define H5FD_VFD_DEFAULT 0 /* Default VFL driver value */
/* Define VFL driver features that can be enabled on a per-driver basis */
/* These are returned with the 'query' function pointer in H5FD_class_t */
@@ -263,76 +138,12 @@ typedef enum H5F_mem_t H5FD_mem_t;
*/
#define H5FD_FEAT_DEFAULT_VFD_COMPATIBLE 0x00008000
-/* Forward declaration */
-typedef struct H5FD_t H5FD_t;
-
-/* Class information for each file driver */
-typedef struct H5FD_class_t {
- const char * name;
- haddr_t maxaddr;
- H5F_close_degree_t fc_degree;
- herr_t (*terminate)(void);
- hsize_t (*sb_size)(H5FD_t *file);
- herr_t (*sb_encode)(H5FD_t *file, char *name /*out*/, unsigned char *p /*out*/);
- herr_t (*sb_decode)(H5FD_t *f, const char *name, const unsigned char *p);
- size_t fapl_size;
- void *(*fapl_get)(H5FD_t *file);
- void *(*fapl_copy)(const void *fapl);
- herr_t (*fapl_free)(void *fapl);
- size_t dxpl_size;
- void *(*dxpl_copy)(const void *dxpl);
- herr_t (*dxpl_free)(void *dxpl);
- H5FD_t *(*open)(const char *name, unsigned flags, hid_t fapl, haddr_t maxaddr);
- herr_t (*close)(H5FD_t *file);
- int (*cmp)(const H5FD_t *f1, const H5FD_t *f2);
- herr_t (*query)(const H5FD_t *f1, unsigned long *flags);
- herr_t (*get_type_map)(const H5FD_t *file, H5FD_mem_t *type_map);
- haddr_t (*alloc)(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, hsize_t size);
- herr_t (*free)(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr, hsize_t size);
- haddr_t (*get_eoa)(const H5FD_t *file, H5FD_mem_t type);
- herr_t (*set_eoa)(H5FD_t *file, H5FD_mem_t type, haddr_t addr);
- haddr_t (*get_eof)(const H5FD_t *file, H5FD_mem_t type);
- herr_t (*get_handle)(H5FD_t *file, hid_t fapl, void **file_handle);
- herr_t (*read)(H5FD_t *file, H5FD_mem_t type, hid_t dxpl, haddr_t addr, size_t size, void *buffer);
- herr_t (*write)(H5FD_t *file, H5FD_mem_t type, hid_t dxpl, haddr_t addr, size_t size, const void *buffer);
- herr_t (*flush)(H5FD_t *file, hid_t dxpl_id, hbool_t closing);
- herr_t (*truncate)(H5FD_t *file, hid_t dxpl_id, hbool_t closing);
- herr_t (*lock)(H5FD_t *file, hbool_t rw);
- herr_t (*unlock)(H5FD_t *file);
- herr_t (*del)(const char *name, hid_t fapl);
- H5FD_t *(*dedup)(H5FD_t *, H5FD_t *, hid_t);
- H5FD_mem_t fl_map[H5FD_MEM_NTYPES];
-} H5FD_class_t;
-
-/* A free list is a singly-linked list of address/size pairs. */
-typedef struct H5FD_free_t {
- haddr_t addr;
- hsize_t size;
- struct H5FD_free_t *next;
-} H5FD_free_t;
-
-/*
- * The main datatype for each driver. Public fields common to all drivers
- * are declared here and the driver appends private fields in memory.
- */
-struct H5FD_t {
- hid_t driver_id; /*driver ID for this file */
- const H5FD_class_t *cls; /*constant class info */
- unsigned long fileno; /* File 'serial' number */
- unsigned access_flags; /* File access flags (from create or open) */
- unsigned long feature_flags; /* VFL Driver feature Flags */
- haddr_t maxaddr; /* For this file, overrides class */
- haddr_t base_addr; /* Base address for HDF5 data w/in file */
-
- H5FD_t *exc_owner; /* Pointer to an exclusive owner
- * or NULL if none.
- */
+/*******************/
+/* Public Typedefs */
+/*******************/
- /* Space allocation management fields */
- hsize_t threshold; /* Threshold for alignment */
- hsize_t alignment; /* Allocation alignment */
- hbool_t paged_aggr; /* Paged aggregation for file space is enabled or not */
-};
+/* Types of allocation requests: see H5Fpublic.h */
+typedef enum H5F_mem_t H5FD_mem_t;
/**
* Define enum for the source of file image callbacks
@@ -442,33 +253,19 @@ typedef struct {
} H5FD_file_image_callbacks_t;
//! <!-- [H5FD_file_image_callbacks_t_snip] -->
+/********************/
+/* Public Variables */
+/********************/
+
+/*********************/
+/* Public Prototypes */
+/*********************/
+
#ifdef __cplusplus
extern "C" {
#endif
/* Function prototypes */
-H5_DLL hid_t H5FDregister(const H5FD_class_t *cls);
-H5_DLL herr_t H5FDunregister(hid_t driver_id);
-H5_DLL H5FD_t *H5FDopen(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxaddr);
-H5_DLL herr_t H5FDclose(H5FD_t *file);
-H5_DLL int H5FDcmp(const H5FD_t *f1, const H5FD_t *f2);
-H5_DLL int H5FDquery(const H5FD_t *f, unsigned long *flags);
-H5_DLL haddr_t H5FDalloc(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, hsize_t size);
-H5_DLL herr_t H5FDfree(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr, hsize_t size);
-H5_DLL haddr_t H5FDget_eoa(H5FD_t *file, H5FD_mem_t type);
-H5_DLL herr_t H5FDset_eoa(H5FD_t *file, H5FD_mem_t type, haddr_t eoa);
-H5_DLL haddr_t H5FDget_eof(H5FD_t *file, H5FD_mem_t type);
-H5_DLL herr_t H5FDget_vfd_handle(H5FD_t *file, hid_t fapl, void **file_handle);
-H5_DLL herr_t H5FDread(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr, size_t size,
- void *buf /*out*/);
-H5_DLL herr_t H5FDwrite(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr, size_t size,
- const void *buf);
-H5_DLL herr_t H5FDflush(H5FD_t *file, hid_t dxpl_id, hbool_t closing);
-H5_DLL herr_t H5FDtruncate(H5FD_t *file, hid_t dxpl_id, hbool_t closing);
-H5_DLL herr_t H5FDlock(H5FD_t *file, hbool_t rw);
-H5_DLL herr_t H5FDunlock(H5FD_t *file);
-H5_DLL herr_t H5FDdelete(const char *name, hid_t fapl_id);
-
/* Allows querying a VFD ID for features before the file is opened */
H5_DLL herr_t H5FDdriver_query(hid_t driver_id, unsigned long *flags /*out*/);
diff --git a/src/H5Fint.c b/src/H5Fint.c
index ae16626..44506b6 100644
--- a/src/H5Fint.c
+++ b/src/H5Fint.c
@@ -81,6 +81,7 @@ static int H5F__get_objects_cb(void *obj_ptr, hid_t obj_id, void *key);
static herr_t H5F__build_name(const char *prefix, const char *file_name, char **full_name /*out*/);
static char * H5F__getenv_prefix_name(char **env_prefix /*in,out*/);
static H5F_t *H5F__new(H5F_shared_t *shared, unsigned flags, hid_t fcpl_id, hid_t fapl_id, H5FD_t *lf);
+static herr_t H5F__check_if_using_file_locks(H5P_genplist_t *fapl, hbool_t *use_file_locking);
static herr_t H5F__build_actual_name(const H5F_t *f, const H5P_genplist_t *fapl, const char *name,
char ** /*out*/ actual_name);
static herr_t H5F__flush_phase1(H5F_t *f);
@@ -414,14 +415,15 @@ H5F_get_access_plist(H5F_t *f, hbool_t app_ref)
efc_size = H5F__efc_max_nfiles(f->shared->efc);
if (H5P_set(new_plist, H5F_ACS_EFC_SIZE_NAME, &efc_size) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTSET, H5I_INVALID_HID, "can't set elink file cache size")
- if (f->shared->pb_ptr != NULL) {
- if (H5P_set(new_plist, H5F_ACS_PAGE_BUFFER_SIZE_NAME, &(f->shared->pb_ptr->max_size)) < 0)
+ if (f->shared->page_buf != NULL) {
+ if (H5P_set(new_plist, H5F_ACS_PAGE_BUFFER_SIZE_NAME, &(f->shared->page_buf->max_size)) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTSET, H5I_INVALID_HID, "can't set page buffer size")
- if (H5P_set(new_plist, H5F_ACS_PAGE_BUFFER_MIN_META_PERC_NAME, &(f->shared->pb_ptr->min_meta_perc)) <
- 0)
+ if (H5P_set(new_plist, H5F_ACS_PAGE_BUFFER_MIN_META_PERC_NAME,
+ &(f->shared->page_buf->min_meta_perc)) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTSET, H5I_INVALID_HID,
"can't set minimum metadata fraction of page buffer")
- if (H5P_set(new_plist, H5F_ACS_PAGE_BUFFER_MIN_RAW_PERC_NAME, &(f->shared->pb_ptr->min_raw_perc)) < 0)
+ if (H5P_set(new_plist, H5F_ACS_PAGE_BUFFER_MIN_RAW_PERC_NAME, &(f->shared->page_buf->min_raw_perc)) <
+ 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTSET, H5I_INVALID_HID,
"can't set minimum raw data fraction of page buffer")
} /* end if */
@@ -2315,6 +2317,11 @@ H5F__flush_phase2(H5F_t *f, hbool_t closing)
/* Sanity check arguments */
HDassert(f);
+ /* Inform the metadata cache that we are about to flush */
+ if (H5AC_prep_for_file_flush(f) < 0)
+ /* Push error, but keep going*/
+ HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "prep for MDC flush failed")
+
/* Flush the entire metadata cache */
if (H5AC_flush(f) < 0)
/* Push error, but keep going*/
@@ -2347,6 +2354,11 @@ H5F__flush_phase2(H5F_t *f, hbool_t closing)
H5CX_set_mpi_file_flushing(FALSE);
#endif /* H5_HAVE_PARALLEL */
+ /* Inform the metadata cache that we are done with the flush */
+ if (H5AC_secure_from_file_flush(f) < 0)
+ /* Push error, but keep going*/
+ HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "secure from MDC flush failed")
+
/* Flush out the metadata accumulator */
if (H5F__accum_flush(f->shared) < 0)
/* Push error, but keep going*/
diff --git a/src/H5Fpkg.h b/src/H5Fpkg.h
index 5ee6b7c..71c27f2 100644
--- a/src/H5Fpkg.h
+++ b/src/H5Fpkg.h
@@ -32,8 +32,8 @@
/* Other private headers needed by this file */
#include "H5private.h" /* Generic Functions */
#include "H5ACprivate.h" /* Metadata cache */
-#include "H5FDprivate.h" /* VFD -- for VFD SWMR */
#include "H5Bprivate.h" /* B-trees */
+#include "H5FDprivate.h" /* File drivers */
#include "H5FLprivate.h" /* Free Lists */
#include "H5FOprivate.h" /* File objects */
#include "H5FSprivate.h" /* File free space */
@@ -305,9 +305,7 @@ struct H5F_shared_t {
unsigned long feature_flags; /* VFL Driver feature Flags */
haddr_t maxaddr; /* Maximum address for file */
- H5PB_t *pb_ptr; /* pointer to the page buffer, or NULL */
- /* if the page buffer is disabled. */
-
+ H5PB_t * page_buf; /* The page buffer cache */
H5AC_t * cache; /* The object cache */
H5AC_cache_config_t mdc_initCacheCfg; /* initial configuration for the */
/* metadata cache. This structure is */
@@ -473,7 +471,8 @@ struct H5F_shared_t {
/* Delayed free space release doubly linked list */
shadow_defree_queue_t shadow_defrees;
- char * extpath; /* Path for searching target external link file */
+
+ char *extpath; /* Path for searching target external link file */
#ifdef H5_HAVE_PARALLEL
H5P_coll_md_read_flag_t coll_md_read; /* Do all metadata reads collectively */
diff --git a/src/H5Fvfd_swmr.c b/src/H5Fvfd_swmr.c
index 3b5eb40..13170af 100644
--- a/src/H5Fvfd_swmr.c
+++ b/src/H5Fvfd_swmr.c
@@ -388,7 +388,6 @@ H5F_update_vfd_swmr_metadata_file(H5F_t *f, uint32_t num_entries, H5FD_vfd_swmr_
haddr_t md_addr; /* Address in the metadata file */
uint32_t i; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
- hbool_t queue_was_nonempty;
FUNC_ENTER_NOAPI(FAIL)
@@ -473,8 +472,6 @@ H5F_update_vfd_swmr_metadata_file(H5F_t *f, uint32_t num_entries, H5FD_vfd_swmr_
HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "fail to construct & write header to md")
- queue_was_nonempty = !TAILQ_EMPTY(&shared->shadow_defrees);
-
/*
* Release time out entries from the delayed list by scanning the
* list from the bottom up:
@@ -633,7 +630,7 @@ H5F_vfd_swmr_writer__prep_for_flush_or_close(H5F_t *f)
HDassert(shared->vfd_swmr);
HDassert(shared->vfd_swmr_writer);
- HDassert(shared->pb_ptr);
+ HDassert(shared->page_buf);
/* since we are about to flush the page buffer, force and end of
* tick so as to avoid attempts to flush entries on the page buffer
@@ -643,7 +640,7 @@ H5F_vfd_swmr_writer__prep_for_flush_or_close(H5F_t *f)
HGOTO_ERROR(H5E_FILE, H5E_SYSTEM, FAIL, "H5F_vfd_swmr_writer_end_of_tick() failed.")
- while (shared->pb_ptr->dwl_len > 0) {
+ while (shared->page_buf->dwl_len > 0) {
if (H5F__vfd_swmr_writer__wait_a_tick(f) < 0)
@@ -761,7 +758,7 @@ H5F_vfd_swmr_writer_end_of_tick(H5F_t *f, hbool_t wait_for_reader)
FUNC_ENTER_NOAPI(FAIL)
HDassert(shared);
- HDassert(shared->pb_ptr);
+ HDassert(shared->page_buf);
HDassert(shared->vfd_swmr_writer);
if (!vfd_swmr_writer_may_increase_tick_to(shared->tick_num + 1, wait_for_reader))
@@ -793,9 +790,17 @@ H5F_vfd_swmr_writer_end_of_tick(H5F_t *f, hbool_t wait_for_reader)
if (shared->cache) {
+ if (H5AC_prep_for_file_flush(f) < 0)
+
+ HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "prep for MDC flush failed")
+
if (H5AC_flush(f) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush metadata cache to the page buffer")
+
+ if (H5AC_secure_from_file_flush(f) < 0)
+
+ HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "secure from MDC flush failed")
}
if (H5FD_truncate(shared->lf, FALSE) < 0)
@@ -806,9 +811,17 @@ H5F_vfd_swmr_writer_end_of_tick(H5F_t *f, hbool_t wait_for_reader)
/* 2) If it exists, flush the metadata cache to the page buffer. */
if (shared->cache) {
+ if (H5AC_prep_for_file_flush(f) < 0)
+
+ HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "prep for MDC flush failed")
+
if (H5AC_flush(f) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush metadata cache to the page buffer")
+
+ if (H5AC_secure_from_file_flush(f) < 0)
+
+ HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "secure from MDC flush failed")
}
/* 3) If this is the first tick (i.e. tick == 1), create the
@@ -1000,7 +1013,7 @@ H5F_vfd_swmr_reader_end_of_tick(H5F_t *f, hbool_t entering_api)
FUNC_ENTER_NOAPI(FAIL)
- HDassert(shared->pb_ptr);
+ HDassert(shared->page_buf);
HDassert(shared->vfd_swmr);
HDassert(!shared->vfd_swmr_writer);
HDassert(file);
@@ -1170,7 +1183,7 @@ H5F_vfd_swmr_reader_end_of_tick(H5F_t *f, hbool_t entering_api)
entries_removed++;
}
for (i = 0; i < nchanges; i++) {
- haddr_t page_addr = (haddr_t)(change[i].pgno * shared->pb_ptr->page_size);
+ haddr_t page_addr = (haddr_t)(change[i].pgno * shared->page_buf->page_size);
if (H5PB_remove_entry(shared, page_addr) < 0) {
HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "remove page buffer entry failed");
}
@@ -1360,7 +1373,7 @@ H5F_dump_eot_queue(void)
for (curr = TAILQ_FIRST(&eot_queue_g), i = 0; curr != NULL; curr = TAILQ_NEXT(curr, link), i++) {
HDfprintf(stderr, "%d: %s tick_num %" PRIu64 ", end_of_tick %jd.%09ld, vfd_swmr_file %p\n", i,
curr->vfd_swmr_writer ? "writer" : "not writer", curr->tick_num, curr->end_of_tick.tv_sec,
- curr->end_of_tick.tv_nsec, curr->vfd_swmr_file);
+ curr->end_of_tick.tv_nsec, (void *)curr->vfd_swmr_file);
}
if (i == 0)
@@ -1798,12 +1811,9 @@ done:
static herr_t
H5F__vfd_swmr_writer__wait_a_tick(H5F_t *f)
{
- int result;
- struct timespec req;
- struct timespec rem;
- uint64_t tick_in_nsec;
- H5F_shared_t * shared;
- herr_t ret_value = SUCCEED; /* Return value */
+ uint64_t tick_in_nsec;
+ H5F_shared_t *shared;
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
diff --git a/src/H5HG.c b/src/H5HG.c
index c1c574c..f474748 100644
--- a/src/H5HG.c
+++ b/src/H5HG.c
@@ -598,7 +598,7 @@ H5HG_read(H5F_t *f, H5HG_t *hobj, void *object /*out*/, size_t *buf_size)
if (NULL == (heap = H5HG__protect(f, hobj->addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to protect global heap")
- if (hobj->idx >= heap->nused && H5HG_trap("out of bounds"))
+ if (hobj->idx >= heap->nused)
HGOTO_ERROR(H5E_HEAP, H5E_BADRANGE, NULL, "address out of bounds")
HDassert(hobj->idx < heap->nused);
diff --git a/src/H5HGprivate.h b/src/H5HGprivate.h
index 6811ea8..d8e6b46 100644
--- a/src/H5HGprivate.h
+++ b/src/H5HGprivate.h
@@ -70,6 +70,4 @@ H5_DLL size_t H5HG_get_free_size(const H5HG_heap_t *h);
/* Debugging functions */
H5_DLL herr_t H5HG_debug(H5F_t *f, haddr_t addr, FILE *stream, int indent, int fwidth);
-bool H5HG_trap(const char *);
-
#endif /* H5HGprivate_H */
diff --git a/src/H5HGtrap.c b/src/H5HGtrap.c
deleted file mode 100644
index e791829..0000000
--- a/src/H5HGtrap.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/****************/
-/* Module Setup */
-/****************/
-
-#include "H5HGmodule.h" /* This source code file is part of the H5HG module */
-
-/*
- * Headers
- */
-#include "H5private.h" /* Generic Functions */
-#include "H5Eprivate.h" /* Error handling */
-#include "H5HGpkg.h" /* Global heaps */
-
-/* H5HG_trap() is an instrumentation point for the global heap.
- * The H5HG_trap() result modifies the global heap's treatment of
- * an unexpected condition that ordinarily would cause an
- * HDassert() statement to abort the program.
- *
- * Currently, just one function, H5HG_read(), calls H5HG_trap(), using
- * the `reason` string "out of bounds".
- *
- * Test programs such as test/vfd_swmr_vlstr_{reader,writer}.c provide
- * their own H5HG_trap() implementation that overrides the one in the library.
- *
- * H5HG_trap() returns `true` if the caller should generate an error-stack
- * entry and return an error code to the caller's caller.
- *
- * H5HG_trap() returns `false` if the caller should blithely carry on;
- * if NDEBUG is not #defined, then the caller will ordinarily abort the
- * program in a subsequent HDassert() statement.
- */
-bool
-H5HG_trap(const char *reason)
-{
- return false;
-}
diff --git a/src/H5Idevelop.h b/src/H5Idevelop.h
new file mode 100644
index 0000000..3cd951e
--- /dev/null
+++ b/src/H5Idevelop.h
@@ -0,0 +1,139 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * This file contains public declarations for the H5I (ID management) developer
+ * support routines.
+ */
+
+#ifndef _H5Idevelop_H
+#define _H5Idevelop_H
+
+/* Include package's public header */
+#include "H5Ipublic.h" /* ID management */
+
+/*****************/
+/* Public Macros */
+/*****************/
+
+/*******************/
+/* Public Typedefs */
+/*******************/
+
+/**
+ * The type of the realize_cb callback for H5Iregister_future
+ */
+//! <!-- [H5I_future_realize_func_t_snip] -->
+typedef herr_t (*H5I_future_realize_func_t)(void *future_object, hid_t *actual_object_id);
+//! <!-- [H5I_future_realize_func_t_snip] -->
+
+/**
+ * The type of the discard_cb callback for H5Iregister_future
+ */
+//! <!-- [H5I_future_discard_func_t_snip] -->
+typedef herr_t (*H5I_future_discard_func_t)(void *future_object);
+//! <!-- [H5I_future_discard_func_t_snip] -->
+
+/********************/
+/* Public Variables */
+/********************/
+
+/*********************/
+/* Public Prototypes */
+/*********************/
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * \ingroup H5I
+ *
+ * \brief Registers a "future" object under a type and returns an ID for it
+ *
+ * \param[in] type The identifier of the type of the new ID
+ * \param[in] object Pointer to "future" object for which a new ID is created
+ * \param[in] realize_cb Function pointer to realize a future object
+ * \param[in] discard_cb Function pointer to destroy a future object
+ *
+ * \return \hid_t{object}
+ *
+ * \details H5Iregister_future() creates and returns a new ID for a "future" object.
+ * Future objects are a special kind of object and represent a
+ * placeholder for an object that has not yet been created or opened.
+ * The \p realize_cb will be invoked by the HDF5 library to 'realize'
+ * the future object as an actual object. A call to H5Iobject_verify()
+ * will invoke the \p realize_cb callback and if it successfully
+ * returns, will return the actual object, not the future object.
+ *
+ * \details The \p type parameter is the identifier for the ID type to which
+ * this new future ID will belong. This identifier may have been created
+ * by a call to H5Iregister_type() or may be one of the HDF5 pre-defined
+ * ID classes (e.g. H5I_FILE, H5I_GROUP, H5I_DATASPACE, etc).
+ *
+ * \details The \p object parameter is a pointer to the memory which the new ID
+ * will be a reference to. This pointer will be stored by the library,
+ * but will not be returned to a call to H5Iobject_verify() until the
+ * \p realize_cb callback has returned the actual pointer for the object.
+ *
+ * A NULL value for \p object is allowed.
+ *
+ * \details The \p realize_cb parameter is a function pointer that will be
+ * invoked by the HDF5 library to convert a future object into an
+ * actual object. The \p realize_cb function may be invoked by
+ * H5Iobject_verify() to return the actual object for a user-defined
+ * ID class (i.e. an ID class registered with H5Iregister_type()) or
+ * internally by the HDF5 library in order to use or get information
+ * from an HDF5 pre-defined ID type. For example, the \p realize_cb
+ * for a future dataspace object will be called during the process
+ * of returning information from H5Sget_simple_extent_dims().
+ *
+ * Note that although the \p realize_cb routine returns
+ * an ID (as a parameter) for the actual object, the HDF5 library
+ * will swap the actual object in that ID for the future object in
+ * the future ID. This ensures that the ID value for the object
+ * doesn't change for the user when the object is realized.
+ *
+ * Note that the \p realize_cb callback could receive a NULL value
+ * for a future object pointer, if one was used when H5Iregister_future()
+ * was initially called. This is permitted as a means of allowing
+ * the \p realize_cb to act as a generator of new objects, without
+ * requiring creation of unnecessary future objects.
+ *
+ * It is an error to pass NULL for \p realize_cb.
+ *
+ * \details The \p discard_cb parameter is a function pointer that will be
+ * invoked by the HDF5 library to destroy a future object. This
+ * callback will always be invoked for _every_ future object, whether
+ * the \p realize_cb is invoked on it or not. It's possible that
+ * the \p discard_cb is invoked on a future object without the
+ * \p realize_cb being invoked, e.g. when a future ID is closed without
+ * requiring the future object to be realized into an actual one.
+ *
+ * Note that the \p discard_cb callback could receive a NULL value
+ * for a future object pointer, if one was used when H5Iregister_future()
+ * was initially called.
+ *
+ * It is an error to pass NULL for \p discard_cb.
+ *
+ * \note The H5Iregister_future() function is primarily targeted at VOL connector
+ * authors and is _not_ designed for general-purpose application use.
+ *
+ */
+H5_DLL hid_t H5Iregister_future(H5I_type_t type, const void *object, H5I_future_realize_func_t realize_cb,
+ H5I_future_discard_func_t discard_cb);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _H5Idevelop_H */
diff --git a/src/H5Iprivate.h b/src/H5Iprivate.h
index d1b6248..831ff76 100644
--- a/src/H5Iprivate.h
+++ b/src/H5Iprivate.h
@@ -20,8 +20,9 @@
#ifndef H5Iprivate_H
#define H5Iprivate_H
-/* Include package's public header */
+/* Include package's public headers */
#include "H5Ipublic.h"
+#include "H5Idevelop.h"
/* Private headers needed by this file */
#include "H5private.h"
diff --git a/src/H5Ipublic.h b/src/H5Ipublic.h
index 8e5e167..a5f830b 100644
--- a/src/H5Ipublic.h
+++ b/src/H5Ipublic.h
@@ -99,20 +99,6 @@ typedef int (*H5I_search_func_t)(void *obj, hid_t id, void *key);
typedef herr_t (*H5I_iterate_func_t)(hid_t id, void *udata);
//! <!-- [H5I_iterate_func_t_snip] -->
-/**
- * The type of the realize_cb callback for H5Iregister_future
- */
-//! <!-- [H5I_future_realize_func_t_snip] -->
-typedef herr_t (*H5I_future_realize_func_t)(void *future_object, hid_t *actual_object_id);
-//! <!-- [H5I_future_realize_func_t_snip] -->
-
-/**
- * The type of the discard_cb callback for H5Iregister_future
- */
-//! <!-- [H5I_future_discard_func_t_snip] -->
-typedef herr_t (*H5I_future_discard_func_t)(void *future_object);
-//! <!-- [H5I_future_discard_func_t_snip] -->
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -144,82 +130,6 @@ H5_DLL hid_t H5Iregister(H5I_type_t type, const void *object);
/**
* \ingroup H5I
*
- * \brief Registers a "future" object under a type and returns an ID for it
- *
- * \param[in] type The identifier of the type of the new ID
- * \param[in] object Pointer to "future" object for which a new ID is created
- * \param[in] realize_cb Function pointer to realize a future object
- * \param[in] discard_cb Function pointer to destroy a future object
- *
- * \return \hid_t{object}
- *
- * \details H5Iregister_future() creates and returns a new ID for a "future" object.
- * Future objects are a special kind of object and represent a
- * placeholder for an object that has not yet been created or opened.
- * The \p realize_cb will be invoked by the HDF5 library to 'realize'
- * the future object as an actual object. A call to H5Iobject_verify()
- * will invoke the \p realize_cb callback and if it successfully
- * returns, will return the actual object, not the future object.
- *
- * \details The \p type parameter is the identifier for the ID type to which
- * this new future ID will belong. This identifier may have been created
- * by a call to H5Iregister_type() or may be one of the HDF5 pre-defined
- * ID classes (e.g. H5I_FILE, H5I_GROUP, H5I_DATASPACE, etc).
- *
- * \details The \p object parameter is a pointer to the memory which the new ID
- * will be a reference to. This pointer will be stored by the library,
- * but will not be returned to a call to H5Iobject_verify() until the
- * \p realize_cb callback has returned the actual pointer for the object.
- *
- * A NULL value for \p object is allowed.
- *
- * \details The \p realize_cb parameter is a function pointer that will be
- * invoked by the HDF5 library to convert a future object into an
- * actual object. The \p realize_cb function may be invoked by
- * H5Iobject_verify() to return the actual object for a user-defined
- * ID class (i.e. an ID class registered with H5Iregister_type()) or
- * internally by the HDF5 library in order to use or get information
- * from an HDF5 pre-defined ID type. For example, the \p realize_cb
- * for a future dataspace object will be called during the process
- * of returning information from H5Sget_simple_extent_dims().
- *
- * Note that although the \p realize_cb routine returns
- * an ID (as a parameter) for the actual object, the HDF5 library
- * will swap the actual object in that ID for the future object in
- * the future ID. This ensures that the ID value for the object
- * doesn't change for the user when the object is realized.
- *
- * Note that the \p realize_cb callback could receive a NULL value
- * for a future object pointer, if one was used when H5Iregister_future()
- * was initially called. This is permitted as a means of allowing
- * the \p realize_cb to act as a generator of new objects, without
- * requiring creation of unnecessary future objects.
- *
- * It is an error to pass NULL for \p realize_cb.
- *
- * \details The \p discard_cb parameter is a function pointer that will be
- * invoked by the HDF5 library to destroy a future object. This
- * callback will always be invoked for _every_ future object, whether
- * the \p realize_cb is invoked on it or not. It's possible that
- * the \p discard_cb is invoked on a future object without the
- * \p realize_cb being invoked, e.g. when a future ID is closed without
- * requiring the future object to be realized into an actual one.
- *
- * Note that the \p discard_cb callback could receive a NULL value
- * for a future object pointer, if one was used when H5Iregister_future()
- * was initially called.
- *
- * It is an error to pass NULL for \p discard_cb.
- *
- * \note The H5Iregister_future() function is primarily targeted at VOL connector
- * authors and is _not_ designed for general-purpose application use.
- *
- */
-H5_DLL hid_t H5Iregister_future(H5I_type_t type, const void *object, H5I_future_realize_func_t realize_cb,
- H5I_future_discard_func_t discard_cb);
-/**
- * \ingroup H5I
- *
* \brief Returns the object referenced by an ID
*
* \param[in] id ID to be dereferenced
diff --git a/src/H5Ldevelop.h b/src/H5Ldevelop.h
new file mode 100644
index 0000000..43ed7de
--- /dev/null
+++ b/src/H5Ldevelop.h
@@ -0,0 +1,314 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * This file contains public declarations for the H5L (link) developer
+ * support routines.
+ */
+
+#ifndef _H5Ldevelop_H
+#define _H5Ldevelop_H
+
+/* Include package's public header */
+#include "H5Lpublic.h"
+
+/*****************/
+/* Public Macros */
+/*****************/
+
+/**
+ * \brief Current version of the H5L_class_t struct
+ */
+#define H5L_LINK_CLASS_T_VERS 1
+
+/*******************/
+/* Public Typedefs */
+/*******************/
+
+/* The H5L_class_t struct can be used to override the behavior of a
+ * "user-defined" link class. Users should populate the struct with callback
+ * functions defined below.
+ */
+/* Callback prototypes for user-defined links */
+/**
+ * \brief Link creation callback
+ */
+typedef herr_t (*H5L_create_func_t)(const char *link_name, hid_t loc_group, const void *lnkdata,
+ size_t lnkdata_size, hid_t lcpl_id);
+/**
+ * \brief Callback for link move
+ */
+typedef herr_t (*H5L_move_func_t)(const char *new_name, hid_t new_loc, const void *lnkdata,
+ size_t lnkdata_size);
+/**
+ * \brief Callback for link copy
+ */
+typedef herr_t (*H5L_copy_func_t)(const char *new_name, hid_t new_loc, const void *lnkdata,
+ size_t lnkdata_size);
+/**
+ * \brief Callback during link traversal
+ */
+typedef hid_t (*H5L_traverse_func_t)(const char *link_name, hid_t cur_group, const void *lnkdata,
+ size_t lnkdata_size, hid_t lapl_id, hid_t dxpl_id);
+/**
+ * \brief Callback for link deletion
+ */
+typedef herr_t (*H5L_delete_func_t)(const char *link_name, hid_t file, const void *lnkdata,
+ size_t lnkdata_size);
+/**
+ * \brief Callback for querying the link.
+ *
+ * Returns the size of the buffer needed.
+ */
+typedef ssize_t (*H5L_query_func_t)(const char *link_name, const void *lnkdata, size_t lnkdata_size,
+ void *buf /*out*/, size_t buf_size);
+
+/**
+ * \brief Link prototype
+ *
+ * The H5L_class_t struct can be used to override the behavior of a
+ * "user-defined" link class. Users should populate the struct with callback
+ * functions defined elsewhere.
+ */
+//! <!-- [H5L_class_t_snip] -->
+typedef struct {
+ int version; /**< Version number of this struct */
+ H5L_type_t id; /**< Link type ID */
+ const char * comment; /**< Comment for debugging */
+ H5L_create_func_t create_func; /**< Callback during link creation */
+ H5L_move_func_t move_func; /**< Callback after moving link */
+ H5L_copy_func_t copy_func; /**< Callback after copying link */
+ H5L_traverse_func_t trav_func; /**< Callback during link traversal */
+ H5L_delete_func_t del_func; /**< Callback for link deletion */
+ H5L_query_func_t query_func; /**< Callback for queries */
+} H5L_class_t;
+//! <!-- [H5L_class_t_snip] -->
+
+/********************/
+/* Public Variables */
+/********************/
+
+/*********************/
+/* Public Prototypes */
+/*********************/
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * \ingroup H5LA
+ *
+ * \brief Registers a user-defined link class or changes behavior of an
+ * existing class
+ *
+ * \param[in] cls Pointer to a buffer containing the struct describing the
+ * user-defined link class
+ *
+ * \return \herr_t
+ *
+ * \details H5Lregister() registers a class of user-defined links, or changes
+ * the behavior of an existing class.
+ *
+ * \p cls is a pointer to a buffer containing a copy of the
+ *          H5L_class_t struct. This struct is defined in H5Ldevelop.h as
+ * follows:
+ * \snippet this H5L_class_t_snip
+ *
+ * The class definition passed with \p cls must include at least the
+ * following:
+ * \li An H5L_class_t version (which should be #H5L_LINK_CLASS_T_VERS)
+ * \li A link class identifier, \c class_id
+ * \li A traversal function, \c trav_func
+ *
+ * Remaining \c struct members are optional and may be passed as NULL.
+ *
+ * The link class passed in \c class_id must be in the user-definable
+ * range between #H5L_TYPE_UD_MIN and #H5L_TYPE_UD_MAX
+ * (see the table below) and will override
+ * any existing link class with that identifier.
+ *
+ * As distributed, valid values of \c class_id used in HDF5 include
+ * the following (defined in H5Lpublic.h):
+ * \link_types
+ *
+ * The hard and soft link class identifiers cannot be modified or
+ * reassigned, but the external link class is implemented as an
+ * example in the user-definable link class identifier range.
+ * H5Lregister() is used to register additional link classes. It could
+ * also be used to modify the behavior of the external link class,
+ * though that is not recommended.
+ *
+ * The following table summarizes existing link types and values and
+ * the reserved and user-definable link class identifier value ranges.
+ * <table>
+ * <tr>
+ * <th>Link class identifier or Value range</th>
+ * <th>Description</th>
+ * <th>Link class or label</th>
+ * </tr>
+ * <tr>
+ * <td>0 to 63</td>
+ * <td>Reserved range</td>
+ * <td></td>
+ * </tr>
+ * <tr>
+ * <td>64 to 255</td>
+ * <td>User-definable range</td>
+ * <td></td>
+ * </tr>
+ * <tr>
+ * <td>64</td>
+ * <td>Minimum user-defined value</td>
+ * <td>#H5L_TYPE_UD_MIN</td>
+ * </tr>
+ * <tr>
+ * <td>64</td>
+ * <td>External link</td>
+ * <td>#H5L_TYPE_EXTERNAL</td>
+ * </tr>
+ * <tr>
+ * <td>255</td>
+ * <td>Maximum user-defined value</td>
+ * <td>#H5L_TYPE_UD_MAX</td>
+ * </tr>
+ * <tr>
+ * <td>255</td>
+ * <td>Maximum value</td>
+ * <td>#H5L_TYPE_MAX</td>
+ * </tr>
+ * <tr>
+ * <td>-1</td>
+ * <td>Error</td>
+ * <td>#H5L_TYPE_ERROR</td>
+ * </tr>
+ * </table>
+ *
+ * Note that HDF5 internally registers user-defined link classes only
+ * by the numeric value of the link class identifier. An application,
+ * on the other hand, will generally use a name for a user-defined
+ * class, if for no other purpose than as a variable name. Assume,
+ * for example, that a complex link type is registered with the link
+ * class identifier 73 and that the code includes the following
+ * assignment:
+ * \code
+ * H5L_TYPE_COMPLEX_A = 73
+ * \endcode
+ * The application can refer to the link class with a term,
+ * \c H5L_TYPE_COMPLEX_A, that conveys meaning to a human reviewing
+ * the code, while HDF5 recognizes it by the more cryptic numeric
+ * identifier, 73.
+ *
+ * \attention Important details and considerations include the following:
+ * \li If you plan to distribute files or software with a
+ * user-defined link class, please contact the Help Desk at
+ * The HDF Group to help prevent collisions between \c class_id
+ * values. See below.
+ * \li As distributed with HDF5, the external link class is
+ * implemented as an example of a user-defined link class with
+ * #H5L_TYPE_EXTERNAL equal to #H5L_TYPE_UD_MIN. \c class_id in
+ * the H5L_class_t \c struct must not equal #H5L_TYPE_UD_MIN
+ * unless you intend to overwrite or modify the behavior of
+ * external links.
+ * \li H5Lregister() can be used only with link class identifiers
+ * in the user-definable range (see table above).
+ * \li The hard and soft links defined by the HDF5 library,
+ * #H5L_TYPE_HARD and #H5L_TYPE_SOFT, reside in the reserved
+ * range below #H5L_TYPE_UD_MIN and cannot be redefined or
+ * modified.
+ * \li H5Lis_registered() can be used to determine whether a desired
+ * link class identifier is available. \Emph{Note that this
+ * function will tell you only whether the link class identifier
+ * has been registered with the installed copy of HDF5; it
+ * cannot tell you whether the link class has been registered
+ * with The HDF Group.}
+ * \li #H5L_TYPE_MAX is the maximum allowed value for a link type
+ * identifier.
+ * \li #H5L_TYPE_UD_MIN equals #H5L_TYPE_EXTERNAL.
+ * \li #H5L_TYPE_UD_MAX equals #H5L_TYPE_MAX.
+ * \li #H5L_TYPE_ERROR indicates that an error has occurred.
+ *
+ * \note \Bold{Registration with The HDF Group:}\n
+ * There are sometimes reasons to take a broader approach to registering
+ * a user-defined link class than just invoking H5Lregister(). For
+ * example:
+ * \li A user-defined link class is intended for use across an
+ * organization, among collaborators, or across a community of users.
+ * \li An application or library overlying HDF5 invokes a user-defined
+ * link class that must be shipped with the software.
+ * \li Files are distributed that make use of a user-defined link class.
+ * \li Or simply, a specific user-defined link class is thought to be
+ * widely useful.
+ *
+ * In such cases, you are encouraged to register that link class with
+ * The HDF Group's Helpdesk. The HDF Group maintains a registry of known
+ * user-defined link classes and tracks the selected link class
+ * identifiers. This registry is intended to reduce the risk of
+ * collisions between \c class_id values and to help coordinate the use
+ * of specialized link classes.
+ *
+ * \since 1.8.0
+ *
+ */
+H5_DLL herr_t H5Lregister(const H5L_class_t *cls);
+/**
+ * \ingroup H5LA
+ *
+ * \brief Unregisters a class of user-defined links
+ *
+ * \param[in] id User-defined link class identifier
+ *
+ * \return \herr_t
+ *
+ * \details H5Lunregister() unregisters a class of user-defined links,
+ * preventing them from being traversed, queried, moved, etc.
+ *
+ * \note A link class can be re-registered using H5Lregister().
+ *
+ * \since 1.8.0
+ *
+ */
+H5_DLL herr_t H5Lunregister(H5L_type_t id);
+
+#ifdef __cplusplus
+}
+#endif
+
+/* Symbols defined for compatibility with previous versions of the HDF5 API.
+ *
+ * Use of these symbols is deprecated.
+ */
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+
+/* Previous versions of the H5L_class_t struct */
+#define H5L_LINK_CLASS_T_VERS_0 0
+
+/** Callback during link traversal */
+typedef hid_t (*H5L_traverse_0_func_t)(const char *link_name, hid_t cur_group, const void *lnkdata,
+ size_t lnkdata_size, hid_t lapl_id);
+
+/** User-defined link types */
+typedef struct {
+ int version; /**< Version number of this struct */
+ H5L_type_t id; /**< Link type ID */
+ const char * comment; /**< Comment for debugging */
+ H5L_create_func_t create_func; /**< Callback during link creation */
+ H5L_move_func_t move_func; /**< Callback after moving link */
+ H5L_copy_func_t copy_func; /**< Callback after copying link */
+ H5L_traverse_0_func_t trav_func; /**< Callback during link traversal */
+ H5L_delete_func_t del_func; /**< Callback for link deletion */
+ H5L_query_func_t query_func; /**< Callback for queries */
+} H5L_class_0_t;
+
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+
+#endif /* _H5Ldevelop_H */
diff --git a/src/H5Lprivate.h b/src/H5Lprivate.h
index b114c17..53b8726 100644
--- a/src/H5Lprivate.h
+++ b/src/H5Lprivate.h
@@ -18,8 +18,9 @@
#ifndef H5Lprivate_H
#define H5Lprivate_H
-/* Include package's public header */
+/* Include package's public headers */
#include "H5Lpublic.h"
+#include "H5Ldevelop.h"
/* Private headers needed by this file */
#include "H5Gprivate.h" /* Groups */
diff --git a/src/H5Lpublic.h b/src/H5Lpublic.h
index 5274649..f665526 100644
--- a/src/H5Lpublic.h
+++ b/src/H5Lpublic.h
@@ -46,11 +46,6 @@
*/
#define H5L_SAME_LOC 0 /* (hid_t) */
-/**
- * \brief Current version of the H5L_class_t struct
- */
-#define H5L_LINK_CLASS_T_VERS 1
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -105,65 +100,6 @@ typedef struct {
} H5L_info2_t;
//! <!-- [H5L_info2_t_snip] -->
-/* The H5L_class_t struct can be used to override the behavior of a
- * "user-defined" link class. Users should populate the struct with callback
- * functions defined below.
- */
-/* Callback prototypes for user-defined links */
-/**
- * \brief Link creation callback
- */
-typedef herr_t (*H5L_create_func_t)(const char *link_name, hid_t loc_group, const void *lnkdata,
- size_t lnkdata_size, hid_t lcpl_id);
-/**
- * \brief Callback for link move
- */
-typedef herr_t (*H5L_move_func_t)(const char *new_name, hid_t new_loc, const void *lnkdata,
- size_t lnkdata_size);
-/**
- * \brief Callback for link copy
- */
-typedef herr_t (*H5L_copy_func_t)(const char *new_name, hid_t new_loc, const void *lnkdata,
- size_t lnkdata_size);
-/**
- * \brief Callback during link traversal
- */
-typedef hid_t (*H5L_traverse_func_t)(const char *link_name, hid_t cur_group, const void *lnkdata,
- size_t lnkdata_size, hid_t lapl_id, hid_t dxpl_id);
-/**
- * \brief Callback for link deletion
- */
-typedef herr_t (*H5L_delete_func_t)(const char *link_name, hid_t file, const void *lnkdata,
- size_t lnkdata_size);
-/**
- * \brief Callback for querying the link.
- *
- * Returns the size of the buffer needed.
- */
-typedef ssize_t (*H5L_query_func_t)(const char *link_name, const void *lnkdata, size_t lnkdata_size,
- void *buf /*out*/, size_t buf_size);
-
-/**
- * \brief Link prototype
- *
- * The H5L_class_t struct can be used to override the behavior of a
- * "user-defined" link class. Users should populate the struct with callback
- * functions defined elsewhere.
- */
-//! <!-- [H5L_class_t_snip] -->
-typedef struct {
- int version; /**< Version number of this struct */
- H5L_type_t id; /**< Link type ID */
- const char * comment; /**< Comment for debugging */
- H5L_create_func_t create_func; /**< Callback during link creation */
- H5L_move_func_t move_func; /**< Callback after moving link */
- H5L_copy_func_t copy_func; /**< Callback after copying link */
- H5L_traverse_func_t trav_func; /**< Callback during link traversal */
- H5L_delete_func_t del_func; /**< Callback for link deletion */
- H5L_query_func_t query_func; /**< Callback for queries */
-} H5L_class_t;
-//! <!-- [H5L_class_t_snip] -->
-
/**
* \brief Prototype for H5Literate2(), H5Literate_by_name2() operator
*
@@ -1250,179 +1186,6 @@ H5_DLL herr_t H5Lcreate_ud(hid_t link_loc_id, const char *link_name, H5L_type_t
/**
* \ingroup H5LA
*
- * \brief Registers a user-defined link class or changes behavior of an
- * existing class
- *
- * \param[in] cls Pointer to a buffer containing the struct describing the
- * user-defined link class
- *
- * \return \herr_t
- *
- * \details H5Lregister() registers a class of user-defined links, or changes
- * the behavior of an existing class.
- *
- * \p cls is a pointer to a buffer containing a copy of the
- * H5L_class_t struct. This struct is defined in H5Lpublic.h as
- * follows:
- * \snippet this H5L_class_t_snip
- *
- * The class definition passed with \p cls must include at least the
- * following:
- * \li An H5L_class_t version (which should be #H5L_LINK_CLASS_T_VERS)
- * \li A link class identifier, \c class_id
- * \li A traversal function, \c trav_func
- *
- * Remaining \c struct members are optional and may be passed as NULL.
- *
- * The link class passed in \c class_id must be in the user-definable
- * range between #H5L_TYPE_UD_MIN and #H5L_TYPE_UD_MAX
- * (see the table below) and will override
- * any existing link class with that identifier.
- *
- * As distributed, valid values of \c class_id used in HDF5 include
- * the following (defined in H5Lpublic.h):
- * \link_types
- *
- * The hard and soft link class identifiers cannot be modified or
- * reassigned, but the external link class is implemented as an
- * example in the user-definable link class identifier range.
- * H5Lregister() is used to register additional link classes. It could
- * also be used to modify the behavior of the external link class,
- * though that is not recommended.
- *
- * The following table summarizes existing link types and values and
- * the reserved and user-definable link class identifier value ranges.
- * <table>
- * <tr>
- * <th>Link class identifier or Value range</th>
- * <th>Description</th>
- * <th>Link class or label</th>
- * </tr>
- * <tr>
- * <td>0 to 63</td>
- * <td>Reserved range</td>
- * <td></td>
- * </tr>
- * <tr>
- * <td>64 to 255</td>
- * <td>User-definable range</td>
- * <td></td>
- * </tr>
- * <tr>
- * <td>64</td>
- * <td>Minimum user-defined value</td>
- * <td>#H5L_TYPE_UD_MIN</td>
- * </tr>
- * <tr>
- * <td>64</td>
- * <td>External link</td>
- * <td>#H5L_TYPE_EXTERNAL</td>
- * </tr>
- * <tr>
- * <td>255</td>
- * <td>Maximum user-defined value</td>
- * <td>#H5L_TYPE_UD_MAX</td>
- * </tr>
- * <tr>
- * <td>255</td>
- * <td>Maximum value</td>
- * <td>#H5L_TYPE_MAX</td>
- * </tr>
- * <tr>
- * <td>-1</td>
- * <td>Error</td>
- * <td>#H5L_TYPE_ERROR</td>
- * </tr>
- * </table>
- *
- * Note that HDF5 internally registers user-defined link classes only
- * by the numeric value of the link class identifier. An application,
- * on the other hand, will generally use a name for a user-defined
- * class, if for no other purpose than as a variable name. Assume,
- * for example, that a complex link type is registered with the link
- * class identifier 73 and that the code includes the following
- * assignment:
- * \code
- * H5L_TYPE_COMPLEX_A = 73
- * \endcode
- * The application can refer to the link class with a term,
- * \c H5L_TYPE_COMPLEX_A, that conveys meaning to a human reviewing
- * the code, while HDF5 recognizes it by the more cryptic numeric
- * identifier, 73.
- *
- * \attention Important details and considerations include the following:
- * \li If you plan to distribute files or software with a
- * user-defined link class, please contact the Help Desk at
- * The HDF Group to help prevent collisions between \c class_id
- * values. See below.
- * \li As distributed with HDF5, the external link class is
- * implemented as an example of a user-defined link class with
- * #H5L_TYPE_EXTERNAL equal to #H5L_TYPE_UD_MIN. \c class_id in
- * the H5L_class_t \c struct must not equal #H5L_TYPE_UD_MIN
- * unless you intend to overwrite or modify the behavior of
- * external links.
- * \li H5Lregister() can be used only with link class identifiers
- * in the user-definable range (see table above).
- * \li The hard and soft links defined by the HDF5 library,
- * #H5L_TYPE_HARD and #H5L_TYPE_SOFT, reside in the reserved
- * range below #H5L_TYPE_UD_MIN and cannot be redefined or
- * modified.
- * \li H5Lis_registered() can be used to determine whether a desired
- * link class identifier is available. \Emph{Note that this
- * function will tell you only whether the link class identifier
- * has been registered with the installed copy of HDF5; it
- * cannot tell you whether the link class has been registered
- * with The HDF Group.}
- * \li #H5L_TYPE_MAX is the maximum allowed value for a link type
- * identifier.
- * \li #H5L_TYPE_UD_MIN equals #H5L_TYPE_EXTERNAL.
- * \li #H5L_TYPE_UD_MAX equals #H5L_TYPE_MAX.
- * \li #H5L_TYPE_ERROR indicates that an error has occurred.
- *
- * \note \Bold{Registration with The HDF Group:}\n
- * There are sometimes reasons to take a broader approach to registering
- * a user-defined link class than just invoking H5Lregister(). For
- * example:
- * \li A user-defined link class is intended for use across an
- * organization, among collaborators, or across a community of users.
- * \li An application or library overlying HDF5 invokes a user-defined
- * link class that must be shipped with the software.
- * \li Files are distributed that make use of a user-defined link class.
- * \li Or simply, a specific user-defined link class is thought to be
- * widely useful.
- *
- * In such cases, you are encouraged to register that link class with
- * The HDF Group's Helpdesk. The HDF Group maintains a registry of known
- * user-defined link classes and tracks the selected link class
- * identifiers. This registry is intended to reduce the risk of
- * collisions between \c class_id values and to help coordinate the use
- * of specialized link classes.
- *
- * \since 1.8.0
- *
- */
-H5_DLL herr_t H5Lregister(const H5L_class_t *cls);
-/**
- * \ingroup H5LA
- *
- * \brief Unregisters a class of user-defined links
- *
- * \param[in] id User-defined link class identifier
- *
- * \return \herr_t
- *
- * \details H5Lunregister() unregisters a class of user-defined links,
- * preventing them from being traversed, queried, moved, etc.
- *
- * \note A link class can be re-registered using H5Lregister().
- *
- * \since 1.8.0
- *
- */
-H5_DLL herr_t H5Lunregister(H5L_type_t id);
-/**
- * \ingroup H5LA
- *
* \brief Determines whether a class of user-defined links is registered
*
* \param[in] id User-defined link class identifier
@@ -1647,9 +1410,6 @@ H5_DLL herr_t H5Lcreate_external(const char *file_name, const char *obj_name, hi
/* Macros */
-/* Previous versions of the H5L_class_t struct */
-#define H5L_LINK_CLASS_T_VERS_0 0
-
/* Typedefs */
//! <!-- [H5L_info1_t_snip] -->
@@ -1668,23 +1428,6 @@ typedef struct {
} H5L_info1_t;
//! <!-- [H5L_info1_t_snip] -->
-/** Callback during link traversal */
-typedef hid_t (*H5L_traverse_0_func_t)(const char *link_name, hid_t cur_group, const void *lnkdata,
- size_t lnkdata_size, hid_t lapl_id);
-
-/** User-defined link types */
-typedef struct {
- int version; /**< Version number of this struct */
- H5L_type_t id; /**< Link type ID */
- const char * comment; /**< Comment for debugging */
- H5L_create_func_t create_func; /**< Callback during link creation */
- H5L_move_func_t move_func; /**< Callback after moving link */
- H5L_copy_func_t copy_func; /**< Callback after copying link */
- H5L_traverse_0_func_t trav_func; /**< Callback during link traversal */
- H5L_delete_func_t del_func; /**< Callback for link deletion */
- H5L_query_func_t query_func; /**< Callback for queries */
-} H5L_class_0_t;
-
/** Prototype for H5Literate1() / H5Literate_by_name1() operator */
//! <!-- [H5L_iterate1_t_snip] -->
typedef herr_t (*H5L_iterate1_t)(hid_t group, const char *name, const H5L_info1_t *info, void *op_data);
diff --git a/src/H5MF.c b/src/H5MF.c
index 3ba5a3f..9bb3963 100644
--- a/src/H5MF.c
+++ b/src/H5MF.c
@@ -1035,7 +1035,7 @@ H5MF__alloc_pagefs(H5F_t *f, H5FD_mem_t alloc_type, hsize_t size)
/* Insert the new page into the Page Buffer list of new pages so
we don't read an empty page from disk */
- if (f->shared->pb_ptr != NULL && H5PB_add_new_page(f->shared, alloc_type, new_page) < 0)
+ if (f->shared->page_buf != NULL && H5PB_add_new_page(f->shared, alloc_type, new_page) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINSERT, HADDR_UNDEF,
"can't add new page to Page Buffer new page list")
@@ -1366,7 +1366,7 @@ H5MF__xfree_impl(H5F_t *f, H5FD_mem_t alloc_type, haddr_t addr, hsize_t size)
*
* JRM -- 4/28/20
*/
- if (ret_value == SUCCEED && f->shared->pb_ptr && size >= f->shared->fs_page_size) {
+ if (ret_value == SUCCEED && f->shared->page_buf && size >= f->shared->fs_page_size) {
HDassert(H5F_SHARED_PAGED_AGGR(f->shared));
diff --git a/src/H5MFsection.c b/src/H5MFsection.c
index bbd02f4..1494d21 100644
--- a/src/H5MFsection.c
+++ b/src/H5MFsection.c
@@ -755,7 +755,7 @@ H5MF__sect_small_merge(H5FS_section_info_t **_sect1, H5FS_section_info_t *_sect2
* Note: Update of raw data page (large or small sized) is handled
* by the PB cache
*/
- if (udata->f->shared->pb_ptr != NULL)
+ if (udata->f->shared->page_buf != NULL)
if (H5PB_remove_entry(udata->f->shared, (*sect1)->sect_info.addr) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, FAIL, "can't free merged section from page buffer")
diff --git a/src/H5O.c b/src/H5O.c
index 9ddd789..311c5b2 100644
--- a/src/H5O.c
+++ b/src/H5O.c
@@ -448,6 +448,7 @@ H5O__copy_api_common(hid_t src_loc_id, const char *src_name, hid_t dst_loc_id, c
/* Set the LCPL for the API context */
H5CX_set_lcpl(lcpl_id);
+ /* Setup and check args */
if (H5VL_setup_loc_args(src_loc_id, &vol_obj1, &loc_params1) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTSET, FAIL, "can't set object access arguments")
@@ -621,8 +622,7 @@ H5O__flush_api_common(hid_t obj_id, void **token_ptr, H5VL_object_t **_vol_obj_p
FUNC_ENTER_STATIC
- /* Check args */
-
+ /* Setup and check args */
if (H5VL_setup_loc_args(obj_id, vol_obj_ptr, &loc_params) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTSET, FAIL, "can't set object access arguments")
@@ -723,8 +723,7 @@ H5O__refresh_api_common(hid_t oid, void **token_ptr, H5VL_object_t **_vol_obj_pt
FUNC_ENTER_STATIC
- /* Check args */
-
+ /* Setup and check args */
if (H5VL_setup_loc_args(oid, vol_obj_ptr, &loc_params) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTSET, FAIL, "can't set object access arguments")
diff --git a/src/H5PB.c b/src/H5PB.c
index 9084452..57ef7fb 100644
--- a/src/H5PB.c
+++ b/src/H5PB.c
@@ -78,9 +78,9 @@ typedef struct _metadata_section {
/* Local Prototypes */
/********************/
-static H5PB_entry_t *H5PB__allocate_page(H5PB_t *pb_ptr, size_t buf_size, hbool_t clean_image);
+static H5PB_entry_t *H5PB__allocate_page(H5PB_t *page_buf, size_t buf_size, hbool_t clean_image);
-static herr_t H5PB__create_new_page(H5PB_t *pb_ptr, haddr_t addr, size_t size, H5FD_mem_t type,
+static herr_t H5PB__create_new_page(H5PB_t *page_buf, haddr_t addr, size_t size, H5FD_mem_t type,
hbool_t clean_image, H5PB_entry_t **entry_ptr_ptr);
static void H5PB__deallocate_page(H5PB_entry_t *entry_ptr);
@@ -105,8 +105,6 @@ static herr_t H5PB__write_meta(H5F_shared_t *, H5FD_mem_t, haddr_t, size_t, cons
static herr_t H5PB__write_raw(H5F_shared_t *, H5FD_mem_t, haddr_t, size_t, const void *);
-static void H5PB_log_access_by_size_counts(const H5PB_t *);
-
/*********************/
/* Package Variables */
/*********************/
@@ -143,58 +141,58 @@ H5FL_DEFINE_STATIC(H5PB_entry_t);
*-------------------------------------------------------------------------
*/
herr_t
-H5PB_reset_stats(H5PB_t *pb_ptr)
+H5PB_reset_stats(H5PB_t *page_buf)
{
int i;
FUNC_ENTER_NOAPI_NOERR
/* Sanity checks */
- HDassert(pb_ptr);
- HDassert(pb_ptr->magic == H5PB__H5PB_T_MAGIC);
+ HDassert(page_buf);
+ HDassert(page_buf->magic == H5PB__H5PB_T_MAGIC);
for (i = 0; i < H5PB__NUM_STAT_TYPES; i++) {
- pb_ptr->bypasses[i] = 0;
- pb_ptr->accesses[i] = 0;
- pb_ptr->hits[i] = 0;
- pb_ptr->misses[i] = 0;
- pb_ptr->loads[i] = 0;
- pb_ptr->insertions[i] = 0;
- pb_ptr->flushes[i] = 0;
- pb_ptr->evictions[i] = 0;
- pb_ptr->clears[i] = 0;
+ page_buf->bypasses[i] = 0;
+ page_buf->accesses[i] = 0;
+ page_buf->hits[i] = 0;
+ page_buf->misses[i] = 0;
+ page_buf->loads[i] = 0;
+ page_buf->insertions[i] = 0;
+ page_buf->flushes[i] = 0;
+ page_buf->evictions[i] = 0;
+ page_buf->clears[i] = 0;
}
- pb_ptr->max_lru_len = 0;
- pb_ptr->max_lru_size = 0;
- pb_ptr->lru_md_skips = 0;
- pb_ptr->lru_rd_skips = 0;
- pb_ptr->total_ht_insertions = 0;
- pb_ptr->total_ht_deletions = 0;
- pb_ptr->successful_ht_searches = 0;
- pb_ptr->total_successful_ht_search_depth = 0;
- pb_ptr->failed_ht_searches = 0;
- pb_ptr->total_failed_ht_search_depth = 0;
- pb_ptr->max_index_len = 0;
- pb_ptr->max_clean_index_len = 0;
- pb_ptr->max_dirty_index_len = 0;
- pb_ptr->max_clean_index_size = 0;
- pb_ptr->max_dirty_index_size = 0;
- pb_ptr->max_index_size = 0;
- pb_ptr->max_rd_pages = 0;
- pb_ptr->max_md_pages = 0;
- pb_ptr->max_mpmde_count = 0;
- pb_ptr->lru_tl_skips = 0;
- pb_ptr->max_tl_len = 0;
- pb_ptr->max_tl_size = 0;
- pb_ptr->delayed_writes = 0;
- pb_ptr->total_delay = 0;
- pb_ptr->max_dwl_len = 0;
- pb_ptr->max_dwl_size = 0;
- pb_ptr->total_dwl_ins_depth = 0;
- pb_ptr->md_read_splits = 0;
- pb_ptr->md_write_splits = 0;
+ page_buf->max_lru_len = 0;
+ page_buf->max_lru_size = 0;
+ page_buf->lru_md_skips = 0;
+ page_buf->lru_rd_skips = 0;
+ page_buf->total_ht_insertions = 0;
+ page_buf->total_ht_deletions = 0;
+ page_buf->successful_ht_searches = 0;
+ page_buf->total_successful_ht_search_depth = 0;
+ page_buf->failed_ht_searches = 0;
+ page_buf->total_failed_ht_search_depth = 0;
+ page_buf->max_index_len = 0;
+ page_buf->max_clean_index_len = 0;
+ page_buf->max_dirty_index_len = 0;
+ page_buf->max_clean_index_size = 0;
+ page_buf->max_dirty_index_size = 0;
+ page_buf->max_index_size = 0;
+ page_buf->max_rd_pages = 0;
+ page_buf->max_md_pages = 0;
+ page_buf->max_mpmde_count = 0;
+ page_buf->lru_tl_skips = 0;
+ page_buf->max_tl_len = 0;
+ page_buf->max_tl_size = 0;
+ page_buf->delayed_writes = 0;
+ page_buf->total_delay = 0;
+ page_buf->max_dwl_len = 0;
+ page_buf->max_dwl_size = 0;
+ page_buf->total_dwl_ins_depth = 0;
+ page_buf->md_read_splits = 0;
+ page_buf->md_write_splits = 0;
FUNC_LEAVE_NOAPI(SUCCEED)
@@ -237,29 +235,29 @@ H5PB_reset_stats(H5PB_t *pb_ptr)
*-------------------------------------------------------------------------
*/
herr_t
-H5PB_get_stats(const H5PB_t *pb_ptr, unsigned accesses[2], unsigned hits[2], unsigned misses[2],
+H5PB_get_stats(const H5PB_t *page_buf, unsigned accesses[2], unsigned hits[2], unsigned misses[2],
unsigned evictions[2], unsigned bypasses[2])
{
FUNC_ENTER_NOAPI_NOERR
/* Sanity checks */
- HDassert(pb_ptr);
-
- accesses[0] = (unsigned)pb_ptr->accesses[0];
- accesses[1] = (unsigned)pb_ptr->accesses[1];
- accesses[2] = (unsigned)pb_ptr->accesses[2];
- hits[0] = (unsigned)pb_ptr->hits[0];
- hits[1] = (unsigned)pb_ptr->hits[1];
- hits[2] = (unsigned)pb_ptr->hits[2];
- misses[0] = (unsigned)pb_ptr->misses[0];
- misses[1] = (unsigned)pb_ptr->misses[1];
- misses[2] = (unsigned)pb_ptr->misses[2];
- evictions[0] = (unsigned)pb_ptr->evictions[0];
- evictions[1] = (unsigned)pb_ptr->evictions[1];
- evictions[2] = (unsigned)pb_ptr->evictions[2];
- bypasses[0] = (unsigned)pb_ptr->bypasses[0];
- bypasses[1] = (unsigned)pb_ptr->bypasses[1];
- bypasses[2] = (unsigned)pb_ptr->bypasses[2];
+ HDassert(page_buf);
+
+ accesses[0] = (unsigned)page_buf->accesses[0];
+ accesses[1] = (unsigned)page_buf->accesses[1];
+ accesses[2] = (unsigned)page_buf->accesses[2];
+ hits[0] = (unsigned)page_buf->hits[0];
+ hits[1] = (unsigned)page_buf->hits[1];
+ hits[2] = (unsigned)page_buf->hits[2];
+ misses[0] = (unsigned)page_buf->misses[0];
+ misses[1] = (unsigned)page_buf->misses[1];
+ misses[2] = (unsigned)page_buf->misses[2];
+ evictions[0] = (unsigned)page_buf->evictions[0];
+ evictions[1] = (unsigned)page_buf->evictions[1];
+ evictions[2] = (unsigned)page_buf->evictions[2];
+ bypasses[0] = (unsigned)page_buf->bypasses[0];
+ bypasses[1] = (unsigned)page_buf->bypasses[1];
+ bypasses[2] = (unsigned)page_buf->bypasses[2];
FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5PB_get_stats */
@@ -281,103 +279,105 @@ H5PB_get_stats(const H5PB_t *pb_ptr, unsigned accesses[2], unsigned hits[2], uns
*-------------------------------------------------------------------------
*/
herr_t
-H5PB_print_stats(const H5PB_t *pb_ptr)
+H5PB_print_stats(const H5PB_t *page_buf)
{
- double ave_succ_search_depth = 0.0L;
- double ave_failed_search_depth = 0.0L;
- double ave_delayed_write = 0.0L;
- double ave_delayed_write_ins_depth = 0.0L;
+ double ave_succ_search_depth = 0.0;
+ double ave_failed_search_depth = 0.0;
+ double ave_delayed_write = 0.0;
+ double ave_delayed_write_ins_depth = 0.0;
FUNC_ENTER_NOAPI_NOINIT_NOERR
- HDassert(pb_ptr);
- HDassert(pb_ptr->magic == H5PB__H5PB_T_MAGIC);
+ HDassert(page_buf);
+ HDassert(page_buf->magic == H5PB__H5PB_T_MAGIC);
HDfprintf(stdout, "\n\nPage Buffer Statistics (raw/meta/mpmde): \n\n");
- HDfprintf(stdout, "bypasses = %lld (%lld/%lld/%lld)\n",
- (pb_ptr->bypasses[0] + pb_ptr->bypasses[1] + pb_ptr->bypasses[2]), pb_ptr->bypasses[0],
- pb_ptr->bypasses[1], pb_ptr->bypasses[2]);
+ HDfprintf(stdout, "bypasses = %" PRIi64 " (%" PRIi64 "/%" PRIi64 "/%" PRIi64 ")\n",
+ (page_buf->bypasses[0] + page_buf->bypasses[1] + page_buf->bypasses[2]), page_buf->bypasses[0],
+ page_buf->bypasses[1], page_buf->bypasses[2]);
- HDfprintf(stdout, "acesses = %lld (%lld/%lld/%lld)\n",
- (pb_ptr->accesses[0] + pb_ptr->accesses[1] + pb_ptr->accesses[2]), pb_ptr->accesses[0],
- pb_ptr->accesses[1], pb_ptr->accesses[2]);
+ HDfprintf(stdout, "accesses = %" PRIi64 " (%" PRIi64 "/%" PRIi64 "/%" PRIi64 ")\n",
+ (page_buf->accesses[0] + page_buf->accesses[1] + page_buf->accesses[2]), page_buf->accesses[0],
+ page_buf->accesses[1], page_buf->accesses[2]);
- HDfprintf(stdout, "hits = %lld (%lld/%lld/%lld)\n",
- (pb_ptr->hits[0] + pb_ptr->hits[1] + pb_ptr->hits[2]), pb_ptr->hits[0], pb_ptr->hits[1],
- pb_ptr->hits[2]);
+ HDfprintf(stdout, "hits = %" PRIi64 " (%" PRIi64 "/%" PRIi64 "/%" PRIi64 ")\n",
+ (page_buf->hits[0] + page_buf->hits[1] + page_buf->hits[2]), page_buf->hits[0],
+ page_buf->hits[1], page_buf->hits[2]);
- HDfprintf(stdout, "misses = %lld (%lld/%lld/%lld)\n",
- (pb_ptr->misses[0] + pb_ptr->misses[1] + pb_ptr->misses[2]), pb_ptr->misses[0],
- pb_ptr->misses[1], pb_ptr->misses[2]);
+ HDfprintf(stdout, "misses = %" PRIi64 " (%" PRIi64 "/%" PRIi64 "/%" PRIi64 ")\n",
+ (page_buf->misses[0] + page_buf->misses[1] + page_buf->misses[2]), page_buf->misses[0],
+ page_buf->misses[1], page_buf->misses[2]);
- HDfprintf(stdout, "loads = %lld (%lld/%lld/%lld)\n",
- (pb_ptr->loads[0] + pb_ptr->loads[1] + pb_ptr->loads[2]), pb_ptr->loads[0], pb_ptr->loads[1],
- pb_ptr->loads[2]);
+ HDfprintf(stdout, "loads = %" PRIi64 " (%" PRIi64 "/%" PRIi64 "/%" PRIi64 ")\n",
+ (page_buf->loads[0] + page_buf->loads[1] + page_buf->loads[2]), page_buf->loads[0],
+ page_buf->loads[1], page_buf->loads[2]);
- HDfprintf(stdout, "insertions = %lld (%lld/%lld/%lld)\n",
- (pb_ptr->insertions[0] + pb_ptr->insertions[1] + pb_ptr->insertions[2]), pb_ptr->insertions[0],
- pb_ptr->insertions[1], pb_ptr->insertions[2]);
+ HDfprintf(stdout, "insertions = %" PRIi64 " (%" PRIi64 "/%" PRIi64 "/%" PRIi64 ")\n",
+ (page_buf->insertions[0] + page_buf->insertions[1] + page_buf->insertions[2]),
+ page_buf->insertions[0], page_buf->insertions[1], page_buf->insertions[2]);
- HDfprintf(stdout, "flushes = %lld (%lld/%lld/%lld)\n",
- (pb_ptr->flushes[0] + pb_ptr->flushes[1] + pb_ptr->flushes[2]), pb_ptr->flushes[0],
- pb_ptr->flushes[1], pb_ptr->flushes[2]);
+ HDfprintf(stdout, "flushes = %" PRIi64 " (%" PRIi64 "/%" PRIi64 "/%" PRIi64 ")\n",
+ (page_buf->flushes[0] + page_buf->flushes[1] + page_buf->flushes[2]), page_buf->flushes[0],
+ page_buf->flushes[1], page_buf->flushes[2]);
- HDfprintf(stdout, "evictions = %lld (%lld/%lld/%lld)\n",
- (pb_ptr->evictions[0] + pb_ptr->evictions[1] + pb_ptr->evictions[2]), pb_ptr->evictions[0],
- pb_ptr->evictions[1], pb_ptr->evictions[2]);
+ HDfprintf(stdout, "evictions = %" PRIi64 " (%" PRIi64 "/%" PRIi64 "/%" PRIi64 ")\n",
+ (page_buf->evictions[0] + page_buf->evictions[1] + page_buf->evictions[2]),
+ page_buf->evictions[0], page_buf->evictions[1], page_buf->evictions[2]);
- HDfprintf(stdout, "clears = %lld (%lld/%lld/%lld)\n",
- (pb_ptr->clears[0] + pb_ptr->clears[1] + pb_ptr->clears[2]), pb_ptr->clears[0],
- pb_ptr->clears[1], pb_ptr->clears[2]);
+ HDfprintf(stdout, "clears = %" PRIi64 " (%" PRIi64 "/%" PRIi64 "/%" PRIi64 ")\n",
+ (page_buf->clears[0] + page_buf->clears[1] + page_buf->clears[2]), page_buf->clears[0],
+ page_buf->clears[1], page_buf->clears[2]);
- HDfprintf(stdout, "max LRU len / size = %lld / %lld\n", pb_ptr->max_lru_len, pb_ptr->max_lru_size);
+ HDfprintf(stdout, "max LRU len / size = %" PRIi64 " / %" PRIi64 "\n", page_buf->max_lru_len,
+ page_buf->max_lru_size);
- HDfprintf(stdout, "LRU make space md/rd/tl skips = %lld/%lld/%lld\n", pb_ptr->lru_md_skips,
- pb_ptr->lru_rd_skips, pb_ptr->lru_tl_skips);
+ HDfprintf(stdout, "LRU make space md/rd/tl skips = %" PRIi64 "/%" PRIi64 "/%" PRIi64 "\n",
+ page_buf->lru_md_skips, page_buf->lru_rd_skips, page_buf->lru_tl_skips);
- HDfprintf(stdout, "hash table insertions / deletions = %lld / %lld\n", pb_ptr->total_ht_insertions,
- pb_ptr->total_ht_deletions);
+ HDfprintf(stdout, "hash table insertions / deletions = %" PRIi64 " / %" PRIi64 "\n",
+ page_buf->total_ht_insertions, page_buf->total_ht_deletions);
- if (pb_ptr->successful_ht_searches > 0) {
+ if (page_buf->successful_ht_searches > 0) {
ave_succ_search_depth =
- (double)(pb_ptr->total_successful_ht_search_depth) / (double)(pb_ptr->successful_ht_searches);
+ (double)(page_buf->total_successful_ht_search_depth) / (double)(page_buf->successful_ht_searches);
}
- HDfprintf(stdout, "successful ht searches / ave depth = %lld / %llf\n", pb_ptr->successful_ht_searches,
- ave_succ_search_depth);
+ HDfprintf(stdout, "successful ht searches / ave depth = %" PRIi64 " / %g\n",
+ page_buf->successful_ht_searches, ave_succ_search_depth);
- if (pb_ptr->failed_ht_searches > 0) {
+ if (page_buf->failed_ht_searches > 0) {
ave_failed_search_depth =
- (double)(pb_ptr->total_failed_ht_search_depth) / (double)(pb_ptr->failed_ht_searches);
+ (double)(page_buf->total_failed_ht_search_depth) / (double)(page_buf->failed_ht_searches);
}
- HDfprintf(stdout, "failed ht searches / ave depth = %lld / %llf\n", pb_ptr->failed_ht_searches,
+ HDfprintf(stdout, "failed ht searches / ave depth = %" PRIi64 " / %g\n", page_buf->failed_ht_searches,
ave_failed_search_depth);
- HDfprintf(stdout, "max index length / size = %lld / %lld\n", pb_ptr->max_index_len,
- pb_ptr->max_index_size);
+ HDfprintf(stdout, "max index length / size = %" PRIi64 " / %" PRIi64 "\n", page_buf->max_index_len,
+ page_buf->max_index_size);
- HDfprintf(stdout, "max rd / md / mpmde entries = %lld / %lld / %lld\n", pb_ptr->max_rd_pages,
- pb_ptr->max_md_pages, pb_ptr->max_mpmde_count);
+ HDfprintf(stdout, "max rd / md / mpmde entries = %" PRIi64 " / %" PRIi64 " / %" PRIi64 "\n",
+ page_buf->max_rd_pages, page_buf->max_md_pages, page_buf->max_mpmde_count);
- HDfprintf(stdout, "tick list max len / size = %lld / %lld\n", pb_ptr->max_tl_len, pb_ptr->max_tl_size);
+ HDfprintf(stdout, "tick list max len / size = %" PRIi64 " / %" PRIi64 "\n", page_buf->max_tl_len,
+ page_buf->max_tl_size);
- HDfprintf(stdout, "delayed write list max len / size = %lld / %lld\n", pb_ptr->max_dwl_len,
- pb_ptr->max_dwl_size);
+ HDfprintf(stdout, "delayed write list max len / size = %" PRIi64 " / %" PRIi64 "\n",
+ page_buf->max_dwl_len, page_buf->max_dwl_size);
- if (pb_ptr->delayed_writes > 0) {
+ if (page_buf->delayed_writes > 0) {
- ave_delayed_write = (double)(pb_ptr->total_delay) / (double)(pb_ptr->delayed_writes);
+ ave_delayed_write = (double)(page_buf->total_delay) / (double)(page_buf->delayed_writes);
ave_delayed_write_ins_depth =
- (double)(pb_ptr->total_dwl_ins_depth) / (double)(pb_ptr->delayed_writes);
+ (double)(page_buf->total_dwl_ins_depth) / (double)(page_buf->delayed_writes);
}
- HDfprintf(stdout, "delayed writes / ave delay / ave ins depth = %lld / %llf / %llf\n",
- pb_ptr->delayed_writes, ave_delayed_write, ave_delayed_write_ins_depth);
+ HDfprintf(stdout, "delayed writes / ave delay / ave ins depth = %" PRIi64 " / %g / %g\n",
+ page_buf->delayed_writes, ave_delayed_write, ave_delayed_write_ins_depth);
- HDfprintf(stdout, "metadata read / write splits = %lld / %lld.\n", pb_ptr->md_read_splits,
- pb_ptr->md_write_splits);
+ HDfprintf(stdout, "metadata read / write splits = %" PRIi64 " / %" PRIi64 ".\n", page_buf->md_read_splits,
+ page_buf->md_write_splits);
FUNC_LEAVE_NOAPI(SUCCEED)
@@ -425,7 +425,7 @@ herr_t
H5PB_add_new_page(H5F_shared_t *shared, H5FD_mem_t type, haddr_t page_addr)
{
hbool_t can_insert = TRUE;
- H5PB_t * pb_ptr = NULL;
+ H5PB_t * page_buf = NULL;
H5PB_entry_t *entry_ptr = NULL;
herr_t ret_value = SUCCEED; /* Return value */
@@ -433,22 +433,22 @@ H5PB_add_new_page(H5F_shared_t *shared, H5FD_mem_t type, haddr_t page_addr)
/* Sanity checks */
HDassert(shared);
- HDassert(shared->pb_ptr);
+ HDassert(shared->page_buf);
- pb_ptr = shared->pb_ptr;
+ page_buf = shared->page_buf;
- HDassert(pb_ptr->magic == H5PB__H5PB_T_MAGIC);
+ HDassert(page_buf->magic == H5PB__H5PB_T_MAGIC);
if (H5FD_MEM_DRAW == type) { /* raw data page insertion */
- if ((pb_ptr->min_md_pages == pb_ptr->max_pages) || (pb_ptr->vfd_swmr)) {
+ if ((page_buf->min_md_pages == page_buf->max_pages) || (page_buf->vfd_swmr)) {
can_insert = FALSE;
}
}
else { /* metadata page insertion */
- if (pb_ptr->min_rd_pages == pb_ptr->max_pages) {
+ if (page_buf->min_rd_pages == page_buf->max_pages) {
can_insert = FALSE;
}
@@ -456,7 +456,8 @@ H5PB_add_new_page(H5F_shared_t *shared, H5FD_mem_t type, haddr_t page_addr)
if (can_insert) {
- if (H5PB__create_new_page(pb_ptr, page_addr, (size_t)(pb_ptr->page_size), type, TRUE, &entry_ptr) < 0)
+ if (H5PB__create_new_page(page_buf, page_addr, (size_t)(page_buf->page_size), type, TRUE,
+ &entry_ptr) < 0)
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL, "new page buffer page creation failed.")
@@ -464,7 +465,7 @@ H5PB_add_new_page(H5F_shared_t *shared, H5FD_mem_t type, haddr_t page_addr)
entry_ptr->loaded = FALSE;
/* updates stats */
- H5PB__UPDATE_STATS_FOR_INSERTION(pb_ptr, entry_ptr);
+ H5PB__UPDATE_STATS_FOR_INSERTION(page_buf, entry_ptr);
}
done:
@@ -501,7 +502,7 @@ H5PB_create(H5F_shared_t *shared, size_t size, unsigned page_buf_min_meta_perc,
int i;
int32_t min_md_pages;
int32_t min_rd_pages;
- H5PB_t *pb_ptr = NULL;
+ H5PB_t *page_buf = NULL;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -559,70 +560,70 @@ H5PB_create(H5F_shared_t *shared, size_t size, unsigned page_buf_min_meta_perc,
}
/* Allocate the new page buffering structure */
- if (NULL == (pb_ptr = H5FL_MALLOC(H5PB_t)))
+ if (NULL == (page_buf = H5FL_MALLOC(H5PB_t)))
HGOTO_ERROR(H5E_PAGEBUF, H5E_NOSPACE, FAIL, "memory allocation failed")
/* initialize the new instance of H5PB_t */
- pb_ptr->magic = H5PB__H5PB_T_MAGIC;
- pb_ptr->page_size = shared->fs_page_size;
- H5_CHECKED_ASSIGN(pb_ptr->page_size, size_t, shared->fs_page_size, hsize_t);
- pb_ptr->max_pages = (int32_t)(size / shared->fs_page_size);
- pb_ptr->curr_pages = 0;
- pb_ptr->curr_md_pages = 0;
- pb_ptr->curr_rd_pages = 0;
- pb_ptr->min_md_pages = min_md_pages;
- pb_ptr->min_rd_pages = min_rd_pages;
+ page_buf->magic = H5PB__H5PB_T_MAGIC;
+ page_buf->page_size = shared->fs_page_size;
+ H5_CHECKED_ASSIGN(page_buf->page_size, size_t, shared->fs_page_size, hsize_t);
+ page_buf->max_pages = (int32_t)(size / shared->fs_page_size);
+ page_buf->curr_pages = 0;
+ page_buf->curr_md_pages = 0;
+ page_buf->curr_rd_pages = 0;
+ page_buf->min_md_pages = min_md_pages;
+ page_buf->min_rd_pages = min_rd_pages;
- pb_ptr->max_size = size;
- pb_ptr->min_meta_perc = page_buf_min_meta_perc;
- pb_ptr->min_raw_perc = page_buf_min_raw_perc;
+ page_buf->max_size = size;
+ page_buf->min_meta_perc = page_buf_min_meta_perc;
+ page_buf->min_raw_perc = page_buf_min_raw_perc;
/* index */
for (i = 0; i < H5PB__HASH_TABLE_LEN; i++)
- pb_ptr->ht[i] = NULL;
- pb_ptr->index_len = 0;
- pb_ptr->clean_index_len = 0;
- pb_ptr->dirty_index_len = 0;
- pb_ptr->index_size = 0;
- pb_ptr->clean_index_size = 0;
- pb_ptr->dirty_index_size = 0;
- pb_ptr->il_len = 0;
- pb_ptr->il_size = 0;
- pb_ptr->il_head = NULL;
- pb_ptr->il_tail = NULL;
+ page_buf->ht[i] = NULL;
+ page_buf->index_len = 0;
+ page_buf->clean_index_len = 0;
+ page_buf->dirty_index_len = 0;
+ page_buf->index_size = 0;
+ page_buf->clean_index_size = 0;
+ page_buf->dirty_index_size = 0;
+ page_buf->il_len = 0;
+ page_buf->il_size = 0;
+ page_buf->il_head = NULL;
+ page_buf->il_tail = NULL;
/* LRU */
- pb_ptr->LRU_len = 0;
- pb_ptr->LRU_size = 0;
- pb_ptr->LRU_head_ptr = NULL;
- pb_ptr->LRU_tail_ptr = NULL;
+ page_buf->LRU_len = 0;
+ page_buf->LRU_size = 0;
+ page_buf->LRU_head_ptr = NULL;
+ page_buf->LRU_tail_ptr = NULL;
/* VFD SWMR specific fields.
* The following fields are defined iff vfd_swmr_writer is TRUE.
*/
- pb_ptr->vfd_swmr = vfd_swmr;
- pb_ptr->vfd_swmr_writer = vfd_swmr_writer;
- pb_ptr->mpmde_count = 0;
- pb_ptr->cur_tick = 0;
+ page_buf->vfd_swmr = vfd_swmr;
+ page_buf->vfd_swmr_writer = vfd_swmr_writer;
+ page_buf->mpmde_count = 0;
+ page_buf->cur_tick = 0;
/* delayed write list */
- pb_ptr->max_delay = 0;
- pb_ptr->dwl_len = 0;
- pb_ptr->dwl_size = 0;
- pb_ptr->dwl_head_ptr = NULL;
- pb_ptr->dwl_tail_ptr = NULL;
+ page_buf->max_delay = 0;
+ page_buf->dwl_len = 0;
+ page_buf->dwl_size = 0;
+ page_buf->dwl_head_ptr = NULL;
+ page_buf->dwl_tail_ptr = NULL;
/* tick list */
- pb_ptr->tl_len = 0;
- pb_ptr->tl_size = 0;
- pb_ptr->tl_head_ptr = NULL;
- pb_ptr->tl_tail_ptr = NULL;
+ page_buf->tl_len = 0;
+ page_buf->tl_size = 0;
+ page_buf->tl_head_ptr = NULL;
+ page_buf->tl_tail_ptr = NULL;
- H5PB_reset_stats(pb_ptr);
+ H5PB_reset_stats(page_buf);
- shared->pb_ptr = pb_ptr;
+ shared->page_buf = page_buf;
/* if this is a VFD SWMR reader, inform the reader VFD that the
* page buffer is configured. Note that this is for sanity
@@ -644,9 +645,9 @@ done:
if (ret_value < 0) {
- if (pb_ptr != NULL) {
+ if (page_buf != NULL) {
- pb_ptr = H5FL_FREE(H5PB_t, pb_ptr);
+ page_buf = H5FL_FREE(H5PB_t, page_buf);
}
}
@@ -673,7 +674,7 @@ herr_t
H5PB_dest(H5F_shared_t *shared)
{
int i;
- H5PB_t * pb_ptr = NULL;
+ H5PB_t * page_buf = NULL;
H5PB_entry_t *entry_ptr = NULL;
H5PB_entry_t *evict_ptr = NULL;
herr_t ret_value = SUCCEED; /* Return value */
@@ -684,20 +685,18 @@ H5PB_dest(H5F_shared_t *shared)
HDassert(shared);
/* flush and destroy the page buffer, if it exists */
- if (shared->pb_ptr) {
-
- pb_ptr = shared->pb_ptr;
+ if (shared->page_buf) {
- H5PB_log_access_by_size_counts(pb_ptr);
+ page_buf = shared->page_buf;
- HDassert(pb_ptr->magic == H5PB__H5PB_T_MAGIC);
+ HDassert(page_buf->magic == H5PB__H5PB_T_MAGIC);
/* the current implementation if very inefficient, and will
* fail if there are any outstanding delayed writes -- must fix this
*/
for (i = 0; i < H5PB__HASH_TABLE_LEN; i++) {
- entry_ptr = pb_ptr->ht[i];
+ entry_ptr = page_buf->ht[i];
while (entry_ptr) {
@@ -708,7 +707,7 @@ H5PB_dest(H5F_shared_t *shared)
if (evict_ptr->is_dirty) {
- if (H5PB__flush_entry(shared, pb_ptr, evict_ptr) < 0)
+ if (H5PB__flush_entry(shared, page_buf, evict_ptr) < 0)
HGOTO_ERROR(H5E_PAGEBUF, H5E_WRITEERROR, FAIL, "Can't flush entry")
}
@@ -717,34 +716,34 @@ H5PB_dest(H5F_shared_t *shared)
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL, "forced eviction failed")
- entry_ptr = pb_ptr->ht[i];
+ entry_ptr = page_buf->ht[i];
}
}
/* regular operations fields */
- HDassert(pb_ptr->curr_pages == 0);
- HDassert(pb_ptr->curr_md_pages == 0);
- HDassert(pb_ptr->curr_rd_pages == 0);
- HDassert(pb_ptr->index_len == 0);
- HDassert(pb_ptr->index_size == 0);
- HDassert(pb_ptr->LRU_len == 0);
- HDassert(pb_ptr->LRU_size == 0);
- HDassert(pb_ptr->LRU_head_ptr == NULL);
- HDassert(pb_ptr->LRU_tail_ptr == NULL);
+ HDassert(page_buf->curr_pages == 0);
+ HDassert(page_buf->curr_md_pages == 0);
+ HDassert(page_buf->curr_rd_pages == 0);
+ HDassert(page_buf->index_len == 0);
+ HDassert(page_buf->index_size == 0);
+ HDassert(page_buf->LRU_len == 0);
+ HDassert(page_buf->LRU_size == 0);
+ HDassert(page_buf->LRU_head_ptr == NULL);
+ HDassert(page_buf->LRU_tail_ptr == NULL);
/* VFD SWMR fields */
- HDassert(pb_ptr->dwl_len == 0);
- HDassert(pb_ptr->dwl_size == 0);
- HDassert(pb_ptr->dwl_head_ptr == NULL);
- HDassert(pb_ptr->dwl_tail_ptr == NULL);
-
- HDassert(pb_ptr->tl_len == 0);
- HDassert(pb_ptr->tl_size == 0);
- HDassert(pb_ptr->tl_head_ptr == NULL);
- HDassert(pb_ptr->tl_tail_ptr == NULL);
-
- pb_ptr->magic = 0;
- shared->pb_ptr = H5FL_FREE(H5PB_t, pb_ptr);
+ HDassert(page_buf->dwl_len == 0);
+ HDassert(page_buf->dwl_size == 0);
+ HDassert(page_buf->dwl_head_ptr == NULL);
+ HDassert(page_buf->dwl_tail_ptr == NULL);
+
+ HDassert(page_buf->tl_len == 0);
+ HDassert(page_buf->tl_size == 0);
+ HDassert(page_buf->tl_head_ptr == NULL);
+ HDassert(page_buf->tl_tail_ptr == NULL);
+
+ page_buf->magic = 0;
+ shared->page_buf = H5FL_FREE(H5PB_t, page_buf);
}
done:
@@ -771,7 +770,7 @@ herr_t
H5PB_flush(H5F_shared_t *shared)
{
int i;
- H5PB_t * pb_ptr = NULL;
+ H5PB_t * page_buf = NULL;
H5PB_entry_t *entry_ptr = NULL;
H5PB_entry_t *flush_ptr = NULL;
herr_t ret_value = SUCCEED; /* Return value */
@@ -781,18 +780,18 @@ H5PB_flush(H5F_shared_t *shared)
/* Sanity check */
HDassert(shared);
- pb_ptr = shared->pb_ptr;
+ page_buf = shared->page_buf;
- if (pb_ptr) {
+ if (page_buf) {
- HDassert(pb_ptr->magic == H5PB__H5PB_T_MAGIC);
+ HDassert(page_buf->magic == H5PB__H5PB_T_MAGIC);
/* the current implementation is very inefficient, and will
* fail if there are any delayed writes -- must fix this
*/
for (i = 0; i < H5PB__HASH_TABLE_LEN; i++) {
- entry_ptr = pb_ptr->ht[i];
+ entry_ptr = page_buf->ht[i];
while (entry_ptr) {
@@ -806,7 +805,7 @@ H5PB_flush(H5F_shared_t *shared)
if (flush_ptr->delay_write_until != 0)
continue;
- if (H5PB__flush_entry(shared, pb_ptr, flush_ptr) < 0)
+ if (H5PB__flush_entry(shared, page_buf, flush_ptr) < 0)
HGOTO_ERROR(H5E_PAGEBUF, H5E_WRITEERROR, FAIL, "Can't flush entry")
}
@@ -842,7 +841,7 @@ herr_t
H5PB_page_exists(H5F_shared_t *shared, haddr_t addr, hbool_t *page_exists_ptr)
{
uint64_t page;
- H5PB_t * pb_ptr = NULL;
+ H5PB_t * page_buf = NULL;
H5PB_entry_t *entry_ptr = NULL;
herr_t ret_value = SUCCEED; /* Return value */
@@ -850,21 +849,21 @@ H5PB_page_exists(H5F_shared_t *shared, haddr_t addr, hbool_t *page_exists_ptr)
/* Sanity check */
HDassert(shared);
- HDassert(shared->pb_ptr);
+ HDassert(shared->page_buf);
- pb_ptr = shared->pb_ptr;
+ page_buf = shared->page_buf;
- HDassert(pb_ptr->magic == H5PB__H5PB_T_MAGIC);
+ HDassert(page_buf->magic == H5PB__H5PB_T_MAGIC);
HDassert(page_exists_ptr);
/* Calculate the page offset */
- page = (addr / pb_ptr->page_size);
+ page = (addr / page_buf->page_size);
/* the supplied address should be page aligned */
- HDassert(addr == page * pb_ptr->page_size);
+ HDassert(addr == page * page_buf->page_size);
/* Search for page in the hash table */
- H5PB__SEARCH_INDEX(pb_ptr, page, entry_ptr, FAIL)
+ H5PB__SEARCH_INDEX(page_buf, page, entry_ptr, FAIL)
HDassert((NULL == entry_ptr) || (entry_ptr->addr == addr));
@@ -889,16 +888,6 @@ H5PB_count_meta_access_by_size(H5PB_t *pb, size_t size)
pb->access_size_count[i]++;
}
-static void
-H5PB_log_access_by_size_counts(const H5PB_t *pb)
-{
- const size_t nslots = NELMTS(pb->access_size_count);
- size_t i, lo, hi;
-
- for (lo = 0, hi = pb->page_size, i = 0; i < nslots - 1; i++, lo = hi + 1, hi *= 2) {
- }
-}
-
/*-------------------------------------------------------------------------
*
* Function: H5PB_read
@@ -1030,12 +1019,12 @@ H5PB_log_access_by_size_counts(const H5PB_t *pb)
* 9) If the read is for metadata, is page aligned, is larger
* than one page, and there is a multi-page metadata entry
* at the target page address, test to see if
- * pb_ptr->vfd_swmr_write is TRUE.
+ * page_buf->vfd_swmr_writer is TRUE.
*
* If it is, satisfy the read from the multi-page metadata
* entry, clipping the read if necessary.
*
- * if pb_ptr->vfd_swmr_write is FALSE, flag an error.
+ * if page_buf->vfd_swmr_write is FALSE, flag an error.
*
* 10) If the read is for metadata, is page aligned, is no
* larger than a page, test to see if the page buffer
@@ -1049,7 +1038,7 @@ H5PB_log_access_by_size_counts(const H5PB_t *pb)
*
* If it contains a multipage metadata entry at the target
* address, satisfy the read from the multi-page metadata
- * entry if pb_ptr->vfd_swmr_write is TRUE, and flag an
+ * entry if page_buf->vfd_swmr_write is TRUE, and flag an
* error otherwise.
*
* Observe that this function handles casses 1, 2, and 5
@@ -1074,7 +1063,7 @@ H5PB_log_access_by_size_counts(const H5PB_t *pb)
herr_t
H5PB_read(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size, void *buf /*out*/)
{
- H5PB_t *pb_ptr; /* Page buffer for this file */
+ H5PB_t *page_buf; /* Page buffer for this file */
hbool_t bypass_pb = FALSE; /* Whether to bypass page buffering */
hbool_t split_read = FALSE; /* whether the read must be split */
herr_t ret_value = SUCCEED; /* Return value */
@@ -1092,22 +1081,22 @@ H5PB_read(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size, void
/* Sanity checks */
HDassert(shared);
- pb_ptr = shared->pb_ptr;
+ page_buf = shared->page_buf;
- if (pb_ptr != NULL && type != H5FD_MEM_DRAW)
- H5PB_count_meta_access_by_size(pb_ptr, size);
+ if (page_buf != NULL && type != H5FD_MEM_DRAW)
+ H5PB_count_meta_access_by_size(page_buf, size);
- if (pb_ptr == NULL) {
+ if (page_buf == NULL) {
bypass_pb = TRUE; /* case 1) -- page buffer is disabled */
}
else {
- HDassert(pb_ptr->magic == H5PB__H5PB_T_MAGIC);
+ HDassert(page_buf->magic == H5PB__H5PB_T_MAGIC);
if (H5FD_MEM_DRAW == type) { /* raw data read */
- if ((pb_ptr->min_md_pages == pb_ptr->max_pages) || (pb_ptr->vfd_swmr)) {
+ if ((page_buf->min_md_pages == page_buf->max_pages) || (page_buf->vfd_swmr)) {
/* case 2) -- page buffer configured for metadata only
* or vfd swmr.
@@ -1117,7 +1106,7 @@ H5PB_read(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size, void
}
else { /* metadata read */
- if (pb_ptr->min_rd_pages == pb_ptr->max_pages) {
+ if (page_buf->min_rd_pages == page_buf->max_pages) {
/* case 5) -- page buffer configured for raw data only */
bypass_pb = TRUE;
@@ -1152,20 +1141,20 @@ H5PB_read(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size, void
haddr_t end_addr; /* addr of last byte in read */
/* Calculate the aligned address of the first page */
- start_page = (addr / pb_ptr->page_size);
- start_page_addr = start_page * pb_ptr->page_size;
+ start_page = (addr / page_buf->page_size);
+ start_page_addr = start_page * page_buf->page_size;
/* Calculate the aligned address of the last page */
end_addr = addr + (haddr_t)(size - 1);
- end_page = end_addr / (haddr_t)(pb_ptr->page_size);
- end_page_addr = end_page * pb_ptr->page_size;
+ end_page = end_addr / (haddr_t)(page_buf->page_size);
+ end_page_addr = end_page * page_buf->page_size;
HDassert(start_page_addr <= addr);
- HDassert(addr < start_page_addr + (haddr_t)(pb_ptr->page_size));
+ HDassert(addr < start_page_addr + (haddr_t)(page_buf->page_size));
HDassert(start_page <= end_page);
HDassert(end_page_addr <= ((addr + (haddr_t)size - 1)));
- HDassert((addr + (haddr_t)size - 1) < (end_page_addr + pb_ptr->page_size));
+ HDassert((addr + (haddr_t)size - 1) < (end_page_addr + page_buf->page_size));
/* test to see if the read crosses a page boundary, and
* does not start on a page boundary, and is not of an
@@ -1173,7 +1162,7 @@ H5PB_read(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size, void
*/
if ((start_page < end_page) &&
(!((addr == start_page_addr) &&
- (end_page_addr + (haddr_t)(pb_ptr->page_size) == end_addr + 1)))) {
+ (end_page_addr + (haddr_t)(page_buf->page_size) == end_addr + 1)))) {
/* the read crosses a page boundary and is not
* page aligned and of length some multiple of page size.
@@ -1222,7 +1211,7 @@ H5PB_read(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size, void
*/
second_page = start_page + 1;
- second_page_addr = (haddr_t)(second_page * pb_ptr->page_size);
+ second_page_addr = (haddr_t)(second_page * page_buf->page_size);
if (addr > start_page_addr) { /* prefix exists */
@@ -1230,11 +1219,11 @@ H5PB_read(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size, void
prefix_size = (size_t)(second_page_addr - addr);
HDassert(prefix_addr > start_page_addr);
- HDassert(prefix_size < pb_ptr->page_size);
- HDassert(((size_t)(addr - start_page_addr) + prefix_size) == pb_ptr->page_size);
+ HDassert(prefix_size < page_buf->page_size);
+ HDassert(((size_t)(addr - start_page_addr) + prefix_size) == page_buf->page_size);
}
- if (size - prefix_size >= pb_ptr->page_size) {
+ if (size - prefix_size >= page_buf->page_size) {
/* body exists */
@@ -1249,29 +1238,29 @@ H5PB_read(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size, void
body_addr = second_page_addr;
}
- if (end_addr < end_page_addr + (haddr_t)(pb_ptr->page_size - 1)) {
+ if (end_addr < end_page_addr + (haddr_t)(page_buf->page_size - 1)) {
/* suffix exists */
- body_size = (size_t)(end_page - body_page) * pb_ptr->page_size;
+ body_size = (size_t)(end_page - body_page) * page_buf->page_size;
}
else {
/* suffix is empty */
- body_size = (size_t)(end_page - body_page + 1) * pb_ptr->page_size;
+ body_size = (size_t)(end_page - body_page + 1) * page_buf->page_size;
}
HDassert((body_page == start_page) || (body_page == start_page + 1));
- HDassert(body_addr == (haddr_t)(body_page * pb_ptr->page_size));
+ HDassert(body_addr == (haddr_t)(body_page * page_buf->page_size));
HDassert(body_size < size);
- HDassert(body_size >= pb_ptr->page_size);
+ HDassert(body_size >= page_buf->page_size);
HDassert(body_addr == addr + (haddr_t)prefix_size);
HDassert((body_addr + (haddr_t)body_size) <= (end_addr + 1));
}
- if (end_addr < end_page_addr + (haddr_t)(pb_ptr->page_size - 1)) {
+ if (end_addr < end_page_addr + (haddr_t)(page_buf->page_size - 1)) {
suffix_addr = end_page_addr;
suffix_size = (end_addr + 1) - end_page_addr;
@@ -1303,9 +1292,9 @@ H5PB_read(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size, void
HGOTO_ERROR(H5E_PAGEBUF, H5E_READERROR, FAIL, "read through failed")
/* Update statistics */
- if (pb_ptr) {
+ if (page_buf) {
- H5PB__UPDATE_STATS_FOR_BYPASS(pb_ptr, type, size);
+ H5PB__UPDATE_STATS_FOR_BYPASS(page_buf, type, size);
}
}
else {
@@ -1346,7 +1335,7 @@ H5PB_read(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size, void
HGOTO_ERROR(H5E_PAGEBUF, H5E_READERROR, FAIL, "H5PB_read_meta() failed on suffix")
}
- H5PB__UPDATE_STATS_FOR_READ_SPLIT(pb_ptr)
+ H5PB__UPDATE_STATS_FOR_READ_SPLIT(page_buf)
}
else { /* pass to H5PB_read_meta() -- cases 6, 7, 8, 9, & 10 */
@@ -1449,7 +1438,7 @@ herr_t
H5PB_remove_entry(H5F_shared_t *shared, haddr_t addr)
{
uint64_t page;
- H5PB_t * pb_ptr = NULL;
+ H5PB_t * page_buf = NULL;
H5PB_entry_t *entry_ptr = NULL;
herr_t ret_value = SUCCEED; /* Return value */
@@ -1457,30 +1446,30 @@ H5PB_remove_entry(H5F_shared_t *shared, haddr_t addr)
/* Sanity checks */
HDassert(shared);
- HDassert(shared->pb_ptr);
+ HDassert(shared->page_buf);
- pb_ptr = shared->pb_ptr;
+ page_buf = shared->page_buf;
/* Calculate the page offset */
- page = (addr / pb_ptr->page_size);
+ page = (addr / page_buf->page_size);
- HDassert(addr == page * pb_ptr->page_size);
+ HDassert(addr == page * page_buf->page_size);
/* Search for page in the hash table */
- H5PB__SEARCH_INDEX(pb_ptr, page, entry_ptr, FAIL)
+ H5PB__SEARCH_INDEX(page_buf, page, entry_ptr, FAIL)
if (entry_ptr) {
HDassert(entry_ptr->addr == addr);
/* A page or a metadata multi-page with vfd_swmr_writer (case 7) */
- HDassert((entry_ptr->size == pb_ptr->page_size) ||
- (entry_ptr->size > pb_ptr->page_size && entry_ptr->mem_type != H5FD_MEM_DRAW &&
- pb_ptr->vfd_swmr_writer));
+ HDassert((entry_ptr->size == page_buf->page_size) ||
+ (entry_ptr->size > page_buf->page_size && entry_ptr->mem_type != H5FD_MEM_DRAW &&
+ page_buf->vfd_swmr_writer));
if (entry_ptr->modified_this_tick) {
- H5PB__REMOVE_FROM_TL(pb_ptr, entry_ptr, FAIL);
+ H5PB__REMOVE_FROM_TL(page_buf, entry_ptr, FAIL);
entry_ptr->modified_this_tick = FALSE;
}
@@ -1489,16 +1478,16 @@ H5PB_remove_entry(H5F_shared_t *shared, haddr_t addr)
entry_ptr->delay_write_until = 0;
- H5PB__REMOVE_FROM_DWL(pb_ptr, entry_ptr, FAIL)
+ H5PB__REMOVE_FROM_DWL(page_buf, entry_ptr, FAIL)
if (!(entry_ptr->is_mpmde)) {
- H5PB__UPDATE_RP_FOR_INSERTION(pb_ptr, entry_ptr, FAIL);
+ H5PB__UPDATE_RP_FOR_INSERTION(page_buf, entry_ptr, FAIL);
}
}
/* if the entry is dirty, mark it clean before we evict */
- if ((entry_ptr->is_dirty) && (H5PB__mark_entry_clean(pb_ptr, entry_ptr) < 0))
+ if ((entry_ptr->is_dirty) && (H5PB__mark_entry_clean(page_buf, entry_ptr) < 0))
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL, "mark entry clean failed")
@@ -1620,7 +1609,7 @@ H5PB_remove_entries(H5F_shared_t *shared, haddr_t addr, hsize_t size)
uint64_t end_page;
int64_t entry_pages = 0;
hsize_t entry_size;
- H5PB_t * pb_ptr = NULL;
+ H5PB_t * page_buf = NULL;
H5PB_entry_t *entry_ptr = NULL;
herr_t ret_value = SUCCEED; /* Return value */
@@ -1628,25 +1617,25 @@ H5PB_remove_entries(H5F_shared_t *shared, haddr_t addr, hsize_t size)
/* Sanity checks */
HDassert(shared);
- HDassert(shared->pb_ptr);
+ HDassert(shared->page_buf);
- pb_ptr = shared->pb_ptr;
+ page_buf = shared->page_buf;
/* Calculate the start_page offset */
- start_page = (addr / pb_ptr->page_size);
+ start_page = (addr / page_buf->page_size);
- HDassert(addr == start_page * pb_ptr->page_size);
+ HDassert(addr == start_page * page_buf->page_size);
/* Calculate the end_page offset */
- end_page = ((addr + (haddr_t)(size - 1)) / pb_ptr->page_size);
+ end_page = ((addr + (haddr_t)(size - 1)) / page_buf->page_size);
HDassert(start_page <= end_page);
- HDassert(((end_page - start_page) * pb_ptr->page_size) <= size);
- HDassert(size <= ((end_page - start_page + 1) * pb_ptr->page_size));
+ HDassert(((end_page - start_page) * page_buf->page_size) <= size);
+ HDassert(size <= ((end_page - start_page + 1) * page_buf->page_size));
for (i = start_page; i <= end_page; i++) {
/* test to see if page i exists */
- H5PB__SEARCH_INDEX(pb_ptr, i, entry_ptr, FAIL)
+ H5PB__SEARCH_INDEX(page_buf, i, entry_ptr, FAIL)
if (entry_ptr) {
@@ -1656,9 +1645,9 @@ H5PB_remove_entries(H5F_shared_t *shared, haddr_t addr, hsize_t size)
HDassert(entry_pages <= 0);
entry_size = entry_ptr->size;
- entry_pages = (int64_t)(entry_size / pb_ptr->page_size);
+ entry_pages = (int64_t)(entry_size / page_buf->page_size);
- if ((uint64_t)entry_pages * pb_ptr->page_size < entry_size) {
+ if ((uint64_t)entry_pages * page_buf->page_size < entry_size) {
entry_pages++;
}
@@ -1712,7 +1701,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5PB_update_entry(H5PB_t *pb_ptr, haddr_t addr, size_t size, const void *buf)
+H5PB_update_entry(H5PB_t *page_buf, haddr_t addr, size_t size, const void *buf)
{
uint64_t page;
size_t offset;
@@ -1723,27 +1712,27 @@ H5PB_update_entry(H5PB_t *pb_ptr, haddr_t addr, size_t size, const void *buf)
FUNC_ENTER_NOAPI(FAIL)
/* Sanity checks */
- HDassert(pb_ptr);
- HDassert(pb_ptr->magic == H5PB__H5PB_T_MAGIC);
+ HDassert(page_buf);
+ HDassert(page_buf->magic == H5PB__H5PB_T_MAGIC);
HDassert(size > 0);
- HDassert(size <= pb_ptr->page_size);
+ HDassert(size <= page_buf->page_size);
HDassert(buf);
- if (pb_ptr->min_rd_pages < pb_ptr->max_pages) {
+ if (page_buf->min_rd_pages < page_buf->max_pages) {
/* page buffer is configured to accept metadata pages */
/* Calculate the aligned address of the containing page */
- page = (addr / pb_ptr->page_size);
- page_addr = page * pb_ptr->page_size;
+ page = (addr / page_buf->page_size);
+ page_addr = page * page_buf->page_size;
- H5PB__SEARCH_INDEX(pb_ptr, page, entry_ptr, FAIL)
+ H5PB__SEARCH_INDEX(page_buf, page, entry_ptr, FAIL)
if (entry_ptr) {
HDassert(entry_ptr->is_metadata);
HDassert(!(entry_ptr->is_mpmde));
- HDassert(addr + size <= page_addr + pb_ptr->page_size);
+ HDassert(addr + size <= page_addr + page_buf->page_size);
offset = addr - page_addr;
@@ -1752,7 +1741,7 @@ H5PB_update_entry(H5PB_t *pb_ptr, haddr_t addr, size_t size, const void *buf)
/* should we mark the page dirty? If so, replace the following
* with a call to H5PB__mark_entry_dirty()
*/
- H5PB__UPDATE_RP_FOR_ACCESS(pb_ptr, entry_ptr, FAIL)
+ H5PB__UPDATE_RP_FOR_ACCESS(page_buf, entry_ptr, FAIL)
}
}
@@ -1796,7 +1785,7 @@ done:
herr_t
H5PB_vfd_swmr__release_delayed_writes(H5F_shared_t *shared)
{
- H5PB_t * pb_ptr = NULL;
+ H5PB_t * page_buf = NULL;
H5PB_entry_t *entry_ptr = NULL;
herr_t ret_value = SUCCEED; /* Return value */
@@ -1807,25 +1796,25 @@ H5PB_vfd_swmr__release_delayed_writes(H5F_shared_t *shared)
HDassert(shared->vfd_swmr);
HDassert(shared->vfd_swmr_writer);
- pb_ptr = shared->pb_ptr;
+ page_buf = shared->page_buf;
- HDassert(pb_ptr);
- HDassert(pb_ptr->magic == H5PB__H5PB_T_MAGIC);
- HDassert(pb_ptr->vfd_swmr_writer);
+ HDassert(page_buf);
+ HDassert(page_buf->magic == H5PB__H5PB_T_MAGIC);
+ HDassert(page_buf->vfd_swmr_writer);
- while (pb_ptr->dwl_tail_ptr && pb_ptr->dwl_tail_ptr->delay_write_until <= shared->tick_num) {
+ while (page_buf->dwl_tail_ptr && page_buf->dwl_tail_ptr->delay_write_until <= shared->tick_num) {
- entry_ptr = pb_ptr->dwl_tail_ptr;
+ entry_ptr = page_buf->dwl_tail_ptr;
HDassert(entry_ptr->is_dirty);
entry_ptr->delay_write_until = 0;
- H5PB__REMOVE_FROM_DWL(pb_ptr, entry_ptr, FAIL)
+ H5PB__REMOVE_FROM_DWL(page_buf, entry_ptr, FAIL)
if (entry_ptr->is_mpmde) { /* flush and evict now */
- if (H5PB__flush_entry(shared, pb_ptr, entry_ptr) < 0)
+ if (H5PB__flush_entry(shared, page_buf, entry_ptr) < 0)
HGOTO_ERROR(H5E_PAGEBUF, H5E_WRITEERROR, FAIL, "flush of mpmde failed")
@@ -1835,7 +1824,7 @@ H5PB_vfd_swmr__release_delayed_writes(H5F_shared_t *shared)
}
else { /* insert it in the replacement policy */
- H5PB__UPDATE_RP_FOR_INSERT_APPEND(pb_ptr, entry_ptr, FAIL)
+ H5PB__UPDATE_RP_FOR_INSERT_APPEND(page_buf, entry_ptr, FAIL)
}
}
@@ -1868,7 +1857,7 @@ done:
herr_t
H5PB_vfd_swmr__release_tick_list(H5F_shared_t *shared)
{
- H5PB_t * pb_ptr = NULL;
+ H5PB_t * page_buf = NULL;
H5PB_entry_t *entry_ptr = NULL;
herr_t ret_value = SUCCEED; /* Return value */
@@ -1879,18 +1868,18 @@ H5PB_vfd_swmr__release_tick_list(H5F_shared_t *shared)
HDassert(shared->vfd_swmr);
HDassert(shared->vfd_swmr_writer);
- pb_ptr = shared->pb_ptr;
+ page_buf = shared->page_buf;
- HDassert(pb_ptr);
- HDassert(pb_ptr->magic == H5PB__H5PB_T_MAGIC);
- HDassert(pb_ptr->vfd_swmr_writer);
+ HDassert(page_buf);
+ HDassert(page_buf->magic == H5PB__H5PB_T_MAGIC);
+ HDassert(page_buf->vfd_swmr_writer);
/* remove all entries from the tick list */
- while (pb_ptr->tl_head_ptr) {
+ while (page_buf->tl_head_ptr) {
- entry_ptr = pb_ptr->tl_head_ptr;
+ entry_ptr = page_buf->tl_head_ptr;
- H5PB__REMOVE_FROM_TL(pb_ptr, entry_ptr, FAIL)
+ H5PB__REMOVE_FROM_TL(page_buf, entry_ptr, FAIL)
entry_ptr->modified_this_tick = FALSE;
@@ -1901,7 +1890,7 @@ H5PB_vfd_swmr__release_tick_list(H5F_shared_t *shared)
if (entry_ptr->delay_write_until == 0) {
/* flush and evict the multi-page metadata entry immediately */
- if (H5PB__flush_entry(shared, pb_ptr, entry_ptr) < 0)
+ if (H5PB__flush_entry(shared, page_buf, entry_ptr) < 0)
HGOTO_ERROR(H5E_PAGEBUF, H5E_WRITEERROR, FAIL, "flush of mpmde failed")
@@ -1916,10 +1905,10 @@ H5PB_vfd_swmr__release_tick_list(H5F_shared_t *shared)
*/
}
- HDassert(pb_ptr->tl_head_ptr == NULL);
- HDassert(pb_ptr->tl_tail_ptr == NULL);
- HDassert(pb_ptr->tl_len == 0);
- HDassert(pb_ptr->tl_size == 0);
+ HDassert(page_buf->tl_head_ptr == NULL);
+ HDassert(page_buf->tl_tail_ptr == NULL);
+ HDassert(page_buf->tl_len == 0);
+ HDassert(page_buf->tl_size == 0);
done:
@@ -1948,7 +1937,7 @@ done:
herr_t
H5PB_vfd_swmr__set_tick(H5F_shared_t *shared)
{
- H5PB_t *pb_ptr = NULL;
+ H5PB_t *page_buf = NULL;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -1958,20 +1947,20 @@ H5PB_vfd_swmr__set_tick(H5F_shared_t *shared)
HDassert(shared->vfd_swmr);
HDassert(shared->vfd_swmr_writer);
- pb_ptr = shared->pb_ptr;
+ page_buf = shared->page_buf;
- HDassert(pb_ptr);
- HDassert(pb_ptr->magic == H5PB__H5PB_T_MAGIC);
- HDassert(pb_ptr->vfd_swmr_writer);
+ HDassert(page_buf);
+ HDassert(page_buf->magic == H5PB__H5PB_T_MAGIC);
+ HDassert(page_buf->vfd_swmr_writer);
/* the tick must always increase by 1 -- verify this */
- if (shared->tick_num != pb_ptr->cur_tick + 1)
+ if (shared->tick_num != page_buf->cur_tick + 1)
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL,
- "shared->tick_num (%" PRIu64 ") != (%" PRIu64 ") pb_ptr->cur_tick + 1 ?!?!",
- shared->tick_num, pb_ptr->cur_tick)
+ "shared->tick_num (%" PRIu64 ") != (%" PRIu64 ") page_buf->cur_tick + 1 ?!?!",
+ shared->tick_num, page_buf->cur_tick)
- pb_ptr->cur_tick = shared->tick_num;
+ page_buf->cur_tick = shared->tick_num;
done:
@@ -2076,7 +2065,7 @@ H5PB_vfd_swmr__update_index(H5F_t *f, uint32_t *idx_ent_added_ptr, uint32_t *idx
uint32_t idx_ent_modified = 0;
uint32_t idx_ent_not_in_tl = 0;
uint32_t idx_ent_not_in_tl_flushed = 0;
- H5PB_t * pb_ptr = NULL;
+ H5PB_t * page_buf = NULL;
H5PB_entry_t * entry;
H5FD_vfd_swmr_idx_entry_t *ie_ptr = NULL;
H5FD_vfd_swmr_idx_entry_t *idx = NULL;
@@ -2091,11 +2080,11 @@ H5PB_vfd_swmr__update_index(H5F_t *f, uint32_t *idx_ent_added_ptr, uint32_t *idx
HDassert(idx);
- pb_ptr = shared->pb_ptr;
+ page_buf = shared->page_buf;
- HDassert(pb_ptr);
- HDassert(pb_ptr->magic == H5PB__H5PB_T_MAGIC);
- HDassert(pb_ptr->vfd_swmr_writer);
+ HDassert(page_buf);
+ HDassert(page_buf->magic == H5PB__H5PB_T_MAGIC);
+ HDassert(page_buf->vfd_swmr_writer);
HDassert(idx_ent_added_ptr);
HDassert(idx_ent_modified_ptr);
@@ -2106,7 +2095,7 @@ H5PB_vfd_swmr__update_index(H5F_t *f, uint32_t *idx_ent_added_ptr, uint32_t *idx
* as appropriate.
*/
- for (entry = pb_ptr->tl_head_ptr; entry != NULL; entry = entry->tl_next) {
+ for (entry = page_buf->tl_head_ptr; entry != NULL; entry = entry->tl_next) {
uint64_t target_page = entry->page;
HDassert(entry->magic == H5PB__H5PB_ENTRY_T_MAGIC);
@@ -2187,7 +2176,7 @@ H5PB_vfd_swmr__update_index(H5F_t *f, uint32_t *idx_ent_added_ptr, uint32_t *idx
if (ie_ptr->clean)
continue;
- H5PB__SEARCH_INDEX(pb_ptr, ie_ptr->hdf5_page_offset, entry, FAIL);
+ H5PB__SEARCH_INDEX(page_buf, ie_ptr->hdf5_page_offset, entry, FAIL);
if (entry == NULL || !entry->is_dirty) {
idx_ent_not_in_tl_flushed++;
@@ -2357,7 +2346,7 @@ done:
herr_t
H5PB_write(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size, const void *buf)
{
- H5PB_t *pb_ptr; /* Page buffer for this file */
+ H5PB_t *page_buf; /* Page buffer for this file */
hbool_t bypass_pb = FALSE; /* Whether to bypass page buffering */
hbool_t split_write = FALSE; /* whether md write must be split */
herr_t ret_value = SUCCEED; /* Return value */
@@ -2372,22 +2361,22 @@ H5PB_write(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size, con
FUNC_ENTER_NOAPI(FAIL)
- pb_ptr = shared->pb_ptr;
+ page_buf = shared->page_buf;
- if (pb_ptr != NULL && type != H5FD_MEM_DRAW)
- H5PB_count_meta_access_by_size(pb_ptr, size);
+ if (page_buf != NULL && type != H5FD_MEM_DRAW)
+ H5PB_count_meta_access_by_size(page_buf, size);
- if (pb_ptr == NULL) {
+ if (page_buf == NULL) {
bypass_pb = TRUE; /* case 1) -- page buffer is disabled */
}
else {
- HDassert(pb_ptr->magic == H5PB__H5PB_T_MAGIC);
+ HDassert(page_buf->magic == H5PB__H5PB_T_MAGIC);
if (H5FD_MEM_DRAW == type) { /* raw data write */
- if ((pb_ptr->min_md_pages == pb_ptr->max_pages) || (pb_ptr->vfd_swmr)) {
+ if ((page_buf->min_md_pages == page_buf->max_pages) || (page_buf->vfd_swmr)) {
/* case 2) -- page buffer configured for metadata only */
bypass_pb = TRUE;
@@ -2395,7 +2384,7 @@ H5PB_write(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size, con
}
else { /* metadata write */
- if (pb_ptr->min_rd_pages == pb_ptr->max_pages) {
+ if (page_buf->min_rd_pages == page_buf->max_pages) {
/* case 5) -- page buffer configured for raw data only */
bypass_pb = TRUE;
@@ -2431,20 +2420,20 @@ H5PB_write(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size, con
haddr_t end_addr; /* addr of last byte in read */
/* Calculate the aligned address of the first page */
- start_page = (addr / pb_ptr->page_size);
- start_page_addr = start_page * pb_ptr->page_size;
+ start_page = (addr / page_buf->page_size);
+ start_page_addr = start_page * page_buf->page_size;
/* Calculate the aligned address of the last page */
end_addr = addr + (haddr_t)(size - 1);
- end_page = end_addr / (haddr_t)(pb_ptr->page_size);
- end_page_addr = end_page * pb_ptr->page_size;
+ end_page = end_addr / (haddr_t)(page_buf->page_size);
+ end_page_addr = end_page * page_buf->page_size;
HDassert(start_page_addr <= addr);
- HDassert(addr < start_page_addr + (haddr_t)(pb_ptr->page_size));
+ HDassert(addr < start_page_addr + (haddr_t)(page_buf->page_size));
HDassert(start_page <= end_page);
HDassert(end_page_addr <= ((addr + (haddr_t)size - 1)));
- HDassert((addr + (haddr_t)size - 1) < (end_page_addr + pb_ptr->page_size));
+ HDassert((addr + (haddr_t)size - 1) < (end_page_addr + page_buf->page_size));
/* test to see if the write crosses a page boundary, and
* does not start on a page boundary, and is not of an
@@ -2452,7 +2441,7 @@ H5PB_write(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size, con
*/
if ((start_page < end_page) &&
(!((addr == start_page_addr) &&
- (end_page_addr + (haddr_t)(pb_ptr->page_size) == end_addr + 1)))) {
+ (end_page_addr + (haddr_t)(page_buf->page_size) == end_addr + 1)))) {
/* the read crosses a page boundary and is not
* page aligned and of length some multiple of page size.
@@ -2473,9 +2462,9 @@ H5PB_write(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size, con
else {
HDassert(addr == start_page_addr);
- HDassert(size > pb_ptr->page_size);
+ HDassert(size > page_buf->page_size);
- if (!pb_ptr->vfd_swmr_writer) {
+ if (!page_buf->vfd_swmr_writer) {
/* case 6) -- multi-page entry with fixed /
* extensible array filtered out, and no
@@ -2485,7 +2474,7 @@ H5PB_write(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size, con
}
}
}
- else if ((size > pb_ptr->page_size) && (!pb_ptr->vfd_swmr_writer)) {
+ else if ((size > page_buf->page_size) && (!page_buf->vfd_swmr_writer)) {
/* write is larger than page size and we are not
* in VFD SWMR mode -- bypass the page buffer.
@@ -2528,7 +2517,7 @@ H5PB_write(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size, con
*/
second_page = start_page + 1;
- second_page_addr = (haddr_t)(second_page * pb_ptr->page_size);
+ second_page_addr = (haddr_t)(second_page * page_buf->page_size);
if (addr > start_page_addr) { /* prefix exists */
@@ -2536,11 +2525,11 @@ H5PB_write(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size, con
prefix_size = (size_t)(second_page_addr - addr);
HDassert(prefix_addr > start_page_addr);
- HDassert(prefix_size < pb_ptr->page_size);
- HDassert(((size_t)(addr - start_page_addr) + prefix_size) == pb_ptr->page_size);
+ HDassert(prefix_size < page_buf->page_size);
+ HDassert(((size_t)(addr - start_page_addr) + prefix_size) == page_buf->page_size);
}
- if (size - prefix_size >= pb_ptr->page_size) {
+ if (size - prefix_size >= page_buf->page_size) {
/* body exists */
@@ -2555,29 +2544,29 @@ H5PB_write(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size, con
body_addr = second_page_addr;
}
- if (end_addr < end_page_addr + (haddr_t)(pb_ptr->page_size - 1)) {
+ if (end_addr < end_page_addr + (haddr_t)(page_buf->page_size - 1)) {
/* suffix exists */
- body_size = (size_t)(end_page - body_page) * pb_ptr->page_size;
+ body_size = (size_t)(end_page - body_page) * page_buf->page_size;
}
else {
/* suffix is empty */
- body_size = (size_t)(end_page - body_page + 1) * pb_ptr->page_size;
+ body_size = (size_t)(end_page - body_page + 1) * page_buf->page_size;
}
HDassert((body_page == start_page) || (body_page == start_page + 1));
- HDassert(body_addr == (haddr_t)(body_page * pb_ptr->page_size));
+ HDassert(body_addr == (haddr_t)(body_page * page_buf->page_size));
HDassert(body_size < size);
- HDassert(body_size >= pb_ptr->page_size);
+ HDassert(body_size >= page_buf->page_size);
HDassert(body_addr == addr + (haddr_t)prefix_size);
HDassert((body_addr + (haddr_t)body_size) <= (end_addr + 1));
}
- if (end_addr < end_page_addr + (haddr_t)(pb_ptr->page_size - 1)) {
+ if (end_addr < end_page_addr + (haddr_t)(page_buf->page_size - 1)) {
suffix_addr = end_page_addr;
suffix_size = (end_addr + 1) - end_page_addr;
@@ -2609,9 +2598,9 @@ H5PB_write(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size, con
HGOTO_ERROR(H5E_PAGEBUF, H5E_WRITEERROR, FAIL, "write through lower VFD failed")
/* Update statistics */
- if (pb_ptr) {
+ if (page_buf) {
- H5PB__UPDATE_STATS_FOR_BYPASS(pb_ptr, type, size);
+ H5PB__UPDATE_STATS_FOR_BYPASS(page_buf, type, size);
}
}
else {
@@ -2637,7 +2626,7 @@ H5PB_write(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size, con
/* write the body if it exists */
if (body_size > 0) {
- /* The "body_size == pb_ptr->page_size" clause in the
+ /* The "body_size == page_buf->page_size" clause in the
* following if is required since in normal operating
* mode, the page buffer buffers metadata I/O
* requests of page size or less.
@@ -2655,7 +2644,7 @@ H5PB_write(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size, con
*
* JRM 4/19/20
*/
- if ((pb_ptr->vfd_swmr) || (body_size == pb_ptr->page_size)) {
+ if ((page_buf->vfd_swmr) || (body_size == page_buf->page_size)) {
if (H5PB__write_meta(shared, type, body_addr, body_size,
(const void *)((const uint8_t *)buf + prefix_size)) < 0)
@@ -2669,7 +2658,7 @@ H5PB_write(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size, con
HGOTO_ERROR(H5E_PAGEBUF, H5E_WRITEERROR, FAIL, "write through of body failed")
- H5PB__UPDATE_STATS_FOR_BYPASS(pb_ptr, type, size);
+ H5PB__UPDATE_STATS_FOR_BYPASS(page_buf, type, size);
}
}
@@ -2682,7 +2671,7 @@ H5PB_write(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size, con
HGOTO_ERROR(H5E_PAGEBUF, H5E_WRITEERROR, FAIL, "H5PB_write_meta() failed on suffix")
}
- H5PB__UPDATE_STATS_FOR_WRITE_SPLIT(pb_ptr)
+ H5PB__UPDATE_STATS_FOR_WRITE_SPLIT(page_buf)
}
else { /* cases 7, and 8 */
@@ -2708,8 +2697,8 @@ done:
*
* Purpose: Allocate an instance of H5PB_entry_t and its associated
* buffer. The supplied size must be greater than or
- * equal to pb_ptr->page_size, and equal to that value if
- * pb_ptr->vfd_swmr_writer is FALSE.
+ * equal to page_buf->page_size, and equal to that value if
+ * page_buf->vfd_swmr_writer is FALSE.
*
* The associated buffer is zeroed if clean_image is TRUE.
*
@@ -2723,7 +2712,7 @@ done:
*-------------------------------------------------------------------------
*/
static H5PB_entry_t *
-H5PB__allocate_page(H5PB_t *pb_ptr, size_t size, hbool_t clean_image)
+H5PB__allocate_page(H5PB_t *page_buf, size_t size, hbool_t clean_image)
{
H5PB_entry_t *entry_ptr = NULL;
void * image_ptr = NULL;
@@ -2732,10 +2721,10 @@ H5PB__allocate_page(H5PB_t *pb_ptr, size_t size, hbool_t clean_image)
FUNC_ENTER_NOAPI(NULL)
/* sanity checks */
- HDassert(pb_ptr);
- HDassert(pb_ptr->magic == H5PB__H5PB_T_MAGIC);
- HDassert(size >= pb_ptr->page_size);
- HDassert((size == pb_ptr->page_size) || (pb_ptr->vfd_swmr_writer));
+ HDassert(page_buf);
+ HDassert(page_buf->magic == H5PB__H5PB_T_MAGIC);
+ HDassert(size >= page_buf->page_size);
+ HDassert((size == page_buf->page_size) || (page_buf->vfd_swmr_writer));
/* allocate the entry and its associated image buffer */
if (NULL == (entry_ptr = H5FL_MALLOC(H5PB_entry_t)))
@@ -2757,7 +2746,7 @@ H5PB__allocate_page(H5PB_t *pb_ptr, size_t size, hbool_t clean_image)
/* initialize the new page buffer entry */
entry_ptr->magic = H5PB__H5PB_ENTRY_T_MAGIC;
- entry_ptr->pb_ptr = pb_ptr;
+ entry_ptr->page_buf = page_buf;
entry_ptr->addr = HADDR_UNDEF;
entry_ptr->page = 0;
entry_ptr->size = size;
@@ -2827,7 +2816,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5PB__create_new_page(H5PB_t *pb_ptr, haddr_t addr, size_t size, H5FD_mem_t type, hbool_t clean_image,
+H5PB__create_new_page(H5PB_t *page_buf, haddr_t addr, size_t size, H5FD_mem_t type, hbool_t clean_image,
H5PB_entry_t **entry_ptr_ptr)
{
hbool_t inserted_in_index = FALSE;
@@ -2839,17 +2828,17 @@ H5PB__create_new_page(H5PB_t *pb_ptr, haddr_t addr, size_t size, H5FD_mem_t type
FUNC_ENTER_NOAPI(FAIL)
/* Sanity checks */
- HDassert(pb_ptr->magic == H5PB__H5PB_T_MAGIC);
+ HDassert(page_buf->magic == H5PB__H5PB_T_MAGIC);
- page = (uint64_t)addr / (uint64_t)(pb_ptr->page_size);
+ page = (uint64_t)addr / (uint64_t)(page_buf->page_size);
- HDassert((uint64_t)(addr) == (page * (uint64_t)(pb_ptr->page_size)));
+ HDassert((uint64_t)(addr) == (page * (uint64_t)(page_buf->page_size)));
- HDassert(size >= pb_ptr->page_size);
- HDassert((size == pb_ptr->page_size) || ((pb_ptr->vfd_swmr_writer) && (type != H5FD_MEM_DRAW)));
+ HDassert(size >= page_buf->page_size);
+ HDassert((size == page_buf->page_size) || ((page_buf->vfd_swmr_writer) && (type != H5FD_MEM_DRAW)));
HDassert((NULL == entry_ptr_ptr) || (NULL == *entry_ptr_ptr));
- H5PB__SEARCH_INDEX(pb_ptr, page, entry_ptr, FAIL);
+ H5PB__SEARCH_INDEX(page_buf, page, entry_ptr, FAIL);
if (entry_ptr != NULL) {
@@ -2857,36 +2846,36 @@ H5PB__create_new_page(H5PB_t *pb_ptr, haddr_t addr, size_t size, H5FD_mem_t type
"page buffer already contains a page at the specified address")
}
- entry_ptr = H5PB__allocate_page(pb_ptr, size, clean_image);
+ entry_ptr = H5PB__allocate_page(page_buf, size, clean_image);
if (NULL == entry_ptr)
HGOTO_ERROR(H5E_PAGEBUF, H5E_NOSPACE, FAIL, "Can't allocate new page buffer entry")
/* perform additional initialization */
HDassert(entry_ptr->magic == H5PB__H5PB_ENTRY_T_MAGIC);
- HDassert(entry_ptr->pb_ptr == pb_ptr);
+ HDassert(entry_ptr->page_buf == page_buf);
entry_ptr->addr = addr;
entry_ptr->page = page;
HDassert(entry_ptr->size == size);
HDassert(entry_ptr->image_ptr);
entry_ptr->mem_type = type;
entry_ptr->is_metadata = (type != H5FD_MEM_DRAW);
- entry_ptr->is_mpmde = ((entry_ptr->is_metadata) && (size > pb_ptr->page_size));
+ entry_ptr->is_mpmde = ((entry_ptr->is_metadata) && (size > page_buf->page_size));
entry_ptr->is_dirty = FALSE;
/* insert in the hash table */
- H5PB__INSERT_IN_INDEX(pb_ptr, entry_ptr, FAIL)
+ H5PB__INSERT_IN_INDEX(page_buf, entry_ptr, FAIL)
inserted_in_index = TRUE;
/* insert at the head of the LRU if it isn't a multi-page metadata entry */
if (!entry_ptr->is_mpmde) {
- H5PB__UPDATE_RP_FOR_INSERTION(pb_ptr, entry_ptr, FAIL)
+ H5PB__UPDATE_RP_FOR_INSERTION(page_buf, entry_ptr, FAIL)
inserted_in_lru = TRUE;
}
/* updates stats */
- H5PB__UPDATE_STATS_FOR_INSERTION(pb_ptr, entry_ptr);
+ H5PB__UPDATE_STATS_FOR_INSERTION(page_buf, entry_ptr);
if (entry_ptr_ptr) {
@@ -2901,12 +2890,12 @@ done:
if (inserted_in_lru) {
- H5PB__UPDATE_RP_FOR_EVICTION(pb_ptr, entry_ptr, FAIL);
+ H5PB__UPDATE_RP_FOR_EVICTION(page_buf, entry_ptr, FAIL);
}
if (inserted_in_index) {
- H5PB__DELETE_FROM_INDEX(pb_ptr, entry_ptr, FAIL)
+ H5PB__DELETE_FROM_INDEX(page_buf, entry_ptr, FAIL)
}
H5PB__deallocate_page(entry_ptr);
@@ -3001,14 +2990,14 @@ H5PB__deallocate_page(H5PB_entry_t *entry_ptr)
static herr_t
H5PB__evict_entry(H5F_shared_t *shared, H5PB_entry_t *entry_ptr, hbool_t force, hbool_t only_mark)
{
- H5PB_t *pb_ptr = shared->pb_ptr;
+ H5PB_t *page_buf = shared->page_buf;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
/* sanity checks */
- HDassert(pb_ptr);
- HDassert(pb_ptr->magic == H5PB__H5PB_T_MAGIC);
+ HDassert(page_buf);
+ HDassert(page_buf->magic == H5PB__H5PB_T_MAGIC);
HDassert(entry_ptr);
HDassert(entry_ptr->magic == H5PB__H5PB_ENTRY_T_MAGIC);
HDassert(entry_ptr->size > 0);
@@ -3025,27 +3014,27 @@ H5PB__evict_entry(H5F_shared_t *shared, H5PB_entry_t *entry_ptr, hbool_t force,
if (!force) {
- /* it is OK to evict an metadata page if pb_ptr->curr_md_pages ==
- * pb_ptr->min_md_pages - 1 if we are about to replace it with another
+ /* it is OK to evict an metadata page if page_buf->curr_md_pages ==
+ * page_buf->min_md_pages - 1 if we are about to replace it with another
* metadata page.
*
* Similarly, it is OK to evict an raw data page if
- * pb_ptr->curr_rd_pages == pb_ptr->min_rd_pages - 1 if we are
+ * page_buf->curr_rd_pages == page_buf->min_rd_pages - 1 if we are
* about to replace it with another raw data page.
*
* Assume sanity checks have been made before this call, and
* allow the above without testing the intended replacement.
*/
- if ((entry_ptr->is_metadata) && (pb_ptr->curr_md_pages < pb_ptr->min_md_pages)) {
+ if ((entry_ptr->is_metadata) && (page_buf->curr_md_pages < page_buf->min_md_pages)) {
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL, "Attempt to violate min_md_pages");
}
- else if ((!entry_ptr->is_metadata) && (pb_ptr->curr_rd_pages < pb_ptr->min_rd_pages)) {
+ else if ((!entry_ptr->is_metadata) && (page_buf->curr_rd_pages < page_buf->min_rd_pages)) {
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL, "Attempt to violate min_rd_pages");
}
}
- else if ((entry_ptr->is_dirty) && (H5PB__mark_entry_clean(pb_ptr, entry_ptr) < 0)) {
+ else if ((entry_ptr->is_dirty) && (H5PB__mark_entry_clean(page_buf, entry_ptr) < 0)) {
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL, "mark entry clean failed")
}
@@ -3053,11 +3042,11 @@ H5PB__evict_entry(H5F_shared_t *shared, H5PB_entry_t *entry_ptr, hbool_t force,
/* if the entry is in the replacement policy, remove it */
if (!(entry_ptr->is_mpmde)) {
- H5PB__UPDATE_RP_FOR_EVICTION(pb_ptr, entry_ptr, FAIL)
+ H5PB__UPDATE_RP_FOR_EVICTION(page_buf, entry_ptr, FAIL)
}
/* remove the entry from the hash table */
- H5PB__DELETE_FROM_INDEX(pb_ptr, entry_ptr, FAIL)
+ H5PB__DELETE_FROM_INDEX(page_buf, entry_ptr, FAIL)
/* We need to remove the entry from the shadow file index in
* the VFD SWMR case.
@@ -3083,7 +3072,7 @@ H5PB__evict_entry(H5F_shared_t *shared, H5PB_entry_t *entry_ptr, hbool_t force,
}
/* update stats for eviction */
- H5PB__UPDATE_STATS_FOR_EVICTION(pb_ptr, entry_ptr)
+ H5PB__UPDATE_STATS_FOR_EVICTION(page_buf, entry_ptr)
/* deallocate the page */
H5PB__deallocate_page(entry_ptr);
@@ -3104,7 +3093,7 @@ done:
* replacement policy. In this, also update the replacement
* policy for flush.
*
- * If pb_ptr->vfd_swmr_writer, it is possible that the target
+ * If page_buf->vfd_swmr_writer, it is possible that the target
* is a multi-page metadata entry. In this case, the entry
* is not in the replacement policy, and thus the policy
* should not be updated.
@@ -3118,7 +3107,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5PB__flush_entry(H5F_shared_t *shared, H5PB_t *pb_ptr, H5PB_entry_t *const entry_ptr)
+H5PB__flush_entry(H5F_shared_t *shared, H5PB_t *page_buf, H5PB_entry_t *const entry_ptr)
{
haddr_t eoa; /* Current EOA for the file */
herr_t ret_value = SUCCEED; /* Return value */
@@ -3128,16 +3117,16 @@ H5PB__flush_entry(H5F_shared_t *shared, H5PB_t *pb_ptr, H5PB_entry_t *const entr
/* sanity checks */
HDassert(shared);
HDassert(shared->lf);
- HDassert(pb_ptr);
- HDassert(pb_ptr->magic == H5PB__H5PB_T_MAGIC);
+ HDassert(page_buf);
+ HDassert(page_buf->magic == H5PB__H5PB_T_MAGIC);
HDassert(entry_ptr);
HDassert(entry_ptr->magic == H5PB__H5PB_ENTRY_T_MAGIC);
HDassert(entry_ptr->size > 0);
- HDassert(entry_ptr->size >= pb_ptr->page_size);
- HDassert((entry_ptr->size == pb_ptr->page_size) || (entry_ptr->is_mpmde));
+ HDassert(entry_ptr->size >= page_buf->page_size);
+ HDassert((entry_ptr->size == page_buf->page_size) || (entry_ptr->is_mpmde));
HDassert(entry_ptr->image_ptr);
HDassert(entry_ptr->is_dirty);
- HDassert((pb_ptr->vfd_swmr_writer) || (!(entry_ptr->is_mpmde)));
+ HDassert((page_buf->vfd_swmr_writer) || (!(entry_ptr->is_mpmde)));
HDassert(0 == entry_ptr->delay_write_until);
/* Retrieve the 'eoa' for the file */
@@ -3170,7 +3159,7 @@ H5PB__flush_entry(H5F_shared_t *shared, H5PB_t *pb_ptr, H5PB_entry_t *const entr
HGOTO_ERROR(H5E_PAGEBUF, H5E_WRITEERROR, FAIL, "file write failed")
/* mark the entry clean */
- if (H5PB__mark_entry_clean(pb_ptr, entry_ptr) < 0)
+ if (H5PB__mark_entry_clean(page_buf, entry_ptr) < 0)
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL, "mark entry clean failed")
@@ -3178,11 +3167,11 @@ H5PB__flush_entry(H5F_shared_t *shared, H5PB_t *pb_ptr, H5PB_entry_t *const entr
if (!entry_ptr->is_mpmde) {
HDassert(entry_ptr->delay_write_until == 0);
- H5PB__UPDATE_RP_FOR_FLUSH(pb_ptr, entry_ptr, FAIL)
+ H5PB__UPDATE_RP_FOR_FLUSH(page_buf, entry_ptr, FAIL)
}
/* update stats for flush */
- H5PB__UPDATE_STATS_FOR_FLUSH(pb_ptr, entry_ptr)
+ H5PB__UPDATE_STATS_FOR_FLUSH(page_buf, entry_ptr)
done:
@@ -3198,7 +3187,7 @@ done:
* it into the page buffer. If necessary and possible, make
* space for the new page first.
*
- * Note that the size of the page is always pb_ptr->page_size,
+ * Note that the size of the page is always page_buf->page_size,
* even in the VFD SWMR case, as in this context, multi-page
* metadata entries are always written in full, and they
* may only enter the page buffer as the result of a write.
@@ -3221,7 +3210,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5PB__load_page(H5F_shared_t *shared, H5PB_t *pb_ptr, haddr_t addr, H5FD_mem_t type,
+H5PB__load_page(H5F_shared_t *shared, H5PB_t *page_buf, haddr_t addr, H5FD_mem_t type,
H5PB_entry_t **entry_ptr_ptr)
{
hbool_t skip_read = FALSE;
@@ -3235,8 +3224,8 @@ H5PB__load_page(H5F_shared_t *shared, H5PB_t *pb_ptr, haddr_t addr, H5FD_mem_t t
/* sanity checks */
HDassert(shared);
HDassert(shared->lf);
- HDassert(pb_ptr);
- HDassert(pb_ptr->magic == H5PB__H5PB_T_MAGIC);
+ HDassert(page_buf);
+ HDassert(page_buf->magic == H5PB__H5PB_T_MAGIC);
HDassert((entry_ptr_ptr == NULL) || (*entry_ptr_ptr == NULL));
#if 0 /* JRM */
@@ -3246,7 +3235,7 @@ H5PB__load_page(H5F_shared_t *shared, H5PB_t *pb_ptr, haddr_t addr, H5FD_mem_t t
HGOTO_ERROR(H5E_PAGEBUF, H5E_CANTGET, FAIL, \
"driver get_eoa request failed")
- if ( addr + ((haddr_t)(pb_ptr->page_size)) > eoa )
+ if ( addr + ((haddr_t)(page_buf->page_size)) > eoa )
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL, \
"Attempt to load page that extends past EOA")
@@ -3266,12 +3255,12 @@ H5PB__load_page(H5F_shared_t *shared, H5PB_t *pb_ptr, haddr_t addr, H5FD_mem_t t
#endif
/* make space in the page buffer if necessary */
- if ((pb_ptr->curr_pages >= pb_ptr->max_pages) && (H5PB__make_space(shared, pb_ptr, type) < 0))
+ if ((page_buf->curr_pages >= page_buf->max_pages) && (H5PB__make_space(shared, page_buf, type) < 0))
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL, "H5PB__make_space() reports an error")
/* Create a new page buffer page and insert it into the page buffer */
- if (H5PB__create_new_page(pb_ptr, addr, (size_t)(pb_ptr->page_size), type, skip_read, &entry_ptr) < 0)
+ if (H5PB__create_new_page(page_buf, addr, (size_t)(page_buf->page_size), type, skip_read, &entry_ptr) < 0)
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL, "can't create new page buffer page")
@@ -3295,7 +3284,7 @@ H5PB__load_page(H5F_shared_t *shared, H5PB_t *pb_ptr, haddr_t addr, H5FD_mem_t t
*/
entry_ptr->loaded = !skip_read;
- H5PB__UPDATE_STATS_FOR_LOAD(pb_ptr, entry_ptr)
+ H5PB__UPDATE_STATS_FOR_LOAD(page_buf, entry_ptr)
if (entry_ptr_ptr) {
@@ -3315,7 +3304,7 @@ done:
* Function: H5PB__make_space
*
* Purpose: Evict one or more pages from the page buffer so as to
- * reduce the size of the page buffer to pb_ptr->max_pages - 1.
+ * reduce the size of the page buffer to page_buf->max_pages - 1.
* if possible.
*
* Note that the function must not be called under
@@ -3361,19 +3350,19 @@ done:
* is raw data, and curr_rd_pages == min_rd_pages.
*
* 3) The entry is not on the tick list (which can only
- * happen if pb_ptr->vfd_swmr_writer is TRUE).
+ * happen if page_buf->vfd_swmr_writer is TRUE).
*
- * evict the entry and test to see if pb_ptr->curr_pages <
- * pb_ptr->max_pages. If it is, return. Otherwise, continue
+ * evict the entry and test to see if page_buf->curr_pages <
+ * page_buf->max_pages. If it is, return. Otherwise, continue
* the scan until either the above condidtion is fulfilled,
* or the head of the LRU is reach.
*
* Under normal circumstances, it should always be possible
- * to reduce the size of the page buffer below pb_ptr->max_pages.
+ * to reduce the size of the page buffer below page_buf->max_pages.
* However, due to prohibition on evicting entries on the
* tick list, and either flushing or evicting entries on the
* delayed write list, this will not in general be the case
- * if pb_ptr->vfd_swmr_writer is TRUE. In this case, the
+ * if page_buf->vfd_swmr_writer is TRUE. In this case, the
* page buffer may exceed its maximum size by an arbitrary
* amount.
*
@@ -3391,7 +3380,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5PB__make_space(H5F_shared_t *shared, H5PB_t *pb_ptr, H5FD_mem_t inserted_type)
+H5PB__make_space(H5F_shared_t *shared, H5PB_t *page_buf, H5FD_mem_t inserted_type)
{
hbool_t inserting_md;
H5PB_entry_t *search_ptr;
@@ -3402,44 +3391,44 @@ H5PB__make_space(H5F_shared_t *shared, H5PB_t *pb_ptr, H5FD_mem_t inserted_type)
FUNC_ENTER_NOAPI(FAIL)
/* sanity checks */
- HDassert(pb_ptr);
- HDassert(pb_ptr->magic == H5PB__H5PB_T_MAGIC);
- HDassert(pb_ptr->min_md_pages + pb_ptr->min_rd_pages <= pb_ptr->max_pages);
+ HDassert(page_buf);
+ HDassert(page_buf->magic == H5PB__H5PB_T_MAGIC);
+ HDassert(page_buf->min_md_pages + page_buf->min_rd_pages <= page_buf->max_pages);
inserting_md = (H5FD_MEM_DRAW != inserted_type);
- if ((inserting_md) && (pb_ptr->min_rd_pages == pb_ptr->max_pages))
+ if ((inserting_md) && (page_buf->min_rd_pages == page_buf->max_pages))
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL,
"can't make space for metadata -- pb config for raw data only")
- if ((!inserting_md) && (pb_ptr->min_md_pages == pb_ptr->max_pages))
+ if ((!inserting_md) && (page_buf->min_md_pages == page_buf->max_pages))
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL,
"can't make space for raw data -- pb config for metadata only")
- search_ptr = pb_ptr->LRU_tail_ptr;
+ search_ptr = page_buf->LRU_tail_ptr;
- while ((search_ptr) && (pb_ptr->curr_pages >= pb_ptr->max_pages)) {
+ while ((search_ptr) && (page_buf->curr_pages >= page_buf->max_pages)) {
HDassert(search_ptr->magic == H5PB__H5PB_ENTRY_T_MAGIC);
if (search_ptr->modified_this_tick) { /* entry is on tick list */
search_ptr = search_ptr->prev;
- H5PB__UPDATE_STATS_FOR_LRU_TL_SKIP(pb_ptr);
+ H5PB__UPDATE_STATS_FOR_LRU_TL_SKIP(page_buf);
}
else if ((inserting_md) && (!(search_ptr->is_metadata)) &&
- (pb_ptr->curr_rd_pages <= pb_ptr->min_rd_pages)) {
+ (page_buf->curr_rd_pages <= page_buf->min_rd_pages)) {
search_ptr = search_ptr->prev;
- H5PB__UPDATE_STATS_FOR_LRU_RD_SKIP(pb_ptr);
+ H5PB__UPDATE_STATS_FOR_LRU_RD_SKIP(page_buf);
}
else if ((!inserting_md) && (search_ptr->is_metadata) &&
- (pb_ptr->curr_md_pages <= pb_ptr->min_md_pages)) {
+ (page_buf->curr_md_pages <= page_buf->min_md_pages)) {
search_ptr = search_ptr->prev;
- H5PB__UPDATE_STATS_FOR_LRU_MD_SKIP(pb_ptr);
+ H5PB__UPDATE_STATS_FOR_LRU_MD_SKIP(page_buf);
}
else if (search_ptr->is_dirty) {
@@ -3463,7 +3452,7 @@ H5PB__make_space(H5F_shared_t *shared, H5PB_t *pb_ptr, H5FD_mem_t inserted_type)
search_ptr = search_ptr->prev;
}
- if (H5PB__flush_entry(shared, pb_ptr, flush_ptr) < 0)
+ if (H5PB__flush_entry(shared, page_buf, flush_ptr) < 0)
HGOTO_ERROR(H5E_PAGEBUF, H5E_WRITEERROR, FAIL, "Can't flush entry")
}
@@ -3477,7 +3466,7 @@ H5PB__make_space(H5F_shared_t *shared, H5PB_t *pb_ptr, H5FD_mem_t inserted_type)
}
}
- HDassert((search_ptr == NULL) || (pb_ptr->curr_pages < pb_ptr->max_pages));
+ HDassert((search_ptr == NULL) || (page_buf->curr_pages < page_buf->max_pages));
done:
@@ -3509,28 +3498,28 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5PB__mark_entry_clean(H5PB_t *pb_ptr, H5PB_entry_t *entry_ptr)
+H5PB__mark_entry_clean(H5PB_t *page_buf, H5PB_entry_t *entry_ptr)
{
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
/* sanity checks */
- HDassert(pb_ptr);
- HDassert(pb_ptr->magic == H5PB__H5PB_T_MAGIC);
+ HDassert(page_buf);
+ HDassert(page_buf->magic == H5PB__H5PB_T_MAGIC);
HDassert(entry_ptr);
HDassert(entry_ptr->magic == H5PB__H5PB_ENTRY_T_MAGIC);
HDassert(entry_ptr->size > 0);
- HDassert(entry_ptr->size >= pb_ptr->page_size);
- HDassert((entry_ptr->size == pb_ptr->page_size) || (entry_ptr->is_mpmde));
+ HDassert(entry_ptr->size >= page_buf->page_size);
+ HDassert((entry_ptr->size == page_buf->page_size) || (entry_ptr->is_mpmde));
HDassert(entry_ptr->image_ptr);
- HDassert((pb_ptr->vfd_swmr_writer) || (!(entry_ptr->is_mpmde)));
+ HDassert((page_buf->vfd_swmr_writer) || (!(entry_ptr->is_mpmde)));
/* mark the entry clean */
entry_ptr->is_dirty = FALSE;
/* update the index for the entry clean */
- H5PB__UPDATE_INDEX_FOR_ENTRY_CLEAN(pb_ptr, entry_ptr)
+ H5PB__UPDATE_INDEX_FOR_ENTRY_CLEAN(page_buf, entry_ptr)
/* don't update the replacement policy -- this will be done by
* the caller if desired.
@@ -3548,12 +3537,12 @@ done:
*
* Purpose: Mark the target entry as dirty.
*
- * If pb_ptr->vfd_swmr_writer is FALSE, the entry will be
+ * If page_buf->vfd_swmr_writer is FALSE, the entry will be
* in the replacement policy. In this, we simply mark the
* entry as dirty, and update the replacement policy for an
* access.
*
- * If pb_ptr->vfd_swmr_writer, it is possible that we must
+ * If page_buf->vfd_swmr_writer, it is possible that we must
* delay writes to the target page or multi-page metadata
* entry to avoid message from the future bugs on the VFD
* SWMR readers. In such cases we must set the
@@ -3569,34 +3558,34 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5PB__mark_entry_dirty(H5F_shared_t *shared, H5PB_t *pb_ptr, H5PB_entry_t *entry_ptr)
+H5PB__mark_entry_dirty(H5F_shared_t *shared, H5PB_t *page_buf, H5PB_entry_t *entry_ptr)
{
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
/* sanity checks */
- HDassert(pb_ptr);
- HDassert(pb_ptr->magic == H5PB__H5PB_T_MAGIC);
+ HDassert(page_buf);
+ HDassert(page_buf->magic == H5PB__H5PB_T_MAGIC);
HDassert(entry_ptr);
HDassert(entry_ptr->magic == H5PB__H5PB_ENTRY_T_MAGIC);
HDassert(entry_ptr->size > 0);
- HDassert(entry_ptr->size >= pb_ptr->page_size);
- HDassert((entry_ptr->size == pb_ptr->page_size) || (entry_ptr->is_mpmde));
+ HDassert(entry_ptr->size >= page_buf->page_size);
+ HDassert((entry_ptr->size == page_buf->page_size) || (entry_ptr->is_mpmde));
HDassert(entry_ptr->image_ptr);
- HDassert((pb_ptr->vfd_swmr_writer) || (!(entry_ptr->is_mpmde)));
+ HDassert((page_buf->vfd_swmr_writer) || (!(entry_ptr->is_mpmde)));
/* mark the entry dirty if necessary */
if (!(entry_ptr->is_dirty)) {
entry_ptr->is_dirty = TRUE;
- H5PB__UPDATE_INDEX_FOR_ENTRY_DIRTY(pb_ptr, entry_ptr)
+ H5PB__UPDATE_INDEX_FOR_ENTRY_DIRTY(page_buf, entry_ptr)
/* since the entry was clean, there can be no pending delayed write */
HDassert(entry_ptr->delay_write_until == 0);
- if ((pb_ptr->vfd_swmr_writer) && (entry_ptr->loaded) && (entry_ptr->mem_type != H5FD_MEM_DRAW) &&
+ if ((page_buf->vfd_swmr_writer) && (entry_ptr->loaded) && (entry_ptr->mem_type != H5FD_MEM_DRAW) &&
(H5F_vfd_swmr_writer__delay_write(shared, entry_ptr->page, &(entry_ptr->delay_write_until)) < 0))
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL, "get delayed write request failed")
@@ -3607,14 +3596,14 @@ H5PB__mark_entry_dirty(H5F_shared_t *shared, H5PB_t *pb_ptr, H5PB_entry_t *entry
/* remove the entry from the replacement policy */
- H5PB__UPDATE_RP_FOR_REMOVE(pb_ptr, entry_ptr, FAIL)
+ H5PB__UPDATE_RP_FOR_REMOVE(page_buf, entry_ptr, FAIL)
}
- H5PB__INSERT_IN_DWL(pb_ptr, entry_ptr, FAIL)
+ H5PB__INSERT_IN_DWL(page_buf, entry_ptr, FAIL)
}
else if (!(entry_ptr->is_mpmde)) {
- H5PB__UPDATE_RP_FOR_ACCESS(pb_ptr, entry_ptr, FAIL)
+ H5PB__UPDATE_RP_FOR_ACCESS(page_buf, entry_ptr, FAIL)
}
else {
@@ -3622,7 +3611,7 @@ H5PB__mark_entry_dirty(H5F_shared_t *shared, H5PB_t *pb_ptr, H5PB_entry_t *entry
* has been modified this tick. Thus no action is required.
*/
HDassert(entry_ptr->is_mpmde);
- HDassert(pb_ptr->vfd_swmr_writer);
+ HDassert(page_buf->vfd_swmr_writer);
}
}
else if ((!(entry_ptr->is_mpmde)) && (entry_ptr->delay_write_until == 0)) {
@@ -3630,7 +3619,7 @@ H5PB__mark_entry_dirty(H5F_shared_t *shared, H5PB_t *pb_ptr, H5PB_entry_t *entry
/* the entry is dirty and on the replacement policy -- just update
* the replacement policy for an access
*/
- H5PB__UPDATE_RP_FOR_ACCESS(pb_ptr, entry_ptr, FAIL)
+ H5PB__UPDATE_RP_FOR_ACCESS(page_buf, entry_ptr, FAIL)
}
done:
@@ -3676,12 +3665,12 @@ done:
* 9) If the read is for metadata, is page aligned, is larger
* than one page, and there is a multi-page metadata entry
* at the target page address, test to see if
- * pb_ptr->vfd_swmr_write is TRUE.
+ * page_buf->vfd_swmr_writer is TRUE.
*
* If it is, satisfy the read from the multi-page metadata
* entry, clipping the read if necessary.
*
- * if pb_ptr->vfd_swmr_write is FALSE, flag an error.
+ * if page_buf->vfd_swmr_writer is FALSE, flag an error.
*
* 10) If the read is for metadata, is page aligned, is no
* larger than a page, test to see if the page buffer
@@ -3695,7 +3684,7 @@ done:
*
* If it contains a multipage metadata entry at the target
* address, satisfy the read from the multi-page metadata
- * entry if pb_ptr->vfd_swmr_write is TRUE, and flag an
+ * entry if page_buf->vfd_swmr_writer is TRUE, and flag an
* error otherwise.
*
* The above case analysis may be a bit hard to read. If so,
@@ -3780,7 +3769,7 @@ H5PB__read_meta(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size
{
hbool_t bypass = FALSE; /* flag indicating PB bypassed */
hbool_t speculative = FALSE; /* speculative read hint from mdc */
- H5PB_t * pb_ptr; /* Page buffer for this file */
+ H5PB_t * page_buf; /* Page buffer for this file */
H5PB_entry_t *entry_ptr; /* Pointer to page buffer entry */
H5FD_t * file; /* File driver pointer */
uint64_t page; /* page offset of addr */
@@ -3793,12 +3782,12 @@ H5PB__read_meta(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size
/* Sanity checks */
HDassert(shared);
- HDassert(shared->pb_ptr);
+ HDassert(shared->page_buf);
- pb_ptr = shared->pb_ptr;
+ page_buf = shared->page_buf;
- HDassert(pb_ptr->magic == H5PB__H5PB_T_MAGIC);
- HDassert(pb_ptr->min_rd_pages < pb_ptr->max_pages);
+ HDassert(page_buf->magic == H5PB__H5PB_T_MAGIC);
+ HDassert(page_buf->min_rd_pages < page_buf->max_pages);
HDassert(shared->lf);
file = shared->lf;
@@ -3807,8 +3796,8 @@ H5PB__read_meta(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size
HDassert(buf);
/* Calculate the aligned address of the first page */
- page = (addr / pb_ptr->page_size);
- page_addr = page * pb_ptr->page_size;
+ page = (addr / page_buf->page_size);
+ page_addr = page * page_buf->page_size;
if (page_addr != addr) { /* case 6 */
@@ -3822,26 +3811,26 @@ H5PB__read_meta(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size
offset = addr - page_addr;
- if ((offset + size) <= pb_ptr->page_size) {
+ if ((offset + size) <= page_buf->page_size) {
clipped_size = size;
}
else {
- clipped_size = size - ((offset + size) - pb_ptr->page_size);
+ clipped_size = size - ((offset + size) - page_buf->page_size);
}
HDassert(clipped_size > 0);
HDassert(clipped_size <= size);
- HDassert((offset + clipped_size) <= pb_ptr->page_size);
+ HDassert((offset + clipped_size) <= page_buf->page_size);
/* get the containing page */
- H5PB__SEARCH_INDEX(pb_ptr, page, entry_ptr, FAIL)
+ H5PB__SEARCH_INDEX(page_buf, page, entry_ptr, FAIL)
/* update hit rate stats */
- H5PB__UPDATE_PB_HIT_RATE_STATS(pb_ptr, ((entry_ptr) != NULL), TRUE, FALSE)
+ H5PB__UPDATE_PB_HIT_RATE_STATS(page_buf, ((entry_ptr) != NULL), TRUE, FALSE)
- if ((NULL == entry_ptr) && (H5PB__load_page(shared, pb_ptr, page_addr, type, &entry_ptr) < 0))
+ if ((NULL == entry_ptr) && (H5PB__load_page(shared, page_buf, page_addr, type, &entry_ptr) < 0))
HGOTO_ERROR(H5E_PAGEBUF, H5E_READERROR, FAIL, "page buffer page load request failed (1)")
@@ -3857,22 +3846,22 @@ H5PB__read_meta(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size
/* if the entry is on the LRU, update the replacement policy */
if ((!(entry_ptr->is_mpmde)) && (entry_ptr->delay_write_until == 0)) {
- H5PB__UPDATE_RP_FOR_ACCESS(pb_ptr, entry_ptr, FAIL)
+ H5PB__UPDATE_RP_FOR_ACCESS(page_buf, entry_ptr, FAIL)
}
}
else {
HDassert(page_addr == addr);
- if (size > pb_ptr->page_size) {
+ if (size > page_buf->page_size) {
/* search the page buffer for an entry at page */
- H5PB__SEARCH_INDEX(pb_ptr, page, entry_ptr, FAIL)
+ H5PB__SEARCH_INDEX(page_buf, page, entry_ptr, FAIL)
if (entry_ptr == NULL) { /* case 7 */
/* update hit rate stats */
- H5PB__UPDATE_PB_HIT_RATE_STATS(pb_ptr, FALSE, TRUE, size > pb_ptr->page_size)
+ H5PB__UPDATE_PB_HIT_RATE_STATS(page_buf, FALSE, TRUE, size > page_buf->page_size)
/* If the read is for metadata, is page aligned, is larger
* than page size, and there is no entry in the page buffer,
@@ -3884,7 +3873,7 @@ H5PB__read_meta(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size
bypass = TRUE;
- H5PB__UPDATE_STATS_FOR_BYPASS(pb_ptr, type, size);
+ H5PB__UPDATE_STATS_FOR_BYPASS(page_buf, type, size);
}
else {
@@ -3905,7 +3894,7 @@ H5PB__read_meta(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size
* the read from the existing regular entry.
*/
- HDassert(entry_ptr->size == pb_ptr->page_size);
+ HDassert(entry_ptr->size == page_buf->page_size);
speculative = H5C_get_curr_read_speculative(shared->cache);
@@ -3925,7 +3914,7 @@ H5PB__read_meta(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size
HGOTO_ERROR(H5E_PAGEBUF, H5E_READERROR, FAIL, "driver read request failed (2)")
bypass = TRUE;
- H5PB__UPDATE_STATS_FOR_BYPASS(pb_ptr, type, size);
+ H5PB__UPDATE_STATS_FOR_BYPASS(page_buf, type, size);
}
else {
@@ -3939,11 +3928,11 @@ H5PB__read_meta(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size
*/
if ((!(entry_ptr->is_mpmde)) && (entry_ptr->delay_write_until == 0)) {
- H5PB__UPDATE_RP_FOR_ACCESS(pb_ptr, entry_ptr, FAIL)
+ H5PB__UPDATE_RP_FOR_ACCESS(page_buf, entry_ptr, FAIL)
}
/* update hit rate stats */
- H5PB__UPDATE_PB_HIT_RATE_STATS(pb_ptr, TRUE, TRUE, FALSE)
+ H5PB__UPDATE_PB_HIT_RATE_STATS(page_buf, TRUE, TRUE, FALSE)
}
}
else { /* case 9 */
@@ -3951,15 +3940,15 @@ H5PB__read_meta(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size
/* If the read is for metadata, is page aligned, is larger
* than one page, and there is a multi-page metadata entry
* at the target page address, test to see if
- * pb_ptr->vfd_swmr_write is TRUE.
+ * page_buf->vfd_swmr_writer is TRUE.
*
* If it is, satisfy the read from the multi-page metadata
* entry, clipping the read if necessary.
*
- * if pb_ptr->vfd_swmr_write is FALSE, flag an error.
+ * if page_buf->vfd_swmr_writer is FALSE, flag an error.
*/
HDassert(entry_ptr->is_mpmde);
- HDassert(pb_ptr->vfd_swmr_writer);
+ HDassert(page_buf->vfd_swmr_writer);
if (size > entry_ptr->size) {
@@ -3978,11 +3967,11 @@ H5PB__read_meta(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size
*/
if ((!(entry_ptr->is_mpmde)) && (entry_ptr->delay_write_until == 0)) {
- H5PB__UPDATE_RP_FOR_ACCESS(pb_ptr, entry_ptr, FAIL)
+ H5PB__UPDATE_RP_FOR_ACCESS(page_buf, entry_ptr, FAIL)
}
/* update hit rate stats */
- H5PB__UPDATE_PB_HIT_RATE_STATS(pb_ptr, TRUE, TRUE, TRUE)
+ H5PB__UPDATE_PB_HIT_RATE_STATS(page_buf, TRUE, TRUE, TRUE)
}
}
}
@@ -4000,25 +3989,25 @@ H5PB__read_meta(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size
*
* If it contains a multipage metadata entry at the target
* address, satisfy the read from the multi-page metadata
- * entry if pb_ptr->vfd_swmr_write is TRUE, and flag an
+ * entry if page_buf->vfd_swmr_writer is TRUE, and flag an
* error otherwise.
*/
- HDassert(size <= pb_ptr->page_size);
+ HDassert(size <= page_buf->page_size);
/* get the containing page */
- H5PB__SEARCH_INDEX(pb_ptr, page, entry_ptr, FAIL)
+ H5PB__SEARCH_INDEX(page_buf, page, entry_ptr, FAIL)
/* update hit rate stats */
- H5PB__UPDATE_PB_HIT_RATE_STATS(pb_ptr, (entry_ptr != NULL), TRUE, FALSE)
+ H5PB__UPDATE_PB_HIT_RATE_STATS(page_buf, (entry_ptr != NULL), TRUE, FALSE)
- if ((NULL == entry_ptr) && (H5PB__load_page(shared, pb_ptr, page_addr, type, &entry_ptr) < 0))
+ if ((NULL == entry_ptr) && (H5PB__load_page(shared, page_buf, page_addr, type, &entry_ptr) < 0))
HGOTO_ERROR(H5E_PAGEBUF, H5E_READERROR, FAIL, "page buffer page load request failed (2)")
HDassert(entry_ptr);
HDassert(entry_ptr->magic == H5PB__H5PB_ENTRY_T_MAGIC);
HDassert(entry_ptr->is_metadata);
- HDassert((!(entry_ptr->is_mpmde)) || (pb_ptr->vfd_swmr_writer));
+ HDassert((!(entry_ptr->is_mpmde)) || (page_buf->vfd_swmr_writer));
/* copy data from the page into read buffer */
HDmemcpy((uint8_t *)buf, (uint8_t *)(entry_ptr->image_ptr), size);
@@ -4026,13 +4015,13 @@ H5PB__read_meta(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size
/* if the entry is on the LRU, update the replacement policy */
if ((!(entry_ptr->is_mpmde)) && (entry_ptr->delay_write_until == 0)) {
- H5PB__UPDATE_RP_FOR_ACCESS(pb_ptr, entry_ptr, FAIL)
+ H5PB__UPDATE_RP_FOR_ACCESS(page_buf, entry_ptr, FAIL)
}
}
}
if (!bypass)
- H5PB__UPDATE_STATS_FOR_ACCESS(pb_ptr, type, size);
+ H5PB__UPDATE_STATS_FOR_ACCESS(page_buf, type, size);
done:
@@ -4083,7 +4072,7 @@ done:
static herr_t
H5PB__read_raw(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size, void *buf /*out*/)
{
- H5PB_t * pb_ptr; /* Page buffer for this file */
+ H5PB_t * page_buf; /* Page buffer for this file */
H5PB_entry_t *entry_ptr; /* Pointer to page buffer entry */
uint64_t first_page; /* page offset of first I/O */
uint64_t last_page; /* page offset of last I/O */
@@ -4101,21 +4090,21 @@ H5PB__read_raw(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size,
/* Sanity checks */
HDassert(shared);
- HDassert(shared->pb_ptr);
+ HDassert(shared->page_buf);
- pb_ptr = shared->pb_ptr;
+ page_buf = shared->page_buf;
- HDassert(pb_ptr->magic == H5PB__H5PB_T_MAGIC);
- HDassert(pb_ptr->min_md_pages < pb_ptr->max_pages);
+ HDassert(page_buf->magic == H5PB__H5PB_T_MAGIC);
+ HDassert(page_buf->min_md_pages < page_buf->max_pages);
HDassert(H5FD_MEM_DRAW == type);
/* Calculate the aligned address of the first page */
- first_page = (addr / pb_ptr->page_size);
- first_page_addr = first_page * pb_ptr->page_size;
+ first_page = (addr / page_buf->page_size);
+ first_page_addr = first_page * page_buf->page_size;
/* Calculate the aligned address of the last page */
- last_page = ((addr + size - 1) / pb_ptr->page_size);
- last_page_addr = last_page * pb_ptr->page_size;
+ last_page = ((addr + size - 1) / page_buf->page_size);
+ last_page_addr = last_page * page_buf->page_size;
/* Calculate number of pages that this read spans. */
num_touched_pages = last_page - first_page + 1;
@@ -4127,13 +4116,13 @@ H5PB__read_raw(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size,
}
/* case 3) raw data read of page size or greater. */
- if (size >= pb_ptr->page_size) {
+ if (size >= page_buf->page_size) {
if (H5FD_read(shared->lf, type, addr, size, buf) < 0)
HGOTO_ERROR(H5E_PAGEBUF, H5E_READERROR, FAIL, "read failed")
- H5PB__UPDATE_STATS_FOR_BYPASS(pb_ptr, type, size);
+ H5PB__UPDATE_STATS_FOR_BYPASS(page_buf, type, size);
/* For each page that intersects with the above read, check to see
* if it exists in the page buffer, and if so, if it is dirty.
@@ -4147,10 +4136,10 @@ H5PB__read_raw(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size,
for (i = 0; i < num_touched_pages; i++) {
- H5PB__SEARCH_INDEX(pb_ptr, search_page, entry_ptr, FAIL)
+ H5PB__SEARCH_INDEX(page_buf, search_page, entry_ptr, FAIL)
/* update hit rate stats */
- H5PB__UPDATE_PB_HIT_RATE_STATS(pb_ptr, (entry_ptr != NULL), FALSE, FALSE)
+ H5PB__UPDATE_PB_HIT_RATE_STATS(page_buf, (entry_ptr != NULL), FALSE, FALSE)
if (entry_ptr) {
@@ -4158,7 +4147,7 @@ H5PB__read_raw(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size,
HDassert(!(entry_ptr->is_metadata));
HDassert(entry_ptr->page == search_page);
HDassert(entry_ptr->addr == search_addr);
- HDassert(entry_ptr->size == pb_ptr->page_size);
+ HDassert(entry_ptr->size == page_buf->page_size);
HDassert(entry_ptr->delay_write_until == 0);
/* This page and [addr, addr + size) should NOT be disjoint. */
HDassert(!(addr + size <= entry_ptr->addr || entry_ptr->addr + entry_ptr->size <= addr));
@@ -4179,12 +4168,12 @@ H5PB__read_raw(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size,
HDassert(((offset == 0) && (search_addr == addr)) ||
((offset > 0) && (search_addr < addr)));
- HDassert(pb_ptr->page_size >= offset);
+ HDassert(page_buf->page_size >= offset);
- HDassert(size >= pb_ptr->page_size - (size_t)offset);
+ HDassert(size >= page_buf->page_size - (size_t)offset);
HDmemcpy(buf, (uint8_t *)entry_ptr->image_ptr + offset,
- pb_ptr->page_size - (size_t)offset);
+ page_buf->page_size - (size_t)offset);
}
else if (i == num_touched_pages - 1) {
@@ -4197,8 +4186,8 @@ H5PB__read_raw(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size,
HDassert(addr < last_page_addr);
HDassert(last_page_addr < addr + size);
- offset = (num_touched_pages - 2) * pb_ptr->page_size +
- (pb_ptr->page_size - (addr - first_page_addr));
+ offset = (num_touched_pages - 2) * page_buf->page_size +
+ (page_buf->page_size - (addr - first_page_addr));
HDmemcpy((uint8_t *)buf + offset, entry_ptr->image_ptr,
(size_t)((addr + size) - last_page_addr));
@@ -4209,12 +4198,13 @@ H5PB__read_raw(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size,
* entireity.
*/
- offset = (i - 1) * pb_ptr->page_size + (pb_ptr->page_size - (addr - first_page_addr));
+ offset =
+ (i - 1) * page_buf->page_size + (page_buf->page_size - (addr - first_page_addr));
HDassert(addr + offset == search_addr);
- HDassert(offset + pb_ptr->page_size <= size);
+ HDassert(offset + page_buf->page_size <= size);
- HDmemcpy((uint8_t *)buf + offset, entry_ptr->image_ptr, pb_ptr->page_size);
+ HDmemcpy((uint8_t *)buf + offset, entry_ptr->image_ptr, page_buf->page_size);
}
/* we have touched the entry -- move it to the top
@@ -4229,13 +4219,13 @@ H5PB__read_raw(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size,
*
* Thus, just update the LRU.
*/
- H5PB__UPDATE_RP_FOR_ACCESS(pb_ptr, entry_ptr, FAIL)
+ H5PB__UPDATE_RP_FOR_ACCESS(page_buf, entry_ptr, FAIL)
} /* if ( entry_ptr->is_dirty ) */
} /* if ( entry_ptr ) */
search_page++;
- search_addr += pb_ptr->page_size;
+ search_addr += page_buf->page_size;
} /* end for */
}
@@ -4245,12 +4235,12 @@ H5PB__read_raw(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size,
* In this case, read the desired data from the page buffer, loading
* pages if necessary.
*/
- HDassert(size < pb_ptr->page_size);
+ HDassert(size < page_buf->page_size);
/* first page */
offset = addr - first_page_addr;
- if ((offset + size) <= pb_ptr->page_size) {
+ if ((offset + size) <= page_buf->page_size) {
HDassert(num_touched_pages == 1);
length = size;
@@ -4258,16 +4248,16 @@ H5PB__read_raw(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size,
else {
HDassert(num_touched_pages == 2);
- length = size - (pb_ptr->page_size - offset);
+ length = size - (page_buf->page_size - offset);
}
/* get the first page */
- H5PB__SEARCH_INDEX(pb_ptr, first_page, entry_ptr, FAIL)
+ H5PB__SEARCH_INDEX(page_buf, first_page, entry_ptr, FAIL)
/* update hit rate stats */
- H5PB__UPDATE_PB_HIT_RATE_STATS(pb_ptr, (entry_ptr != NULL), FALSE, FALSE)
+ H5PB__UPDATE_PB_HIT_RATE_STATS(page_buf, (entry_ptr != NULL), FALSE, FALSE)
- if ((NULL == entry_ptr) && (H5PB__load_page(shared, pb_ptr, first_page_addr, type, &entry_ptr) < 0))
+ if ((NULL == entry_ptr) && (H5PB__load_page(shared, page_buf, first_page_addr, type, &entry_ptr) < 0))
HGOTO_ERROR(H5E_PAGEBUF, H5E_READERROR, FAIL, "page buffer page load request failed (1)")
@@ -4278,7 +4268,7 @@ H5PB__read_raw(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size,
/* copy data from first page into read buffer */
HDmemcpy((uint8_t *)buf, ((uint8_t *)(entry_ptr->image_ptr) + offset), length);
- H5PB__UPDATE_RP_FOR_ACCESS(pb_ptr, entry_ptr, FAIL)
+ H5PB__UPDATE_RP_FOR_ACCESS(page_buf, entry_ptr, FAIL)
/* second page, if it exists */
if (num_touched_pages == 2) {
@@ -4289,13 +4279,13 @@ H5PB__read_raw(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size,
HDassert(offset + length == size);
/* get the second page */
- H5PB__SEARCH_INDEX(pb_ptr, last_page, entry_ptr, FAIL)
+ H5PB__SEARCH_INDEX(page_buf, last_page, entry_ptr, FAIL)
/* update hit rate stats */
- H5PB__UPDATE_PB_HIT_RATE_STATS(pb_ptr, (entry_ptr != NULL), FALSE, FALSE)
+ H5PB__UPDATE_PB_HIT_RATE_STATS(page_buf, (entry_ptr != NULL), FALSE, FALSE)
if ((NULL == entry_ptr) &&
- (H5PB__load_page(shared, pb_ptr, last_page_addr, type, &entry_ptr) < 0))
+ (H5PB__load_page(shared, page_buf, last_page_addr, type, &entry_ptr) < 0))
HGOTO_ERROR(H5E_PAGEBUF, H5E_READERROR, FAIL, "page buffer page load request failed (2)")
@@ -4307,11 +4297,11 @@ H5PB__read_raw(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size,
/* copy data from second page into read buffer */
HDmemcpy(((uint8_t *)(buf) + offset), (uint8_t *)(entry_ptr->image_ptr), length);
- H5PB__UPDATE_RP_FOR_ACCESS(pb_ptr, entry_ptr, FAIL)
+ H5PB__UPDATE_RP_FOR_ACCESS(page_buf, entry_ptr, FAIL)
}
} /* end else */
- H5PB__UPDATE_STATS_FOR_ACCESS(pb_ptr, type, size);
+ H5PB__UPDATE_STATS_FOR_ACCESS(page_buf, type, size);
done:
@@ -4382,7 +4372,7 @@ done:
static herr_t
H5PB__write_meta(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size, const void *buf /*in*/)
{
- H5PB_t * pb_ptr; /* Page buffer for this file */
+ H5PB_t * page_buf; /* Page buffer for this file */
H5PB_entry_t *entry_ptr; /* Pointer to page buffer entry */
uint64_t page; /* page offset of addr */
haddr_t page_addr; /* page containg addr */
@@ -4393,26 +4383,26 @@ H5PB__write_meta(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t siz
/* Sanity checks */
HDassert(shared);
- HDassert(shared->pb_ptr);
+ HDassert(shared->page_buf);
- pb_ptr = shared->pb_ptr;
+ page_buf = shared->page_buf;
- HDassert(pb_ptr->magic == H5PB__H5PB_T_MAGIC);
- HDassert(pb_ptr->min_rd_pages < pb_ptr->max_pages);
+ HDassert(page_buf->magic == H5PB__H5PB_T_MAGIC);
+ HDassert(page_buf->min_rd_pages < page_buf->max_pages);
HDassert(H5FD_MEM_DRAW != type);
HDassert(buf);
/* Calculate the aligned address of the first page */
- page = (addr / pb_ptr->page_size);
- page_addr = page * pb_ptr->page_size;
+ page = (addr / page_buf->page_size);
+ page_addr = page * page_buf->page_size;
- /* if size > pb_ptr->page_size, addr must be page aligned */
- HDassert((size <= pb_ptr->page_size) || (addr == page_addr));
+ /* if size > page_buf->page_size, addr must be page aligned */
+ HDassert((size <= page_buf->page_size) || (addr == page_addr));
- H5PB__SEARCH_INDEX(pb_ptr, page, entry_ptr, FAIL)
+ H5PB__SEARCH_INDEX(page_buf, page, entry_ptr, FAIL)
/* case 7) metadata write of size greater than page size. */
- if (size > pb_ptr->page_size) {
+ if (size > page_buf->page_size) {
offset = 0;
@@ -4436,7 +4426,7 @@ H5PB__write_meta(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t siz
*
* This is done via the call to H5PB__mark_entry_dirty()
*/
- HDassert(pb_ptr->vfd_swmr_writer);
+ HDassert(page_buf->vfd_swmr_writer);
HDassert(addr == page_addr);
/* If we're about to overwrite a single-page entry with multiple
@@ -4446,23 +4436,23 @@ H5PB__write_meta(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t siz
H5PB_entry_t *overlap;
void * new_image = H5MM_malloc(size);
uint64_t iter_page;
- uint64_t last_page = page + roundup(size, pb_ptr->page_size) / pb_ptr->page_size;
+ uint64_t last_page = page + roundup(size, page_buf->page_size) / page_buf->page_size;
for (iter_page = page + 1; iter_page < last_page; iter_page++) {
- H5PB__SEARCH_INDEX(pb_ptr, iter_page, overlap, FAIL)
+ H5PB__SEARCH_INDEX(page_buf, iter_page, overlap, FAIL)
HDassert(overlap == NULL);
}
if (new_image == NULL) {
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL, "couldn't extend entry");
}
- H5PB__UPDATE_RP_FOR_REMOVE(pb_ptr, entry_ptr, FAIL)
+ H5PB__UPDATE_RP_FOR_REMOVE(page_buf, entry_ptr, FAIL)
/* To keep statistics for the index and the tick-list up-to-date,
* it's expedient to remove and re-insert entries there.
*/
- H5PB__DELETE_FROM_INDEX(pb_ptr, entry_ptr, FAIL)
+ H5PB__DELETE_FROM_INDEX(page_buf, entry_ptr, FAIL)
if (entry_ptr->modified_this_tick)
- H5PB__REMOVE_FROM_TL(pb_ptr, entry_ptr, FAIL)
+ H5PB__REMOVE_FROM_TL(page_buf, entry_ptr, FAIL)
entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
entry_ptr->image_ptr = new_image;
@@ -4470,12 +4460,12 @@ H5PB__write_meta(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t siz
entry_ptr->size = size;
if (entry_ptr->modified_this_tick)
- H5PB__INSERT_IN_TL(pb_ptr, entry_ptr, FAIL)
- H5PB__INSERT_IN_INDEX(pb_ptr, entry_ptr, FAIL)
+ H5PB__INSERT_IN_TL(page_buf, entry_ptr, FAIL)
+ H5PB__INSERT_IN_INDEX(page_buf, entry_ptr, FAIL)
}
/* update hit rate stats */
- H5PB__UPDATE_PB_HIT_RATE_STATS(pb_ptr, (entry_ptr != NULL), TRUE, TRUE)
+ H5PB__UPDATE_PB_HIT_RATE_STATS(page_buf, (entry_ptr != NULL), TRUE, TRUE)
if (NULL == entry_ptr) {
@@ -4485,7 +4475,7 @@ H5PB__write_meta(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t siz
* Don't bother to try to make space for it, as VFD SWMR
* ignores the limits on page buffer size.
*/
- if (H5PB__create_new_page(pb_ptr, addr, size, type, FALSE, &entry_ptr) < 0)
+ if (H5PB__create_new_page(page_buf, addr, size, type, FALSE, &entry_ptr) < 0)
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL, "can't create new page buffer page")
@@ -4508,19 +4498,19 @@ H5PB__write_meta(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t siz
offset = addr - page_addr;
/* write cannot cross page boundaries. */
- HDassert((offset + size) <= pb_ptr->page_size);
+ HDassert((offset + size) <= page_buf->page_size);
/* update hit rate stats */
- H5PB__UPDATE_PB_HIT_RATE_STATS(pb_ptr, (entry_ptr != NULL), TRUE, FALSE)
+ H5PB__UPDATE_PB_HIT_RATE_STATS(page_buf, (entry_ptr != NULL), TRUE, FALSE)
- if (NULL == entry_ptr && H5PB__load_page(shared, pb_ptr, page_addr, type, &entry_ptr) < 0) {
+ if (NULL == entry_ptr && H5PB__load_page(shared, page_buf, page_addr, type, &entry_ptr) < 0) {
HGOTO_ERROR(H5E_PAGEBUF, H5E_READERROR, FAIL, "page buffer page load request failed (1)")
}
HDassert(entry_ptr->magic == H5PB__H5PB_ENTRY_T_MAGIC);
HDassert(entry_ptr->addr == page_addr);
HDassert(!(entry_ptr->is_mpmde));
- HDassert(entry_ptr->size == pb_ptr->page_size);
+ HDassert(entry_ptr->size == page_buf->page_size);
HDassert(size <= entry_ptr->size);
}
@@ -4529,19 +4519,19 @@ H5PB__write_meta(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t siz
/* copy data from the write buffer into the page image */
HDmemcpy((uint8_t *)(entry_ptr->image_ptr) + offset, buf, size);
- if (H5PB__mark_entry_dirty(shared, pb_ptr, entry_ptr) < 0)
+ if (H5PB__mark_entry_dirty(shared, page_buf, entry_ptr) < 0)
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL, "mark entry dirty failed")
/* Force the page buffer to retain the page until the end of
* the tick: add the entry to the tick list if it is not
* already present.
*/
- if (pb_ptr->vfd_swmr_writer && !entry_ptr->modified_this_tick) {
+ if (page_buf->vfd_swmr_writer && !entry_ptr->modified_this_tick) {
entry_ptr->modified_this_tick = TRUE;
- H5PB__INSERT_IN_TL(pb_ptr, entry_ptr, FAIL)
+ H5PB__INSERT_IN_TL(page_buf, entry_ptr, FAIL)
}
- H5PB__UPDATE_STATS_FOR_ACCESS(pb_ptr, type, size);
+ H5PB__UPDATE_STATS_FOR_ACCESS(page_buf, type, size);
done:
@@ -4592,7 +4582,7 @@ done:
static herr_t
H5PB__write_raw(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size, const void *buf /*out*/)
{
- H5PB_t * pb_ptr; /* Page buffer for this file */
+ H5PB_t * page_buf; /* Page buffer for this file */
H5PB_entry_t *entry_ptr; /* Pointer to page buffer entry */
uint64_t first_page; /* page offset of first I/O */
uint64_t last_page; /* page offset of last I/O */
@@ -4610,23 +4600,23 @@ H5PB__write_raw(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size
/* Sanity checks */
HDassert(shared);
- HDassert(shared->pb_ptr);
+ HDassert(shared->page_buf);
- pb_ptr = shared->pb_ptr;
+ page_buf = shared->page_buf;
- HDassert(pb_ptr->magic == H5PB__H5PB_T_MAGIC);
- HDassert(pb_ptr->min_md_pages < pb_ptr->max_pages);
+ HDassert(page_buf->magic == H5PB__H5PB_T_MAGIC);
+ HDassert(page_buf->min_md_pages < page_buf->max_pages);
HDassert(shared->lf);
HDassert(H5FD_MEM_DRAW == type);
/* Calculate the aligned address of the first page */
- first_page = (addr / pb_ptr->page_size);
- first_page_addr = first_page * pb_ptr->page_size;
+ first_page = (addr / page_buf->page_size);
+ first_page_addr = first_page * page_buf->page_size;
/* Calculate the aligned address of the last page */
- last_page = ((addr + size - 1) / pb_ptr->page_size);
- last_page_addr = last_page * pb_ptr->page_size;
+ last_page = ((addr + size - 1) / page_buf->page_size);
+ last_page_addr = last_page * page_buf->page_size;
/* Calculate number of pages that this read spans. */
num_touched_pages = last_page - first_page + 1;
@@ -4638,12 +4628,12 @@ H5PB__write_raw(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size
}
/* case 3) raw data write of page size or greater. */
- if (size >= pb_ptr->page_size) {
+ if (size >= page_buf->page_size) {
if (H5FD_write(shared->lf, type, addr, size, buf) < 0)
HGOTO_ERROR(H5E_PAGEBUF, H5E_WRITEERROR, FAIL, "write through metadata accumulator failed")
- H5PB__UPDATE_STATS_FOR_BYPASS(pb_ptr, type, size);
+ H5PB__UPDATE_STATS_FOR_BYPASS(page_buf, type, size);
/* For each page that intersects with the above write, check to see
* if it exists in the page buffer.
@@ -4659,10 +4649,10 @@ H5PB__write_raw(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size
for (i = 0; i < num_touched_pages; i++) {
- H5PB__SEARCH_INDEX(pb_ptr, search_page, entry_ptr, FAIL)
+ H5PB__SEARCH_INDEX(page_buf, search_page, entry_ptr, FAIL)
/* update hit rate stats */
- H5PB__UPDATE_PB_HIT_RATE_STATS(pb_ptr, (entry_ptr != NULL), FALSE, FALSE)
+ H5PB__UPDATE_PB_HIT_RATE_STATS(page_buf, (entry_ptr != NULL), FALSE, FALSE)
if (entry_ptr) {
@@ -4670,7 +4660,7 @@ H5PB__write_raw(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size
HDassert(!(entry_ptr->is_metadata));
HDassert(entry_ptr->page == search_page);
HDassert(entry_ptr->addr == search_addr);
- HDassert(entry_ptr->size == pb_ptr->page_size);
+ HDassert(entry_ptr->size == page_buf->page_size);
HDassert(entry_ptr->delay_write_until == 0);
HDassert(entry_ptr->addr <= addr + size);
@@ -4679,7 +4669,7 @@ H5PB__write_raw(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size
/* the page is completely overwritten -- mark it clean
* and evict it.
*/
- if ((entry_ptr->is_dirty) && (H5PB__mark_entry_clean(pb_ptr, entry_ptr) < 0))
+ if ((entry_ptr->is_dirty) && (H5PB__mark_entry_clean(page_buf, entry_ptr) < 0))
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL, "mark entry clean failed")
@@ -4699,13 +4689,13 @@ H5PB__write_raw(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size
offset = addr - first_page_addr;
HDassert(offset > 0);
- HDassert(pb_ptr->page_size >= offset);
- HDassert(size >= pb_ptr->page_size - (size_t)offset);
+ HDassert(page_buf->page_size >= offset);
+ HDassert(size >= page_buf->page_size - (size_t)offset);
HDmemcpy((uint8_t *)entry_ptr->image_ptr + offset, buf,
- pb_ptr->page_size - (size_t)offset);
+ page_buf->page_size - (size_t)offset);
- if (H5PB__mark_entry_dirty(shared, pb_ptr, entry_ptr) < 0)
+ if (H5PB__mark_entry_dirty(shared, page_buf, entry_ptr) < 0)
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL, "mark entry dirty failed (1)")
}
@@ -4718,13 +4708,13 @@ H5PB__write_raw(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size
HDassert(addr < last_page_addr);
HDassert(last_page_addr < addr + size);
- offset = (num_touched_pages - 2) * pb_ptr->page_size +
- (pb_ptr->page_size - (addr - first_page_addr));
+ offset = (num_touched_pages - 2) * page_buf->page_size +
+ (page_buf->page_size - (addr - first_page_addr));
HDmemcpy(entry_ptr->image_ptr, (const uint8_t *)buf + offset,
(size_t)((addr + size) - last_page_addr));
- if (H5PB__mark_entry_dirty(shared, pb_ptr, entry_ptr) < 0)
+ if (H5PB__mark_entry_dirty(shared, page_buf, entry_ptr) < 0)
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL, "mark entry dirty failed (2)")
}
@@ -4736,7 +4726,7 @@ H5PB__write_raw(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size
} /* if ( entry_ptr ) */
search_page++;
- search_addr += pb_ptr->page_size;
+ search_addr += page_buf->page_size;
} /* end for */
}
@@ -4746,12 +4736,12 @@ H5PB__write_raw(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size
* In this case, write the data to the page buffer, loading
* pages if necessary.
*/
- HDassert(size < pb_ptr->page_size);
+ HDassert(size < page_buf->page_size);
/* first page */
offset = addr - first_page_addr;
- if ((offset + size) <= pb_ptr->page_size) {
+ if ((offset + size) <= page_buf->page_size) {
HDassert(num_touched_pages == 1);
length = size;
@@ -4759,17 +4749,17 @@ H5PB__write_raw(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size
else {
HDassert(num_touched_pages == 2);
- length = pb_ptr->page_size - offset;
- HDassert(offset + length == pb_ptr->page_size);
+ length = page_buf->page_size - offset;
+ HDassert(offset + length == page_buf->page_size);
}
/* get the first page */
- H5PB__SEARCH_INDEX(pb_ptr, first_page, entry_ptr, FAIL)
+ H5PB__SEARCH_INDEX(page_buf, first_page, entry_ptr, FAIL)
/* update hit rate stats */
- H5PB__UPDATE_PB_HIT_RATE_STATS(pb_ptr, (entry_ptr != NULL), FALSE, FALSE)
+ H5PB__UPDATE_PB_HIT_RATE_STATS(page_buf, (entry_ptr != NULL), FALSE, FALSE)
- if ((NULL == entry_ptr) && (H5PB__load_page(shared, pb_ptr, first_page_addr, type, &entry_ptr) < 0))
+ if ((NULL == entry_ptr) && (H5PB__load_page(shared, page_buf, first_page_addr, type, &entry_ptr) < 0))
HGOTO_ERROR(H5E_PAGEBUF, H5E_READERROR, FAIL, "page buffer page load request failed (1)")
@@ -4780,7 +4770,7 @@ H5PB__write_raw(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size
/* copy data from the write buffer into the first page */
HDmemcpy(((uint8_t *)(entry_ptr->image_ptr)) + offset, (const uint8_t *)buf, length);
- if (H5PB__mark_entry_dirty(shared, pb_ptr, entry_ptr) < 0)
+ if (H5PB__mark_entry_dirty(shared, page_buf, entry_ptr) < 0)
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL, "mark entry dirty failed (3)")
@@ -4793,13 +4783,13 @@ H5PB__write_raw(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size
HDassert(offset + length == size);
/* get the first page */
- H5PB__SEARCH_INDEX(pb_ptr, last_page, entry_ptr, FAIL)
+ H5PB__SEARCH_INDEX(page_buf, last_page, entry_ptr, FAIL)
/* update hit rate stats */
- H5PB__UPDATE_PB_HIT_RATE_STATS(pb_ptr, (entry_ptr != NULL), FALSE, FALSE)
+ H5PB__UPDATE_PB_HIT_RATE_STATS(page_buf, (entry_ptr != NULL), FALSE, FALSE)
if ((NULL == entry_ptr) &&
- (H5PB__load_page(shared, pb_ptr, last_page_addr, type, &entry_ptr) < 0))
+ (H5PB__load_page(shared, page_buf, last_page_addr, type, &entry_ptr) < 0))
HGOTO_ERROR(H5E_PAGEBUF, H5E_READERROR, FAIL, "page buffer page load request failed (2)")
@@ -4811,13 +4801,13 @@ H5PB__write_raw(H5F_shared_t *shared, H5FD_mem_t type, haddr_t addr, size_t size
/* copy data from the write buffer into the first page */
HDmemcpy((uint8_t *)(entry_ptr->image_ptr), ((const uint8_t *)(buf) + offset), length);
- if (H5PB__mark_entry_dirty(shared, pb_ptr, entry_ptr) < 0)
+ if (H5PB__mark_entry_dirty(shared, page_buf, entry_ptr) < 0)
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL, "mark entry dirty failed (3)")
}
}
- H5PB__UPDATE_STATS_FOR_ACCESS(pb_ptr, type, size);
+ H5PB__UPDATE_STATS_FOR_ACCESS(page_buf, type, size);
done:
diff --git a/src/H5PBpkg.h b/src/H5PBpkg.h
index e1b06aa..f06206b 100644
--- a/src/H5PBpkg.h
+++ b/src/H5PBpkg.h
@@ -27,10 +27,10 @@
* Purpose: This file contains declarations which are normally visible
* only within the H5PB package.
*
- * Source files outside the H5PB package should include
- * H5PBprivate.h instead.
+ * Source files outside the H5PB package should include
+ * H5PBprivate.h instead.
*
- * Programmer: John Mainzer -- 10/07/18
+ * Programmer: John Mainzer -- 10/07/18
*/
/**************************/
@@ -62,7 +62,7 @@
* to the HGOTO_ERROR macro, which may not be appropriate in all cases.
* If so, we will need versions of the insertion and deletion macros which
* do not reference the sanity checking macros.
- * JRM - 10/07/18
+ * JRM -- 10/07/18
*
****************************************************************************/
@@ -432,12 +432,12 @@
#if H5PB__COLLECT_PAGE_BUFFER_STATS
-#define H5PB__UPDATE_PB_HIT_RATE_STATS(pb_ptr, hit, is_metadata, is_mpmde) \
+#define H5PB__UPDATE_PB_HIT_RATE_STATS(page_buf, hit, is_metadata, is_mpmde) \
{ \
int ii; \
\
- HDassert(pb_ptr); \
- HDassert((pb_ptr)->magic == H5PB__H5PB_T_MAGIC); \
+ HDassert(page_buf); \
+ HDassert((page_buf)->magic == H5PB__H5PB_T_MAGIC); \
\
if (is_metadata) { \
if (is_mpmde) { \
@@ -451,132 +451,132 @@
ii = H5PB__STATS_RD; \
} \
if (hit) \
- ((pb_ptr)->hits[ii])++; \
+ ((page_buf)->hits[ii])++; \
else \
- ((pb_ptr)->misses[ii])++; \
+ ((page_buf)->misses[ii])++; \
} /* H5PB__UPDATE_PB_HIT_RATE_STATS */
-#define H5PB__UPDATE_HT_SIZE_STATS(pb_ptr) \
- if ((pb_ptr)->index_len > (pb_ptr)->max_index_len) \
- (pb_ptr)->max_index_len = (pb_ptr)->index_len; \
- if ((pb_ptr)->clean_index_len > (pb_ptr)->max_clean_index_len) \
- (pb_ptr)->max_clean_index_len = (pb_ptr)->clean_index_len; \
- if ((pb_ptr)->dirty_index_len > (pb_ptr)->max_dirty_index_len) \
- (pb_ptr)->max_dirty_index_len = (pb_ptr)->dirty_index_len; \
- if ((pb_ptr)->index_size > (pb_ptr)->max_index_size) \
- (pb_ptr)->max_index_size = (pb_ptr)->index_size; \
- if ((pb_ptr)->clean_index_size > (pb_ptr)->max_clean_index_size) \
- (pb_ptr)->max_clean_index_size = (pb_ptr)->clean_index_size; \
- if ((pb_ptr)->dirty_index_size > (pb_ptr)->max_dirty_index_size) \
- (pb_ptr)->max_dirty_index_size = (pb_ptr)->dirty_index_size; \
- if ((pb_ptr)->curr_md_pages > (pb_ptr)->max_md_pages) \
- (pb_ptr)->max_md_pages = (pb_ptr)->curr_md_pages; \
- if ((pb_ptr)->curr_rd_pages > (pb_ptr)->max_rd_pages) \
- (pb_ptr)->max_rd_pages = (pb_ptr)->curr_rd_pages; \
- if ((pb_ptr)->mpmde_count > (pb_ptr)->max_mpmde_count) \
- (pb_ptr)->max_rd_pages = (pb_ptr)->curr_rd_pages;
-
-#define H5PB__UPDATE_STATS_FOR_HT_INSERTION(pb_ptr) ((pb_ptr)->total_ht_insertions)++;
-
-#define H5PB__UPDATE_STATS_FOR_HT_DELETION(pb_ptr) (pb_ptr)->total_ht_deletions++;
-
-#define H5PB__UPDATE_STATS_FOR_HT_SEARCH(pb_ptr, success, depth) \
+#define H5PB__UPDATE_HT_SIZE_STATS(page_buf) \
+ if ((page_buf)->index_len > (page_buf)->max_index_len) \
+ (page_buf)->max_index_len = (page_buf)->index_len; \
+ if ((page_buf)->clean_index_len > (page_buf)->max_clean_index_len) \
+ (page_buf)->max_clean_index_len = (page_buf)->clean_index_len; \
+ if ((page_buf)->dirty_index_len > (page_buf)->max_dirty_index_len) \
+ (page_buf)->max_dirty_index_len = (page_buf)->dirty_index_len; \
+ if ((page_buf)->index_size > (page_buf)->max_index_size) \
+ (page_buf)->max_index_size = (page_buf)->index_size; \
+ if ((page_buf)->clean_index_size > (page_buf)->max_clean_index_size) \
+ (page_buf)->max_clean_index_size = (page_buf)->clean_index_size; \
+ if ((page_buf)->dirty_index_size > (page_buf)->max_dirty_index_size) \
+ (page_buf)->max_dirty_index_size = (page_buf)->dirty_index_size; \
+ if ((page_buf)->curr_md_pages > (page_buf)->max_md_pages) \
+ (page_buf)->max_md_pages = (page_buf)->curr_md_pages; \
+ if ((page_buf)->curr_rd_pages > (page_buf)->max_rd_pages) \
+ (page_buf)->max_rd_pages = (page_buf)->curr_rd_pages; \
+ if ((page_buf)->mpmde_count > (page_buf)->max_mpmde_count) \
+ (page_buf)->max_rd_pages = (page_buf)->curr_rd_pages;
+
+#define H5PB__UPDATE_STATS_FOR_HT_INSERTION(page_buf) ((page_buf)->total_ht_insertions)++;
+
+#define H5PB__UPDATE_STATS_FOR_HT_DELETION(page_buf) (page_buf)->total_ht_deletions++;
+
+#define H5PB__UPDATE_STATS_FOR_HT_SEARCH(page_buf, success, depth) \
HDassert(depth >= 0); \
if (success) { \
- (pb_ptr)->successful_ht_searches++; \
- (pb_ptr)->total_successful_ht_search_depth += (int64_t)depth; \
+ (page_buf)->successful_ht_searches++; \
+ (page_buf)->total_successful_ht_search_depth += (int64_t)depth; \
} \
else { \
- (pb_ptr)->failed_ht_searches++; \
- (pb_ptr)->total_failed_ht_search_depth += (int64_t)depth; \
+ (page_buf)->failed_ht_searches++; \
+ (page_buf)->total_failed_ht_search_depth += (int64_t)depth; \
}
-#define H5PB__UPDATE_LRU_SIZE_STATS(pb_ptr) \
- if ((pb_ptr)->LRU_len > (pb_ptr)->max_lru_len) \
- (pb_ptr)->max_lru_len = (pb_ptr)->LRU_len; \
- if ((pb_ptr)->LRU_size > (pb_ptr)->max_lru_size) \
- (pb_ptr)->max_lru_size = (pb_ptr)->LRU_size;
+#define H5PB__UPDATE_LRU_SIZE_STATS(page_buf) \
+ if ((page_buf)->LRU_len > (page_buf)->max_lru_len) \
+ (page_buf)->max_lru_len = (page_buf)->LRU_len; \
+ if ((page_buf)->LRU_size > (page_buf)->max_lru_size) \
+ (page_buf)->max_lru_size = (page_buf)->LRU_size;
-#define H5PB__UPDATE_STATS_FOR_LRU_MD_SKIP(pb_ptr) ((pb_ptr)->lru_md_skips)++;
+#define H5PB__UPDATE_STATS_FOR_LRU_MD_SKIP(page_buf) ((page_buf)->lru_md_skips)++;
-#define H5PB__UPDATE_STATS_FOR_LRU_RD_SKIP(pb_ptr) ((pb_ptr)->lru_rd_skips)++;
+#define H5PB__UPDATE_STATS_FOR_LRU_RD_SKIP(page_buf) ((page_buf)->lru_rd_skips)++;
-#define H5PB__UPDATE_STATS_FOR_LRU_TL_SKIP(pb_ptr) \
+#define H5PB__UPDATE_STATS_FOR_LRU_TL_SKIP(page_buf) \
{ \
- HDassert(pb_ptr->vfd_swmr_writer); \
- ((pb_ptr)->lru_tl_skips)++; \
+ HDassert(page_buf->vfd_swmr_writer); \
+ ((page_buf)->lru_tl_skips)++; \
}
-#define H5PB__UPDATE_TL_SIZE_STATS(pb_ptr) \
+#define H5PB__UPDATE_TL_SIZE_STATS(page_buf) \
{ \
- HDassert((pb_ptr)->vfd_swmr_writer); \
- if ((pb_ptr)->tl_len > (pb_ptr)->max_tl_len) \
- (pb_ptr)->max_tl_len = (pb_ptr)->tl_len; \
- if ((pb_ptr)->tl_size > (pb_ptr)->max_tl_size) \
- (pb_ptr)->max_tl_size = (pb_ptr)->tl_size; \
+ HDassert((page_buf)->vfd_swmr_writer); \
+ if ((page_buf)->tl_len > (page_buf)->max_tl_len) \
+ (page_buf)->max_tl_len = (page_buf)->tl_len; \
+ if ((page_buf)->tl_size > (page_buf)->max_tl_size) \
+ (page_buf)->max_tl_size = (page_buf)->tl_size; \
}
-#define H5PB__UPDATE_DWL_SIZE_STATS(pb_ptr) \
+#define H5PB__UPDATE_DWL_SIZE_STATS(page_buf) \
{ \
- HDassert((pb_ptr)->vfd_swmr_writer); \
- if ((pb_ptr)->dwl_len > (pb_ptr)->max_dwl_len) \
- (pb_ptr)->max_dwl_len = (pb_ptr)->dwl_len; \
- if ((pb_ptr)->dwl_size > (pb_ptr)->max_dwl_size) \
- (pb_ptr)->max_dwl_size = (pb_ptr)->dwl_size; \
+ HDassert((page_buf)->vfd_swmr_writer); \
+ if ((page_buf)->dwl_len > (page_buf)->max_dwl_len) \
+ (page_buf)->max_dwl_len = (page_buf)->dwl_len; \
+ if ((page_buf)->dwl_size > (page_buf)->max_dwl_size) \
+ (page_buf)->max_dwl_size = (page_buf)->dwl_size; \
}
-#define H5PB__UPDATE_DWL_DELAYED_WRITES(pb_ptr, insertion_depth, delay) \
+#define H5PB__UPDATE_DWL_DELAYED_WRITES(page_buf, insertion_depth, delay) \
{ \
- HDassert((pb_ptr)->vfd_swmr_writer); \
- (pb_ptr)->delayed_writes++; \
- (pb_ptr)->total_delay += (int64_t)(delay); \
- (pb_ptr)->total_dwl_ins_depth += (insertion_depth); \
+ HDassert((page_buf)->vfd_swmr_writer); \
+ (page_buf)->delayed_writes++; \
+ (page_buf)->total_delay += (int64_t)(delay); \
+ (page_buf)->total_dwl_ins_depth += (insertion_depth); \
}
-#define H5PB__UPDATE_STATS_FOR_ACCESS(pb_ptr, type, size) \
+#define H5PB__UPDATE_STATS_FOR_ACCESS(page_buf, type, size) \
{ \
int _i; \
\
- HDassert(pb_ptr); \
- HDassert((pb_ptr)->magic == H5PB__H5PB_T_MAGIC); \
+ HDassert(page_buf); \
+ HDassert((page_buf)->magic == H5PB__H5PB_T_MAGIC); \
\
if (H5FD_MEM_DRAW == (type)) { \
_i = H5PB__STATS_RD; \
} \
- else if ((size) > (pb_ptr)->page_size) { \
+ else if ((size) > (page_buf)->page_size) { \
_i = H5PB__STATS_MPMDE; \
} \
else { \
_i = H5PB__STATS_MD; \
} \
- ((pb_ptr)->accesses[_i])++; \
+ ((page_buf)->accesses[_i])++; \
} /* H5PB__UPDATE_STATS_FOR_ACCESS */
-#define H5PB__UPDATE_STATS_FOR_BYPASS(pb_ptr, type, size) \
+#define H5PB__UPDATE_STATS_FOR_BYPASS(page_buf, type, size) \
{ \
int ii; \
\
- HDassert(pb_ptr); \
- HDassert((pb_ptr)->magic == H5PB__H5PB_T_MAGIC); \
+ HDassert(page_buf); \
+ HDassert((page_buf)->magic == H5PB__H5PB_T_MAGIC); \
\
if (H5FD_MEM_DRAW == (type)) { \
ii = H5PB__STATS_RD; \
} \
- else if ((size) > (pb_ptr)->page_size) { \
+ else if ((size) > (page_buf)->page_size) { \
ii = H5PB__STATS_MPMDE; \
} \
else { \
ii = H5PB__STATS_MD; \
} \
- ((pb_ptr)->bypasses[ii])++; \
+ ((page_buf)->bypasses[ii])++; \
} /* H5PB__UPDATE_STATS_FOR_BYPASS */
-#define H5PB__UPDATE_STATS_FOR_FLUSH(pb_ptr, entry_ptr) \
+#define H5PB__UPDATE_STATS_FOR_FLUSH(page_buf, entry_ptr) \
{ \
int i; \
\
- HDassert(pb_ptr); \
- HDassert((pb_ptr)->magic == H5PB__H5PB_T_MAGIC); \
+ HDassert(page_buf); \
+ HDassert((page_buf)->magic == H5PB__H5PB_T_MAGIC); \
HDassert(entry_ptr); \
HDassert((entry_ptr)->magic == H5PB__H5PB_ENTRY_T_MAGIC); \
\
@@ -591,15 +591,15 @@
else { \
i = H5PB__STATS_RD; \
} \
- ((pb_ptr)->flushes[i])++; \
+ ((page_buf)->flushes[i])++; \
} /* H5PB__UPDATE_STATS_FOR_FLUSH */
-#define H5PB__UPDATE_STATS_FOR_EVICTION(pb_ptr, entry_ptr) \
+#define H5PB__UPDATE_STATS_FOR_EVICTION(page_buf, entry_ptr) \
{ \
int i; \
\
- HDassert(pb_ptr); \
- HDassert((pb_ptr)->magic == H5PB__H5PB_T_MAGIC); \
+ HDassert(page_buf); \
+ HDassert((page_buf)->magic == H5PB__H5PB_T_MAGIC); \
HDassert(entry_ptr); \
HDassert((entry_ptr)->magic == H5PB__H5PB_ENTRY_T_MAGIC); \
\
@@ -614,15 +614,15 @@
else { \
i = H5PB__STATS_RD; \
} \
- ((pb_ptr)->evictions[i])++; \
+ ((page_buf)->evictions[i])++; \
} /* H5PB__UPDATE_STATS_FOR_EVICTION */
-#define H5PB__UPDATE_STATS_FOR_CLEAR(pb_ptr, entry_ptr) \
+#define H5PB__UPDATE_STATS_FOR_CLEAR(page_buf, entry_ptr) \
{ \
int i; \
\
- HDassert(pb_ptr); \
- HDassert((pb_ptr)->magic == H5PB__H5PB_T_MAGIC); \
+ HDassert(page_buf); \
+ HDassert((page_buf)->magic == H5PB__H5PB_T_MAGIC); \
HDassert(entry_ptr); \
HDassert((entry_ptr)->magic == H5PB__H5PB_ENTRY_T_MAGIC); \
\
@@ -637,15 +637,15 @@
else { \
i = H5PB__STATS_RD; \
} \
- ((pb_ptr)->clears[i])++; \
+ ((page_buf)->clears[i])++; \
} /* H5PB__UPDATE_STATS_FOR_CLEAR */
-#define H5PB__UPDATE_STATS_FOR_INSERTION(pb_ptr, entry_ptr) \
+#define H5PB__UPDATE_STATS_FOR_INSERTION(page_buf, entry_ptr) \
{ \
int i; \
\
- HDassert(pb_ptr); \
- HDassert((pb_ptr)->magic == H5PB__H5PB_T_MAGIC); \
+ HDassert(page_buf); \
+ HDassert((page_buf)->magic == H5PB__H5PB_T_MAGIC); \
HDassert(entry_ptr); \
HDassert((entry_ptr)->magic == H5PB__H5PB_ENTRY_T_MAGIC); \
\
@@ -660,15 +660,15 @@
else { \
i = H5PB__STATS_RD; \
} \
- ((pb_ptr)->insertions[i])++; \
+ ((page_buf)->insertions[i])++; \
} /* H5PB__UPDATE_STATS_FOR_INSERTION */
-#define H5PB__UPDATE_STATS_FOR_LOAD(pb_ptr, entry_ptr) \
+#define H5PB__UPDATE_STATS_FOR_LOAD(page_buf, entry_ptr) \
{ \
int i; \
\
- HDassert(pb_ptr); \
- HDassert((pb_ptr)->magic == H5PB__H5PB_T_MAGIC); \
+ HDassert(page_buf); \
+ HDassert((page_buf)->magic == H5PB__H5PB_T_MAGIC); \
HDassert(entry_ptr); \
HDassert((entry_ptr)->magic == H5PB__H5PB_ENTRY_T_MAGIC); \
\
@@ -683,47 +683,47 @@
else { \
i = H5PB__STATS_RD; \
} \
- ((pb_ptr)->loads[i])++; \
+ ((page_buf)->loads[i])++; \
} /* H5PB__UPDATE_STATS_FOR_LOAD */
-#define H5PB__UPDATE_STATS_FOR_READ_SPLIT(pb_ptr) \
+#define H5PB__UPDATE_STATS_FOR_READ_SPLIT(page_buf) \
{ \
- HDassert(pb_ptr); \
- HDassert((pb_ptr)->magic == H5PB__H5PB_T_MAGIC); \
- (pb_ptr->md_read_splits)++; \
+ HDassert(page_buf); \
+ HDassert((page_buf)->magic == H5PB__H5PB_T_MAGIC); \
+ (page_buf->md_read_splits)++; \
} /* H5PB__UPDATE_STATS_FOR_READ_SPLIT */
-#define H5PB__UPDATE_STATS_FOR_WRITE_SPLIT(pb_ptr) \
+#define H5PB__UPDATE_STATS_FOR_WRITE_SPLIT(page_buf) \
{ \
- HDassert(pb_ptr); \
- HDassert((pb_ptr)->magic == H5PB__H5PB_T_MAGIC); \
- (pb_ptr->md_write_splits)++; \
+ HDassert(page_buf); \
+ HDassert((page_buf)->magic == H5PB__H5PB_T_MAGIC); \
+ (page_buf->md_write_splits)++; \
} /* H5PB__UPDATE_STATS_FOR_READ_SPLIT */
#else /* H5PB__COLLECT_PAGE_BUFFER_STATS */
-#define H5PB__UPDATE_PB_HIT_RATE_STATS(pb_ptr, hit, is_metadata, is_mpmde)
-#define H5PB__UPDATE_HT_SIZE_STATS(pb_ptr)
-#define H5PB__UPDATE_STATS_FOR_HT_INSERTION(pb_ptr)
-#define H5PB__UPDATE_STATS_FOR_HT_DELETION(pb_ptr)
-#define H5PB__UPDATE_HT_SEARCH_STATS(pb_ptr, success, depth)
-#define H5PB__UPDATE_LRU_SIZE_STATS(pb_ptr)
-#define H5PB__UPDATE_STATS_FOR_LRU_MD_SKIP(pb_ptr)
-#define H5PB__UPDATE_STATS_FOR_LRU_RD_SKIP(pb_ptr)
-#define H5PB__UPDATE_STATS_FOR_LRU_TL_SKIP(pb_ptr)
-#define H5PB__UPDATE_STATS_FOR_LRU_DWL_SKIP(pb_ptr)
-#define H5PB__UPDATE_TL_SIZE_STATS(pb_ptr)
-#define H5PB__UPDATE_DWL_SIZE_STATS(pb_ptr)
-#define H5PB__UPDATE_DWL_DELAYED_WRITES(pb_ptr, insertion_depth, delay)
-#define H5PB__UPDATE_STATS_FOR_ACCESS(pb_ptr, type, size)
-#define H5PB__UPDATE_STATS_FOR_BYPASS(pb_ptr, type, size)
-#define H5PB__UPDATE_STATS_FOR_FLUSH(pb_ptr, entry_ptr)
-#define H5PB__UPDATE_STATS_FOR_EVICTION(pb_ptr, entry_ptr)
-#define H5PB__UPDATE_STATS_FOR_CLEAR(pb_ptr, entry_ptr)
-#define H5PB__UPDATE_STATS_FOR_INSERTION(pb_ptr, entry_ptr)
-#define H5PB__UPDATE_STATS_FOR_LOAD(pb_ptr, entry_ptr)
-#define H5PB__UPDATE_STATS_FOR_READ_SPLIT(pb_ptr)
-#define H5PB__UPDATE_STATS_FOR_WRITE_SPLIT(pb_ptr)
+#define H5PB__UPDATE_PB_HIT_RATE_STATS(page_buf, hit, is_metadata, is_mpmde)
+#define H5PB__UPDATE_HT_SIZE_STATS(page_buf)
+#define H5PB__UPDATE_STATS_FOR_HT_INSERTION(page_buf)
+#define H5PB__UPDATE_STATS_FOR_HT_DELETION(page_buf)
+#define H5PB__UPDATE_HT_SEARCH_STATS(page_buf, success, depth)
+#define H5PB__UPDATE_LRU_SIZE_STATS(page_buf)
+#define H5PB__UPDATE_STATS_FOR_LRU_MD_SKIP(page_buf)
+#define H5PB__UPDATE_STATS_FOR_LRU_RD_SKIP(page_buf)
+#define H5PB__UPDATE_STATS_FOR_LRU_TL_SKIP(page_buf)
+#define H5PB__UPDATE_STATS_FOR_LRU_DWL_SKIP(page_buf)
+#define H5PB__UPDATE_TL_SIZE_STATS(page_buf)
+#define H5PB__UPDATE_DWL_SIZE_STATS(page_buf)
+#define H5PB__UPDATE_DWL_DELAYED_WRITES(page_buf, insertion_depth, delay)
+#define H5PB__UPDATE_STATS_FOR_ACCESS(page_buf, type, size)
+#define H5PB__UPDATE_STATS_FOR_BYPASS(page_buf, type, size)
+#define H5PB__UPDATE_STATS_FOR_FLUSH(page_buf, entry_ptr)
+#define H5PB__UPDATE_STATS_FOR_EVICTION(page_buf, entry_ptr)
+#define H5PB__UPDATE_STATS_FOR_CLEAR(page_buf, entry_ptr)
+#define H5PB__UPDATE_STATS_FOR_INSERTION(page_buf, entry_ptr)
+#define H5PB__UPDATE_STATS_FOR_LOAD(page_buf, entry_ptr)
+#define H5PB__UPDATE_STATS_FOR_READ_SPLIT(page_buf)
+#define H5PB__UPDATE_STATS_FOR_WRITE_SPLIT(page_buf)
#endif /* H5PB__COLLECT_PAGE_BUFFER_STATS */
@@ -751,311 +751,313 @@
#if H5PB__DO_SANITY_CHECKS
-#define H5PB__PRE_HT_INSERT_SC(pb_ptr, entry_ptr, fail_val) \
- if (((pb_ptr) == NULL) || ((pb_ptr)->magic != H5PB__H5PB_T_MAGIC) || ((entry_ptr) == NULL) || \
+#define H5PB__PRE_HT_INSERT_SC(page_buf, entry_ptr, fail_val) \
+ if (((page_buf) == NULL) || ((page_buf)->magic != H5PB__H5PB_T_MAGIC) || ((entry_ptr) == NULL) || \
((entry_ptr)->ht_next != NULL) || ((entry_ptr)->ht_prev != NULL) || ((entry_ptr)->size <= 0) || \
(H5PB__HASH_FCN((entry_ptr)->page) < 0) || \
(H5PB__HASH_FCN((entry_ptr)->page) >= H5PB__HASH_TABLE_LEN) || \
- ((pb_ptr)->index_size != ((pb_ptr)->clean_index_size + (pb_ptr)->dirty_index_size)) || \
- ((pb_ptr)->index_size < ((pb_ptr)->clean_index_size)) || \
- ((pb_ptr)->index_size < ((pb_ptr)->dirty_index_size)) || \
- ((pb_ptr)->index_len != (pb_ptr)->il_len) || ((pb_ptr)->index_size != (pb_ptr)->il_size) || \
- ((pb_ptr)->curr_pages < 0) || ((pb_ptr)->curr_rd_pages < 0) || ((pb_ptr)->curr_md_pages < 0) || \
- (((pb_ptr)->curr_pages != ((pb_ptr)->curr_md_pages + (pb_ptr)->curr_rd_pages))) || \
- ((pb_ptr)->mpmde_count < 0) || \
- ((pb_ptr)->index_len != ((pb_ptr)->curr_pages + (pb_ptr)->mpmde_count))) { \
+ ((page_buf)->index_size != ((page_buf)->clean_index_size + (page_buf)->dirty_index_size)) || \
+ ((page_buf)->index_size < ((page_buf)->clean_index_size)) || \
+ ((page_buf)->index_size < ((page_buf)->dirty_index_size)) || \
+ ((page_buf)->index_len != (page_buf)->il_len) || ((page_buf)->index_size != (page_buf)->il_size) || \
+ ((page_buf)->curr_pages < 0) || ((page_buf)->curr_rd_pages < 0) || \
+ ((page_buf)->curr_md_pages < 0) || \
+ (((page_buf)->curr_pages != ((page_buf)->curr_md_pages + (page_buf)->curr_rd_pages))) || \
+ ((page_buf)->mpmde_count < 0) || \
+ ((page_buf)->index_len != ((page_buf)->curr_pages + (page_buf)->mpmde_count))) { \
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, fail_val, "pre HT insert SC failed") \
}
-#define H5PB__POST_HT_INSERT_SC(pb_ptr, entry_ptr, fail_val) \
- if (((pb_ptr) == NULL) || ((pb_ptr)->magic != H5PB__H5PB_T_MAGIC) || \
- ((pb_ptr)->index_size != ((pb_ptr)->clean_index_size + (pb_ptr)->dirty_index_size)) || \
- ((pb_ptr)->index_size < ((pb_ptr)->clean_index_size)) || \
- ((pb_ptr)->index_size < ((pb_ptr)->dirty_index_size)) || \
- ((pb_ptr)->index_len != (pb_ptr)->il_len) || \
- ((pb_ptr)->index_len != ((pb_ptr)->curr_pages + (pb_ptr)->mpmde_count)) || \
- ((pb_ptr)->index_size != (pb_ptr)->il_size)) { \
+#define H5PB__POST_HT_INSERT_SC(page_buf, entry_ptr, fail_val) \
+ if (((page_buf) == NULL) || ((page_buf)->magic != H5PB__H5PB_T_MAGIC) || \
+ ((page_buf)->index_size != ((page_buf)->clean_index_size + (page_buf)->dirty_index_size)) || \
+ ((page_buf)->index_size < ((page_buf)->clean_index_size)) || \
+ ((page_buf)->index_size < ((page_buf)->dirty_index_size)) || \
+ ((page_buf)->index_len != (page_buf)->il_len) || \
+ ((page_buf)->index_len != ((page_buf)->curr_pages + (page_buf)->mpmde_count)) || \
+ ((page_buf)->index_size != (page_buf)->il_size)) { \
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, fail_val, "post HT insert SC failed") \
}
-#define H5PB__PRE_HT_REMOVE_SC(pb_ptr, entry_ptr) \
- if (((pb_ptr) == NULL) || ((pb_ptr)->magic != H5PB__H5PB_T_MAGIC) || ((pb_ptr)->index_len < 1) || \
- ((entry_ptr) == NULL) || ((pb_ptr)->index_size < (int64_t)((entry_ptr)->size)) || \
+#define H5PB__PRE_HT_REMOVE_SC(page_buf, entry_ptr) \
+ if (((page_buf) == NULL) || ((page_buf)->magic != H5PB__H5PB_T_MAGIC) || ((page_buf)->index_len < 1) || \
+ ((entry_ptr) == NULL) || ((page_buf)->index_size < (int64_t)((entry_ptr)->size)) || \
((entry_ptr)->size <= 0) || (H5PB__HASH_FCN((entry_ptr)->page) < 0) || \
(H5PB__HASH_FCN((entry_ptr)->page) >= H5PB__HASH_TABLE_LEN) || \
- (((pb_ptr)->ht)[(H5PB__HASH_FCN((entry_ptr)->page))] == NULL) || \
- ((((pb_ptr)->ht)[(H5PB__HASH_FCN((entry_ptr)->page))] != (entry_ptr)) && \
+ (((page_buf)->ht)[(H5PB__HASH_FCN((entry_ptr)->page))] == NULL) || \
+ ((((page_buf)->ht)[(H5PB__HASH_FCN((entry_ptr)->page))] != (entry_ptr)) && \
((entry_ptr)->ht_prev == NULL)) || \
- ((((pb_ptr)->ht)[(H5PB__HASH_FCN((entry_ptr)->page))] == (entry_ptr)) && \
+ ((((page_buf)->ht)[(H5PB__HASH_FCN((entry_ptr)->page))] == (entry_ptr)) && \
((entry_ptr)->ht_prev != NULL)) || \
- ((pb_ptr)->index_size != ((pb_ptr)->clean_index_size + (pb_ptr)->dirty_index_size)) || \
- ((pb_ptr)->index_size < ((pb_ptr)->clean_index_size)) || \
- ((pb_ptr)->index_size < ((pb_ptr)->dirty_index_size)) || \
- ((pb_ptr)->index_len != (pb_ptr)->il_len) || ((pb_ptr)->index_size != (pb_ptr)->il_size)) { \
+ ((page_buf)->index_size != ((page_buf)->clean_index_size + (page_buf)->dirty_index_size)) || \
+ ((page_buf)->index_size < ((page_buf)->clean_index_size)) || \
+ ((page_buf)->index_size < ((page_buf)->dirty_index_size)) || \
+ ((page_buf)->index_len != (page_buf)->il_len) || ((page_buf)->index_size != (page_buf)->il_size)) { \
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL, "pre HT remove SC failed") \
}
-#define H5PB__POST_HT_REMOVE_SC(pb_ptr, entry_ptr) \
- if (((pb_ptr) == NULL) || ((pb_ptr)->magic != H5PB__H5PB_T_MAGIC) || ((entry_ptr) == NULL) || \
+#define H5PB__POST_HT_REMOVE_SC(page_buf, entry_ptr) \
+ if (((page_buf) == NULL) || ((page_buf)->magic != H5PB__H5PB_T_MAGIC) || ((entry_ptr) == NULL) || \
((entry_ptr)->size <= 0) || ((entry_ptr)->ht_prev != NULL) || ((entry_ptr)->ht_prev != NULL) || \
- ((pb_ptr)->index_size != ((pb_ptr)->clean_index_size + (pb_ptr)->dirty_index_size)) || \
- ((pb_ptr)->index_size < ((pb_ptr)->clean_index_size)) || \
- ((pb_ptr)->index_size < ((pb_ptr)->dirty_index_size)) || \
- ((pb_ptr)->index_len != (pb_ptr)->il_len) || ((pb_ptr)->index_size != (pb_ptr)->il_size) || \
- ((pb_ptr)->curr_pages < 0) || ((pb_ptr)->curr_rd_pages < 0) || ((pb_ptr)->curr_md_pages < 0) || \
- (((pb_ptr)->curr_pages != ((pb_ptr)->curr_md_pages + (pb_ptr)->curr_rd_pages))) || \
- ((pb_ptr)->mpmde_count < 0) || \
- ((pb_ptr)->index_len != ((pb_ptr)->curr_pages + (pb_ptr)->mpmde_count))) { \
+ ((page_buf)->index_size != ((page_buf)->clean_index_size + (page_buf)->dirty_index_size)) || \
+ ((page_buf)->index_size < ((page_buf)->clean_index_size)) || \
+ ((page_buf)->index_size < ((page_buf)->dirty_index_size)) || \
+ ((page_buf)->index_len != (page_buf)->il_len) || ((page_buf)->index_size != (page_buf)->il_size) || \
+ ((page_buf)->curr_pages < 0) || ((page_buf)->curr_rd_pages < 0) || \
+ ((page_buf)->curr_md_pages < 0) || \
+ (((page_buf)->curr_pages != ((page_buf)->curr_md_pages + (page_buf)->curr_rd_pages))) || \
+ ((page_buf)->mpmde_count < 0) || \
+ ((page_buf)->index_len != ((page_buf)->curr_pages + (page_buf)->mpmde_count))) { \
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL, "post HT remove SC failed") \
}
-#define H5PB__PRE_HT_SEARCH_SC(pb_ptr, page, fail_val) \
- if (((pb_ptr) == NULL) || ((pb_ptr)->magic != H5PB__H5PB_T_MAGIC) || \
- ((pb_ptr)->index_size != ((pb_ptr)->clean_index_size + (pb_ptr)->dirty_index_size)) || \
+#define H5PB__PRE_HT_SEARCH_SC(page_buf, page, fail_val) \
+ if (((page_buf) == NULL) || ((page_buf)->magic != H5PB__H5PB_T_MAGIC) || \
+ ((page_buf)->index_size != ((page_buf)->clean_index_size + (page_buf)->dirty_index_size)) || \
(H5PB__HASH_FCN(page) < 0) || (H5PB__HASH_FCN(page) >= H5PB__HASH_TABLE_LEN)) { \
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, fail_val, "pre HT search SC failed") \
}
-#define H5PB__POST_SUC_HT_SEARCH_SC(pb_ptr, entry_ptr, k, fail_val) \
- if (((pb_ptr) == NULL) || ((pb_ptr)->magic != H5PB__H5PB_T_MAGIC) || ((pb_ptr)->index_len < 1) || \
- ((entry_ptr) == NULL) || ((pb_ptr)->index_size < (int64_t)((entry_ptr)->size)) || \
- ((pb_ptr)->index_size != ((pb_ptr)->clean_index_size + (pb_ptr)->dirty_index_size)) || \
- ((entry_ptr)->size <= 0) || (((pb_ptr)->ht)[k] == NULL) || \
- ((((pb_ptr)->ht)[k] != (entry_ptr)) && ((entry_ptr)->ht_prev == NULL)) || \
- ((((pb_ptr)->ht)[k] == (entry_ptr)) && ((entry_ptr)->ht_prev != NULL)) || \
+#define H5PB__POST_SUC_HT_SEARCH_SC(page_buf, entry_ptr, k, fail_val) \
+ if (((page_buf) == NULL) || ((page_buf)->magic != H5PB__H5PB_T_MAGIC) || ((page_buf)->index_len < 1) || \
+ ((entry_ptr) == NULL) || ((page_buf)->index_size < (int64_t)((entry_ptr)->size)) || \
+ ((page_buf)->index_size != ((page_buf)->clean_index_size + (page_buf)->dirty_index_size)) || \
+ ((entry_ptr)->size <= 0) || (((page_buf)->ht)[k] == NULL) || \
+ ((((page_buf)->ht)[k] != (entry_ptr)) && ((entry_ptr)->ht_prev == NULL)) || \
+ ((((page_buf)->ht)[k] == (entry_ptr)) && ((entry_ptr)->ht_prev != NULL)) || \
(((entry_ptr)->ht_prev != NULL) && ((entry_ptr)->ht_prev->ht_next != (entry_ptr))) || \
(((entry_ptr)->ht_next != NULL) && ((entry_ptr)->ht_next->ht_prev != (entry_ptr)))) { \
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, fail_val, "post successful HT search SC failed") \
}
-#define H5PB__POST_HT_SHIFT_TO_FRONT_SC(pb_ptr, entry_ptr, k, fail_val) \
- if (((pb_ptr) == NULL) || (((pb_ptr)->ht)[k] != (entry_ptr)) || ((entry_ptr)->ht_prev != NULL)) { \
+#define H5PB__POST_HT_SHIFT_TO_FRONT_SC(page_buf, entry_ptr, k, fail_val) \
+ if (((page_buf) == NULL) || (((page_buf)->ht)[k] != (entry_ptr)) || ((entry_ptr)->ht_prev != NULL)) { \
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, fail_val, "post HT shift to front SC failed") \
}
-#define H5PB__PRE_HT_ENTRY_SIZE_CHANGE_SC(pb_ptr, old_size, new_size, entry_ptr, was_clean) \
- if (((pb_ptr) == NULL) || ((pb_ptr)->index_len <= 0) || ((pb_ptr)->index_size <= 0) || \
- ((new_size) <= 0) || ((old_size) > (pb_ptr)->index_size) || \
- (((pb_ptr)->index_len == 1) && ((pb_ptr)->index_size != (old_size))) || \
- ((pb_ptr)->index_size != ((pb_ptr)->clean_index_size + (pb_ptr)->dirty_index_size)) || \
- ((pb_ptr)->index_size < ((pb_ptr)->clean_index_size)) || \
- ((pb_ptr)->index_size < ((pb_ptr)->dirty_index_size)) || \
- ((!(was_clean) || ((pb_ptr)->clean_index_size < (old_size))) && \
- (((was_clean)) || ((pb_ptr)->dirty_index_size < (old_size)))) || \
- ((entry_ptr) == NULL) || ((pb_ptr)->index_len != (pb_ptr)->il_len) || \
- ((pb_ptr)->index_size != (pb_ptr)->il_size)) { \
+#define H5PB__PRE_HT_ENTRY_SIZE_CHANGE_SC(page_buf, old_size, new_size, entry_ptr, was_clean) \
+ if (((page_buf) == NULL) || ((page_buf)->index_len <= 0) || ((page_buf)->index_size <= 0) || \
+ ((new_size) <= 0) || ((old_size) > (page_buf)->index_size) || \
+ (((page_buf)->index_len == 1) && ((page_buf)->index_size != (old_size))) || \
+ ((page_buf)->index_size != ((page_buf)->clean_index_size + (page_buf)->dirty_index_size)) || \
+ ((page_buf)->index_size < ((page_buf)->clean_index_size)) || \
+ ((page_buf)->index_size < ((page_buf)->dirty_index_size)) || \
+ ((!(was_clean) || ((page_buf)->clean_index_size < (old_size))) && \
+ (((was_clean)) || ((page_buf)->dirty_index_size < (old_size)))) || \
+ ((entry_ptr) == NULL) || ((page_buf)->index_len != (page_buf)->il_len) || \
+ ((page_buf)->index_size != (page_buf)->il_size)) { \
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL, "pre HT entry size change SC failed") \
}
-#define H5PB__POST_HT_ENTRY_SIZE_CHANGE_SC(pb_ptr, old_size, new_size, entry_ptr) \
- if (((pb_ptr) == NULL) || ((pb_ptr)->index_len <= 0) || ((pb_ptr)->index_size <= 0) || \
- ((new_size) > (pb_ptr)->index_size) || \
- ((pb_ptr)->index_size != ((pb_ptr)->clean_index_size + (pb_ptr)->dirty_index_size)) || \
- ((pb_ptr)->index_size < ((pb_ptr)->clean_index_size)) || \
- ((pb_ptr)->index_size < ((pb_ptr)->dirty_index_size)) || \
- ((!((entry_ptr)->is_dirty) || ((pb_ptr)->dirty_index_size < (new_size))) && \
- ((((entry_ptr)->is_dirty)) || ((pb_ptr)->clean_index_size < (new_size)))) || \
- (((pb_ptr)->index_len == 1) && ((pb_ptr)->index_size != (new_size))) || \
- ((pb_ptr)->index_len != (pb_ptr)->il_len) || ((pb_ptr)->index_size != (pb_ptr)->il_size)) { \
+#define H5PB__POST_HT_ENTRY_SIZE_CHANGE_SC(page_buf, old_size, new_size, entry_ptr) \
+ if (((page_buf) == NULL) || ((page_buf)->index_len <= 0) || ((page_buf)->index_size <= 0) || \
+ ((new_size) > (page_buf)->index_size) || \
+ ((page_buf)->index_size != ((page_buf)->clean_index_size + (page_buf)->dirty_index_size)) || \
+ ((page_buf)->index_size < ((page_buf)->clean_index_size)) || \
+ ((page_buf)->index_size < ((page_buf)->dirty_index_size)) || \
+ ((!((entry_ptr)->is_dirty) || ((page_buf)->dirty_index_size < (new_size))) && \
+ ((((entry_ptr)->is_dirty)) || ((page_buf)->clean_index_size < (new_size)))) || \
+ (((page_buf)->index_len == 1) && ((page_buf)->index_size != (new_size))) || \
+ ((page_buf)->index_len != (page_buf)->il_len) || ((page_buf)->index_size != (page_buf)->il_size)) { \
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL, "post HT entry size change SC failed") \
}
-#define H5PB__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(pb_ptr, entry_ptr) \
- if (((pb_ptr) == NULL) || ((pb_ptr)->magic != H5PB__H5PB_T_MAGIC) || ((pb_ptr)->index_len <= 0) || \
+#define H5PB__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(page_buf, entry_ptr) \
+ if (((page_buf) == NULL) || ((page_buf)->magic != H5PB__H5PB_T_MAGIC) || ((page_buf)->index_len <= 0) || \
((entry_ptr) == NULL) || ((entry_ptr)->is_dirty != FALSE) || \
- ((pb_ptr)->index_size < (int64_t)((entry_ptr)->size)) || \
- ((pb_ptr)->dirty_index_size < (int64_t)((entry_ptr)->size)) || \
- ((pb_ptr)->index_size != ((pb_ptr)->clean_index_size + (pb_ptr)->dirty_index_size)) || \
- ((pb_ptr)->index_size < ((pb_ptr)->clean_index_size)) || \
- ((pb_ptr)->index_size < ((pb_ptr)->dirty_index_size))) { \
+ ((page_buf)->index_size < (int64_t)((entry_ptr)->size)) || \
+ ((page_buf)->dirty_index_size < (int64_t)((entry_ptr)->size)) || \
+ ((page_buf)->index_size != ((page_buf)->clean_index_size + (page_buf)->dirty_index_size)) || \
+ ((page_buf)->index_size < ((page_buf)->clean_index_size)) || \
+ ((page_buf)->index_size < ((page_buf)->dirty_index_size))) { \
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL, "pre HT update for entry clean SC failed") \
}
-#define H5PB__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(pb_ptr, entry_ptr) \
- if (((pb_ptr) == NULL) || ((pb_ptr)->magic != H5PB__H5PB_T_MAGIC) || ((pb_ptr)->index_len <= 0) || \
+#define H5PB__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(page_buf, entry_ptr) \
+ if (((page_buf) == NULL) || ((page_buf)->magic != H5PB__H5PB_T_MAGIC) || ((page_buf)->index_len <= 0) || \
((entry_ptr) == NULL) || ((entry_ptr)->is_dirty != TRUE) || \
- ((pb_ptr)->index_size < (int64_t)((entry_ptr)->size)) || \
- ((pb_ptr)->clean_index_size < (int64_t)((entry_ptr)->size)) || \
- ((pb_ptr)->index_size != ((pb_ptr)->clean_index_size + (pb_ptr)->dirty_index_size)) || \
- ((pb_ptr)->index_size < ((pb_ptr)->clean_index_size)) || \
- ((pb_ptr)->index_size < ((pb_ptr)->dirty_index_size))) { \
+ ((page_buf)->index_size < (int64_t)((entry_ptr)->size)) || \
+ ((page_buf)->clean_index_size < (int64_t)((entry_ptr)->size)) || \
+ ((page_buf)->index_size != ((page_buf)->clean_index_size + (page_buf)->dirty_index_size)) || \
+ ((page_buf)->index_size < ((page_buf)->clean_index_size)) || \
+ ((page_buf)->index_size < ((page_buf)->dirty_index_size))) { \
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL, "pre HT update for entry dirty SC failed") \
}
-#define H5PB__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(pb_ptr, entry_ptr) \
- if (((pb_ptr)->index_size != ((pb_ptr)->clean_index_size + (pb_ptr)->dirty_index_size)) || \
- ((pb_ptr)->index_size < ((pb_ptr)->clean_index_size)) || \
- ((pb_ptr)->index_size < ((pb_ptr)->dirty_index_size))) { \
+#define H5PB__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(page_buf, entry_ptr) \
+ if (((page_buf)->index_size != ((page_buf)->clean_index_size + (page_buf)->dirty_index_size)) || \
+ ((page_buf)->index_size < ((page_buf)->clean_index_size)) || \
+ ((page_buf)->index_size < ((page_buf)->dirty_index_size))) { \
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL, "post HT update for entry clean SC failed") \
}
-#define H5PB__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(pb_ptr, entry_ptr) \
- if (((pb_ptr)->index_size != ((pb_ptr)->clean_index_size + (pb_ptr)->dirty_index_size)) || \
- ((pb_ptr)->index_size < ((pb_ptr)->clean_index_size)) || \
- ((pb_ptr)->index_size < ((pb_ptr)->dirty_index_size))) { \
+#define H5PB__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(page_buf, entry_ptr) \
+ if (((page_buf)->index_size != ((page_buf)->clean_index_size + (page_buf)->dirty_index_size)) || \
+ ((page_buf)->index_size < ((page_buf)->clean_index_size)) || \
+ ((page_buf)->index_size < ((page_buf)->dirty_index_size))) { \
HGOTO_ERROR(H5E_PAGEBUF, H5E_SYSTEM, FAIL, "post HT update for entry dirty SC failed") \
}
#else /* H5PB__DO_SANITY_CHECKS */
-#define H5PB__PRE_HT_INSERT_SC(pb_ptr, entry_ptr, fail_val)
-#define H5PB__POST_HT_INSERT_SC(pb_ptr, entry_ptr, fail_val)
-#define H5PB__PRE_HT_REMOVE_SC(pb_ptr, entry_ptr)
-#define H5PB__POST_HT_REMOVE_SC(pb_ptr, entry_ptr)
-#define H5PB__PRE_HT_SEARCH_SC(pb_ptr, Addr, fail_val)
-#define H5PB__POST_SUC_HT_SEARCH_SC(pb_ptr, entry_ptr, k, fail_val)
-#define H5PB__POST_HT_SHIFT_TO_FRONT_SC(pb_ptr, entry_ptr, k, fail_val)
-#define H5PB__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(pb_ptr, entry_ptr)
-#define H5PB__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(pb_ptr, entry_ptr)
-#define H5PB__PRE_HT_ENTRY_SIZE_CHANGE_SC(pb_ptr, old_size, new_size, entry_ptr, was_clean)
-#define H5PB__POST_HT_ENTRY_SIZE_CHANGE_SC(pb_ptr, old_size, new_size, entry_ptr)
-#define H5PB__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(pb_ptr, entry_ptr)
-#define H5PB__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(pb_ptr, entry_ptr)
+#define H5PB__PRE_HT_INSERT_SC(page_buf, entry_ptr, fail_val)
+#define H5PB__POST_HT_INSERT_SC(page_buf, entry_ptr, fail_val)
+#define H5PB__PRE_HT_REMOVE_SC(page_buf, entry_ptr)
+#define H5PB__POST_HT_REMOVE_SC(page_buf, entry_ptr)
+#define H5PB__PRE_HT_SEARCH_SC(page_buf, Addr, fail_val)
+#define H5PB__POST_SUC_HT_SEARCH_SC(page_buf, entry_ptr, k, fail_val)
+#define H5PB__POST_HT_SHIFT_TO_FRONT_SC(page_buf, entry_ptr, k, fail_val)
+#define H5PB__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(page_buf, entry_ptr)
+#define H5PB__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(page_buf, entry_ptr)
+#define H5PB__PRE_HT_ENTRY_SIZE_CHANGE_SC(page_buf, old_size, new_size, entry_ptr, was_clean)
+#define H5PB__POST_HT_ENTRY_SIZE_CHANGE_SC(page_buf, old_size, new_size, entry_ptr)
+#define H5PB__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(page_buf, entry_ptr)
+#define H5PB__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(page_buf, entry_ptr)
#endif /* H5PB__DO_SANITY_CHECKS */
-#define H5PB__INSERT_IN_INDEX(pb_ptr, entry_ptr, fail_val) \
+#define H5PB__INSERT_IN_INDEX(page_buf, entry_ptr, fail_val) \
{ \
int k; \
- H5PB__PRE_HT_INSERT_SC(pb_ptr, entry_ptr, fail_val) \
+ H5PB__PRE_HT_INSERT_SC(page_buf, entry_ptr, fail_val) \
k = H5PB__HASH_FCN((entry_ptr)->page); \
- if (((pb_ptr)->ht)[k] != NULL) { \
- (entry_ptr)->ht_next = ((pb_ptr)->ht)[k]; \
+ if (((page_buf)->ht)[k] != NULL) { \
+ (entry_ptr)->ht_next = ((page_buf)->ht)[k]; \
(entry_ptr)->ht_next->ht_prev = (entry_ptr); \
} \
- ((pb_ptr)->ht)[k] = (entry_ptr); \
- (pb_ptr)->index_len++; \
- (pb_ptr)->index_size += (int64_t)((entry_ptr)->size); \
+ ((page_buf)->ht)[k] = (entry_ptr); \
+ (page_buf)->index_len++; \
+ (page_buf)->index_size += (int64_t)((entry_ptr)->size); \
if ((entry_ptr)->is_dirty) { \
- (pb_ptr)->dirty_index_size += (int64_t)((entry_ptr)->size); \
+ (page_buf)->dirty_index_size += (int64_t)((entry_ptr)->size); \
} \
else { \
- (pb_ptr)->clean_index_size += (int64_t)((entry_ptr)->size); \
+ (page_buf)->clean_index_size += (int64_t)((entry_ptr)->size); \
} \
if ((entry_ptr)->is_metadata) { \
if ((entry_ptr)->is_mpmde) { \
- ((pb_ptr)->mpmde_count)++; \
+ ((page_buf)->mpmde_count)++; \
} \
else { \
- ((pb_ptr)->curr_md_pages)++; \
- (pb_ptr)->curr_pages++; \
+ ((page_buf)->curr_md_pages)++; \
+ (page_buf)->curr_pages++; \
} \
} \
else { \
- ((pb_ptr)->curr_rd_pages)++; \
- (pb_ptr)->curr_pages++; \
+ ((page_buf)->curr_rd_pages)++; \
+ (page_buf)->curr_pages++; \
} \
- H5PB__IL_DLL_APPEND((entry_ptr), (pb_ptr)->il_head, (pb_ptr)->il_tail, (pb_ptr)->il_len, \
- (pb_ptr)->il_size, fail_val) \
- H5PB__UPDATE_STATS_FOR_HT_INSERTION(pb_ptr) \
- H5PB__UPDATE_HT_SIZE_STATS(pb_ptr) \
- H5PB__POST_HT_INSERT_SC(pb_ptr, entry_ptr, fail_val) \
+ H5PB__IL_DLL_APPEND((entry_ptr), (page_buf)->il_head, (page_buf)->il_tail, (page_buf)->il_len, \
+ (page_buf)->il_size, fail_val) \
+ H5PB__UPDATE_STATS_FOR_HT_INSERTION(page_buf) \
+ H5PB__UPDATE_HT_SIZE_STATS(page_buf) \
+ H5PB__POST_HT_INSERT_SC(page_buf, entry_ptr, fail_val) \
}
-#define H5PB__DELETE_FROM_INDEX(pb_ptr, entry_ptr, fail_val) \
+#define H5PB__DELETE_FROM_INDEX(page_buf, entry_ptr, fail_val) \
{ \
int k; \
- H5PB__PRE_HT_REMOVE_SC(pb_ptr, entry_ptr) \
+ H5PB__PRE_HT_REMOVE_SC(page_buf, entry_ptr) \
k = H5PB__HASH_FCN((entry_ptr)->page); \
if ((entry_ptr)->ht_next) \
(entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
if ((entry_ptr)->ht_prev) \
(entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
- if (((pb_ptr)->ht)[k] == (entry_ptr)) \
- ((pb_ptr)->ht)[k] = (entry_ptr)->ht_next; \
+ if (((page_buf)->ht)[k] == (entry_ptr)) \
+ ((page_buf)->ht)[k] = (entry_ptr)->ht_next; \
(entry_ptr)->ht_next = NULL; \
(entry_ptr)->ht_prev = NULL; \
- (pb_ptr)->index_len--; \
- (pb_ptr)->index_size -= (int64_t)((entry_ptr)->size); \
+ (page_buf)->index_len--; \
+ (page_buf)->index_size -= (int64_t)((entry_ptr)->size); \
if ((entry_ptr)->is_dirty) { \
- (pb_ptr)->dirty_index_size -= (int64_t)((entry_ptr)->size); \
+ (page_buf)->dirty_index_size -= (int64_t)((entry_ptr)->size); \
} \
else { \
- (pb_ptr)->clean_index_size -= (int64_t)((entry_ptr)->size); \
+ (page_buf)->clean_index_size -= (int64_t)((entry_ptr)->size); \
} \
if ((entry_ptr)->is_metadata) { \
if ((entry_ptr)->is_mpmde) { \
- ((pb_ptr)->mpmde_count)--; \
+ ((page_buf)->mpmde_count)--; \
} \
else { \
- ((pb_ptr)->curr_md_pages)--; \
- (pb_ptr)->curr_pages--; \
+ ((page_buf)->curr_md_pages)--; \
+ (page_buf)->curr_pages--; \
} \
} \
else { \
- ((pb_ptr)->curr_rd_pages)--; \
- (pb_ptr)->curr_pages--; \
+ ((page_buf)->curr_rd_pages)--; \
+ (page_buf)->curr_pages--; \
} \
- H5PB__IL_DLL_REMOVE((entry_ptr), (pb_ptr)->il_head, (pb_ptr)->il_tail, (pb_ptr)->il_len, \
- (pb_ptr)->il_size, fail_val) \
- H5PB__UPDATE_STATS_FOR_HT_DELETION(pb_ptr) \
- H5PB__POST_HT_REMOVE_SC(pb_ptr, entry_ptr) \
+ H5PB__IL_DLL_REMOVE((entry_ptr), (page_buf)->il_head, (page_buf)->il_tail, (page_buf)->il_len, \
+ (page_buf)->il_size, fail_val) \
+ H5PB__UPDATE_STATS_FOR_HT_DELETION(page_buf) \
+ H5PB__POST_HT_REMOVE_SC(page_buf, entry_ptr) \
}
-#define H5PB__SEARCH_INDEX(pb_ptr, Page, entry_ptr, fail_val) \
+#define H5PB__SEARCH_INDEX(page_buf, Page, entry_ptr, fail_val) \
{ \
int k; \
int depth = 0; \
- H5PB__PRE_HT_SEARCH_SC(pb_ptr, Page, fail_val) \
+ H5PB__PRE_HT_SEARCH_SC(page_buf, Page, fail_val) \
k = H5PB__HASH_FCN(Page); \
- entry_ptr = ((pb_ptr)->ht)[k]; \
+ entry_ptr = ((page_buf)->ht)[k]; \
while (entry_ptr) { \
if ((Page) == (entry_ptr)->page) { \
- H5PB__POST_SUC_HT_SEARCH_SC(pb_ptr, entry_ptr, k, fail_val) \
- if ((entry_ptr) != ((pb_ptr)->ht)[k]) { \
+ H5PB__POST_SUC_HT_SEARCH_SC(page_buf, entry_ptr, k, fail_val) \
+ if ((entry_ptr) != ((page_buf)->ht)[k]) { \
if ((entry_ptr)->ht_next) \
(entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
HDassert((entry_ptr)->ht_prev != NULL); \
(entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
- ((pb_ptr)->ht)[k]->ht_prev = (entry_ptr); \
- (entry_ptr)->ht_next = ((pb_ptr)->ht)[k]; \
+ ((page_buf)->ht)[k]->ht_prev = (entry_ptr); \
+ (entry_ptr)->ht_next = ((page_buf)->ht)[k]; \
(entry_ptr)->ht_prev = NULL; \
- ((pb_ptr)->ht)[k] = (entry_ptr); \
- H5PB__POST_HT_SHIFT_TO_FRONT_SC(pb_ptr, entry_ptr, k, fail_val) \
+ ((page_buf)->ht)[k] = (entry_ptr); \
+ H5PB__POST_HT_SHIFT_TO_FRONT_SC(page_buf, entry_ptr, k, fail_val) \
} \
break; \
} \
(entry_ptr) = (entry_ptr)->ht_next; \
(depth)++; \
} \
- H5PB__UPDATE_STATS_FOR_HT_SEARCH(pb_ptr, (entry_ptr != NULL), depth) \
+ H5PB__UPDATE_STATS_FOR_HT_SEARCH(page_buf, (entry_ptr != NULL), depth) \
}
-#define H5PB__UPDATE_INDEX_FOR_ENTRY_CLEAN(pb_ptr, entry_ptr) \
+#define H5PB__UPDATE_INDEX_FOR_ENTRY_CLEAN(page_buf, entry_ptr) \
{ \
- H5PB__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(pb_ptr, entry_ptr); \
- (pb_ptr)->dirty_index_size -= (int64_t)((entry_ptr)->size); \
- (pb_ptr)->clean_index_size += (int64_t)((entry_ptr)->size); \
- H5PB__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(pb_ptr, entry_ptr); \
+ H5PB__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(page_buf, entry_ptr); \
+ (page_buf)->dirty_index_size -= (int64_t)((entry_ptr)->size); \
+ (page_buf)->clean_index_size += (int64_t)((entry_ptr)->size); \
+ H5PB__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(page_buf, entry_ptr); \
}
-#define H5PB__UPDATE_INDEX_FOR_ENTRY_DIRTY(pb_ptr, entry_ptr) \
+#define H5PB__UPDATE_INDEX_FOR_ENTRY_DIRTY(page_buf, entry_ptr) \
{ \
- H5PB__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(pb_ptr, entry_ptr); \
- (pb_ptr)->clean_index_size -= (int64_t)((entry_ptr)->size); \
- (pb_ptr)->dirty_index_size += (int64_t)((entry_ptr)->size); \
- H5PB__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(pb_ptr, entry_ptr); \
+ H5PB__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(page_buf, entry_ptr); \
+ (page_buf)->clean_index_size -= (int64_t)((entry_ptr)->size); \
+ (page_buf)->dirty_index_size += (int64_t)((entry_ptr)->size); \
+ H5PB__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(page_buf, entry_ptr); \
}
-#define H5PB__UPDATE_INDEX_FOR_SIZE_CHANGE(pb_ptr, old_size, new_size, entry_ptr, was_clean) \
+#define H5PB__UPDATE_INDEX_FOR_SIZE_CHANGE(page_buf, old_size, new_size, entry_ptr, was_clean) \
{ \
- H5PB__PRE_HT_ENTRY_SIZE_CHANGE_SC(pb_ptr, old_size, new_size, entry_ptr, was_clean) \
- (pb_ptr)->index_size -= (old_size); \
- (pb_ptr)->index_size += (new_size); \
+ H5PB__PRE_HT_ENTRY_SIZE_CHANGE_SC(page_buf, old_size, new_size, entry_ptr, was_clean) \
+ (page_buf)->index_size -= (old_size); \
+ (page_buf)->index_size += (new_size); \
if (was_clean) { \
- (pb_ptr)->clean_index_size -= (old_size); \
+ (page_buf)->clean_index_size -= (old_size); \
} \
else { \
- (pb_ptr)->dirty_index_size -= (old_size); \
+ (page_buf)->dirty_index_size -= (old_size); \
} \
if ((entry_ptr)->is_dirty) { \
- (pb_ptr)->dirty_index_size += (new_size); \
+ (page_buf)->dirty_index_size += (new_size); \
} \
else { \
- (pb_ptr)->clean_index_size += (new_size); \
+ (page_buf)->clean_index_size += (new_size); \
} \
- H5PB__DLL_UPDATE_FOR_SIZE_CHANGE((pb_ptr)->il_len, (pb_ptr)->il_size, (old_size), (new_size)) \
- H5PB__POST_HT_ENTRY_SIZE_CHANGE_SC(pb_ptr, old_size, new_size, entry_ptr) \
+ H5PB__DLL_UPDATE_FOR_SIZE_CHANGE((page_buf)->il_len, (page_buf)->il_size, (old_size), (new_size)) \
+ H5PB__POST_HT_ENTRY_SIZE_CHANGE_SC(page_buf, old_size, new_size, entry_ptr) \
}
/***********************************************************************
@@ -1094,21 +1096,21 @@
*-------------------------------------------------------------------------
*/
-#define H5PB__UPDATE_RP_FOR_EVICTION(pb_ptr, entry_ptr, fail_val) \
+#define H5PB__UPDATE_RP_FOR_EVICTION(page_buf, entry_ptr, fail_val) \
{ \
- HDassert((pb_ptr)); \
- HDassert((pb_ptr)->magic == H5PB__H5PB_T_MAGIC); \
+ HDassert((page_buf)); \
+ HDassert((page_buf)->magic == H5PB__H5PB_T_MAGIC); \
HDassert((entry_ptr)); \
HDassert((entry_ptr)->magic == H5PB__H5PB_ENTRY_T_MAGIC); \
HDassert(!((entry_ptr)->is_dirty)); \
- HDassert((entry_ptr)->size >= pb_ptr->page_size); \
+ HDassert((entry_ptr)->size >= page_buf->page_size); \
\
/* modified LRU specific code */ \
\
/* remove the entry from the LRU list. */ \
\
- H5PB__DLL_REMOVE((entry_ptr), (pb_ptr)->LRU_head_ptr, (pb_ptr)->LRU_tail_ptr, (pb_ptr)->LRU_len, \
- (pb_ptr)->LRU_size, (fail_val)) \
+ H5PB__DLL_REMOVE((entry_ptr), (page_buf)->LRU_head_ptr, (page_buf)->LRU_tail_ptr, \
+ (page_buf)->LRU_len, (page_buf)->LRU_size, (fail_val)) \
\
/* End modified LRU specific code. */ \
\
@@ -1141,21 +1143,21 @@
*-------------------------------------------------------------------------
*/
-#define H5PB__UPDATE_RP_FOR_REMOVE(pb_ptr, entry_ptr, fail_val) \
+#define H5PB__UPDATE_RP_FOR_REMOVE(page_buf, entry_ptr, fail_val) \
{ \
- HDassert((pb_ptr)); \
- HDassert((pb_ptr)->magic == H5PB__H5PB_T_MAGIC); \
+ HDassert((page_buf)); \
+ HDassert((page_buf)->magic == H5PB__H5PB_T_MAGIC); \
HDassert((entry_ptr)); \
HDassert((entry_ptr)->magic == H5PB__H5PB_ENTRY_T_MAGIC); \
HDassert(!((entry_ptr)->is_mpmde)); \
- HDassert((entry_ptr)->size == pb_ptr->page_size); \
+ HDassert((entry_ptr)->size == page_buf->page_size); \
\
/* modified LRU specific code */ \
\
/* remove the entry from the LRU list. */ \
\
- H5PB__DLL_REMOVE((entry_ptr), (pb_ptr)->LRU_head_ptr, (pb_ptr)->LRU_tail_ptr, (pb_ptr)->LRU_len, \
- (pb_ptr)->LRU_size, (fail_val)) \
+ H5PB__DLL_REMOVE((entry_ptr), (page_buf)->LRU_head_ptr, (page_buf)->LRU_tail_ptr, \
+ (page_buf)->LRU_len, (page_buf)->LRU_size, (fail_val)) \
\
/* End modified LRU specific code. */ \
\
@@ -1184,23 +1186,23 @@
*-------------------------------------------------------------------------
*/
-#define H5PB__UPDATE_RP_FOR_ACCESS(pb_ptr, entry_ptr, fail_val) \
+#define H5PB__UPDATE_RP_FOR_ACCESS(page_buf, entry_ptr, fail_val) \
{ \
- HDassert((pb_ptr)); \
- HDassert((pb_ptr)->magic == H5PB__H5PB_T_MAGIC); \
+ HDassert((page_buf)); \
+ HDassert((page_buf)->magic == H5PB__H5PB_T_MAGIC); \
HDassert((entry_ptr)); \
HDassert((entry_ptr)->magic == H5PB__H5PB_ENTRY_T_MAGIC); \
- HDassert((entry_ptr)->size >= pb_ptr->page_size); \
+ HDassert((entry_ptr)->size >= page_buf->page_size); \
\
/* modified LRU specific code */ \
\
/* Move entry to the head of the LRU */ \
\
- H5PB__DLL_REMOVE((entry_ptr), (pb_ptr)->LRU_head_ptr, (pb_ptr)->LRU_tail_ptr, (pb_ptr)->LRU_len, \
- (pb_ptr)->LRU_size, (fail_val)) \
+ H5PB__DLL_REMOVE((entry_ptr), (page_buf)->LRU_head_ptr, (page_buf)->LRU_tail_ptr, \
+ (page_buf)->LRU_len, (page_buf)->LRU_size, (fail_val)) \
\
- H5PB__DLL_PREPEND((entry_ptr), (pb_ptr)->LRU_head_ptr, (pb_ptr)->LRU_tail_ptr, (pb_ptr)->LRU_len, \
- (pb_ptr)->LRU_size, (fail_val)) \
+ H5PB__DLL_PREPEND((entry_ptr), (page_buf)->LRU_head_ptr, (page_buf)->LRU_tail_ptr, \
+ (page_buf)->LRU_len, (page_buf)->LRU_size, (fail_val)) \
\
/* End modified LRU specific code. */ \
\
@@ -1229,9 +1231,9 @@
*-------------------------------------------------------------------------
*/
-#define H5PB__UPDATE_RP_FOR_FLUSH(pb_ptr, entry_ptr, fail_val) \
+#define H5PB__UPDATE_RP_FOR_FLUSH(page_buf, entry_ptr, fail_val) \
{ \
- H5PB__UPDATE_RP_FOR_ACCESS(pb_ptr, entry_ptr, fail_val) \
+ H5PB__UPDATE_RP_FOR_ACCESS(page_buf, entry_ptr, fail_val) \
\
} /* H5PB__UPDATE_RP_FOR_FLUSH */
@@ -1262,22 +1264,22 @@
*-------------------------------------------------------------------------
*/
-#define H5PB__UPDATE_RP_FOR_INSERT_APPEND(pb_ptr, entry_ptr, fail_val) \
+#define H5PB__UPDATE_RP_FOR_INSERT_APPEND(page_buf, entry_ptr, fail_val) \
{ \
- HDassert((pb_ptr)); \
- HDassert((pb_ptr)->magic == H5PB__H5PB_T_MAGIC); \
+ HDassert((page_buf)); \
+ HDassert((page_buf)->magic == H5PB__H5PB_T_MAGIC); \
HDassert((entry_ptr)); \
HDassert((entry_ptr)->magic == H5PB__H5PB_ENTRY_T_MAGIC); \
- HDassert((entry_ptr)->size == pb_ptr->page_size); \
+ HDassert((entry_ptr)->size == page_buf->page_size); \
\
/* modified LRU specific code */ \
\
/* insert the entry at the tail of the LRU list. */ \
\
- H5PB__DLL_APPEND((entry_ptr), (pb_ptr)->LRU_head_ptr, (pb_ptr)->LRU_tail_ptr, (pb_ptr)->LRU_len, \
- (pb_ptr)->LRU_size, (fail_val)) \
+ H5PB__DLL_APPEND((entry_ptr), (page_buf)->LRU_head_ptr, (page_buf)->LRU_tail_ptr, \
+ (page_buf)->LRU_len, (page_buf)->LRU_size, (fail_val)) \
\
- H5PB__UPDATE_LRU_SIZE_STATS(pb_ptr) \
+ H5PB__UPDATE_LRU_SIZE_STATS(page_buf) \
\
/* End modified LRU specific code. */ \
}
@@ -1305,22 +1307,22 @@
*-------------------------------------------------------------------------
*/
-#define H5PB__UPDATE_RP_FOR_INSERTION(pb_ptr, entry_ptr, fail_val) \
+#define H5PB__UPDATE_RP_FOR_INSERTION(page_buf, entry_ptr, fail_val) \
{ \
- HDassert((pb_ptr)); \
- HDassert((pb_ptr)->magic == H5PB__H5PB_T_MAGIC); \
+ HDassert((page_buf)); \
+ HDassert((page_buf)->magic == H5PB__H5PB_T_MAGIC); \
HDassert((entry_ptr)); \
HDassert((entry_ptr)->magic == H5PB__H5PB_ENTRY_T_MAGIC); \
- HDassert((entry_ptr)->size >= pb_ptr->page_size); \
+ HDassert((entry_ptr)->size >= page_buf->page_size); \
\
/* modified LRU specific code */ \
\
/* insert the entry at the head of the LRU list. */ \
\
- H5PB__DLL_PREPEND((entry_ptr), (pb_ptr)->LRU_head_ptr, (pb_ptr)->LRU_tail_ptr, (pb_ptr)->LRU_len, \
- (pb_ptr)->LRU_size, (fail_val)) \
+ H5PB__DLL_PREPEND((entry_ptr), (page_buf)->LRU_head_ptr, (page_buf)->LRU_tail_ptr, \
+ (page_buf)->LRU_len, (page_buf)->LRU_size, (fail_val)) \
\
- H5PB__UPDATE_LRU_SIZE_STATS(pb_ptr) \
+ H5PB__UPDATE_LRU_SIZE_STATS(page_buf) \
\
/* End modified LRU specific code. */ \
}
@@ -1367,22 +1369,22 @@
*-------------------------------------------------------------------------
*/
-#define H5PB__INSERT_IN_TL(pb_ptr, entry_ptr, fail_val) \
+#define H5PB__INSERT_IN_TL(page_buf, entry_ptr, fail_val) \
{ \
- HDassert((pb_ptr)); \
- HDassert((pb_ptr)->magic == H5PB__H5PB_T_MAGIC); \
- HDassert((pb_ptr)->vfd_swmr_writer); \
+ HDassert((page_buf)); \
+ HDassert((page_buf)->magic == H5PB__H5PB_T_MAGIC); \
+ HDassert((page_buf)->vfd_swmr_writer); \
HDassert((entry_ptr)); \
HDassert((entry_ptr)->magic == H5PB__H5PB_ENTRY_T_MAGIC); \
HDassert((entry_ptr)->modified_this_tick); \
- HDassert((entry_ptr)->size >= pb_ptr->page_size); \
+ HDassert((entry_ptr)->size >= page_buf->page_size); \
\
/* insert the entry at the head of the tick list. */ \
\
- H5PB__TL_DLL_PREPEND((entry_ptr), (pb_ptr)->tl_head_ptr, (pb_ptr)->tl_tail_ptr, (pb_ptr)->tl_len, \
- (pb_ptr)->tl_size, (fail_val)) \
+ H5PB__TL_DLL_PREPEND((entry_ptr), (page_buf)->tl_head_ptr, (page_buf)->tl_tail_ptr, \
+ (page_buf)->tl_len, (page_buf)->tl_size, (fail_val)) \
\
- H5PB__UPDATE_TL_SIZE_STATS(pb_ptr) \
+ H5PB__UPDATE_TL_SIZE_STATS(page_buf) \
\
} /* H5PB__INSERT_IN_TL */
@@ -1403,20 +1405,20 @@
*-------------------------------------------------------------------------
*/
-#define H5PB__REMOVE_FROM_TL(pb_ptr, entry_ptr, fail_val) \
+#define H5PB__REMOVE_FROM_TL(page_buf, entry_ptr, fail_val) \
{ \
- HDassert((pb_ptr)); \
- HDassert((pb_ptr)->magic == H5PB__H5PB_T_MAGIC); \
- HDassert((pb_ptr)->vfd_swmr_writer); \
+ HDassert((page_buf)); \
+ HDassert((page_buf)->magic == H5PB__H5PB_T_MAGIC); \
+ HDassert((page_buf)->vfd_swmr_writer); \
HDassert((entry_ptr)); \
HDassert((entry_ptr)->magic == H5PB__H5PB_ENTRY_T_MAGIC); \
HDassert((entry_ptr)->modified_this_tick); \
- HDassert((entry_ptr)->size >= pb_ptr->page_size); \
+ HDassert((entry_ptr)->size >= page_buf->page_size); \
\
/* remove the entry from the tick list. */ \
\
- H5PB__TL_DLL_REMOVE((entry_ptr), (pb_ptr)->tl_head_ptr, (pb_ptr)->tl_tail_ptr, (pb_ptr)->tl_len, \
- (pb_ptr)->tl_size, (fail_val)) \
+ H5PB__TL_DLL_REMOVE((entry_ptr), (page_buf)->tl_head_ptr, (page_buf)->tl_tail_ptr, \
+ (page_buf)->tl_len, (page_buf)->tl_size, (fail_val)) \
\
} /* H5PB__REMOVE_FROM_TL */
@@ -1461,7 +1463,7 @@
* entry_ptr->next == NULL ||
* entry_ptr->delay_write_until >= entry_ptr->next->delay_write_until
*
- * In passing update pb_ptr->max_delay if appropriate.
+ * In passing update page_buf->max_delay if appropriate.
*
* Return: N/A
*
@@ -1474,36 +1476,36 @@
*-------------------------------------------------------------------------
*/
-#define H5PB__INSERT_IN_DWL(pb_ptr, entry_ptr, fail_val) \
+#define H5PB__INSERT_IN_DWL(page_buf, entry_ptr, fail_val) \
{ \
int insertion_depth = 0; \
uint64_t delay; \
H5PB_entry_t *suc_ptr; \
\
- HDassert((pb_ptr)); \
- HDassert((pb_ptr)->magic == H5PB__H5PB_T_MAGIC); \
- HDassert((pb_ptr)->vfd_swmr_writer); \
+ HDassert((page_buf)); \
+ HDassert((page_buf)->magic == H5PB__H5PB_T_MAGIC); \
+ HDassert((page_buf)->vfd_swmr_writer); \
HDassert((entry_ptr)); \
HDassert((entry_ptr)->magic == H5PB__H5PB_ENTRY_T_MAGIC); \
- HDassert((entry_ptr)->size >= pb_ptr->page_size); \
- HDassert((entry_ptr)->delay_write_until > (pb_ptr)->cur_tick); \
+ HDassert((entry_ptr)->size >= page_buf->page_size); \
+ HDassert((entry_ptr)->delay_write_until > (page_buf)->cur_tick); \
\
- delay = (entry_ptr)->delay_write_until - (pb_ptr)->cur_tick; \
- suc_ptr = pb_ptr->dwl_head_ptr; \
+ delay = (entry_ptr)->delay_write_until - (page_buf)->cur_tick; \
+ suc_ptr = page_buf->dwl_head_ptr; \
\
while ((suc_ptr) && ((suc_ptr)->delay_write_until > (entry_ptr)->delay_write_until)) { \
insertion_depth++; \
suc_ptr = suc_ptr->next; \
} \
\
- H5PB__DLL_INSERT_BEFORE((entry_ptr), (suc_ptr), (pb_ptr)->dwl_head_ptr, (pb_ptr)->dwl_tail_ptr, \
- (pb_ptr)->dwl_len, (pb_ptr)->dwl_size, (fail_val)) \
+ H5PB__DLL_INSERT_BEFORE((entry_ptr), (suc_ptr), (page_buf)->dwl_head_ptr, (page_buf)->dwl_tail_ptr, \
+ (page_buf)->dwl_len, (page_buf)->dwl_size, (fail_val)) \
\
- if (entry_ptr->delay_write_until > pb_ptr->max_delay) \
- pb_ptr->max_delay = entry_ptr->delay_write_until; \
+ if (entry_ptr->delay_write_until > page_buf->max_delay) \
+ page_buf->max_delay = entry_ptr->delay_write_until; \
\
- H5PB__UPDATE_DWL_SIZE_STATS(pb_ptr) \
- H5PB__UPDATE_DWL_DELAYED_WRITES(pb_ptr, insertion_depth, delay) \
+ H5PB__UPDATE_DWL_SIZE_STATS(page_buf) \
+ H5PB__UPDATE_DWL_DELAYED_WRITES(page_buf, insertion_depth, delay) \
\
} /* H5PB__INSERT_IN_DWL */
@@ -1525,20 +1527,20 @@
*-------------------------------------------------------------------------
*/
-#define H5PB__REMOVE_FROM_DWL(pb_ptr, entry_ptr, fail_val) \
+#define H5PB__REMOVE_FROM_DWL(page_buf, entry_ptr, fail_val) \
{ \
- HDassert((pb_ptr)); \
- HDassert((pb_ptr)->magic == H5PB__H5PB_T_MAGIC); \
- HDassert((pb_ptr)->vfd_swmr_writer); \
+ HDassert((page_buf)); \
+ HDassert((page_buf)->magic == H5PB__H5PB_T_MAGIC); \
+ HDassert((page_buf)->vfd_swmr_writer); \
HDassert((entry_ptr)); \
HDassert((entry_ptr)->magic == H5PB__H5PB_ENTRY_T_MAGIC); \
- HDassert((entry_ptr)->size >= pb_ptr->page_size); \
+ HDassert((entry_ptr)->size >= page_buf->page_size); \
HDassert((entry_ptr)->delay_write_until == 0); \
\
/* remove the entry from the delayed write list. */ \
\
- H5PB__DLL_REMOVE((entry_ptr), (pb_ptr)->dwl_head_ptr, (pb_ptr)->dwl_tail_ptr, (pb_ptr)->dwl_len, \
- (pb_ptr)->dwl_size, (fail_val)) \
+ H5PB__DLL_REMOVE((entry_ptr), (page_buf)->dwl_head_ptr, (page_buf)->dwl_tail_ptr, \
+ (page_buf)->dwl_len, (page_buf)->dwl_size, (fail_val)) \
\
} /* H5PB__REMOVE_FROM_DWLL */
@@ -1561,21 +1563,21 @@
* magic: Unsigned 32 bit integer that must always be set to
* H5PB__H5PB_ENTRY_T_MAGIC when the entry is valid.
*
- * pb_ptr: Pointer to the page buffer that contains this entry.
+ * page_buf: Pointer to the page buffer that contains this entry.
*
* addr: Base address of the page in the file.
*
- * page: Page offset of the page -- i.e. addr / pb_ptr->page_size.
- * Note that addr must always equal page * pb_ptr->page_size.
+ * page: Page offset of the page -- i.e. addr / page_buf->page_size.
+ * Note that addr must always equal page * page_buf->page_size.
*
* size: Size of the page buffer entry in bytes. Under normal
- * circumstance, this will always be equal to pb_ptr->page_size.
+ * circumstance, this will always be equal to page_buf->page_size.
* However, in the context of a VFD SWMR writer, the page
* buffer may be used to store multi-page metadata entries
* until the end of tick, or to delay writes of such entries
* for up to max_lag ticks.
*
- * In such cases, size must be greater than pb_ptr->page_size.
+ * In such cases, size must be greater than page_buf->page_size.
*
* image_ptr: Pointer to void. When not NULL, this field points to a
* dynamically allocated block of size bytes in which the
@@ -1650,7 +1652,7 @@
*
* Observe that:
*
- * is_mpmde <==> is_metadata && size > pb_ptr->page_size
+ * is_mpmde <==> is_metadata && size > page_buf->page_size
*
* loaded: Boolean flag that is set to TRUE iff the entry was loaded
* from file. This is a necessary input in determining
@@ -1659,7 +1661,7 @@
* This field is only maintained in the VFD SWMR case
* and should be false otherwise.
*
- * modified_this_tick: This field is set to TRUE iff pb_ptr->vfd_swrm_write
+ * modified_this_tick: This field is set to TRUE iff page_buf->vfd_swrm_write
* and the entry has been modified in the current tick. If
* modified_this_tick is TRUE, the entry must also be in the
* tick list.
@@ -1685,7 +1687,7 @@
struct H5PB_entry_t {
uint32_t magic;
- H5PB_t * pb_ptr;
+ H5PB_t * page_buf;
haddr_t addr;
uint64_t page;
size_t size;
@@ -1714,4 +1716,12 @@ struct H5PB_entry_t {
}; /* H5PB_entry_t */
+/*****************************/
+/* Package Private Variables */
+/*****************************/
+
+/******************************/
+/* Package Private Prototypes */
+/******************************/
+
#endif /* H5PBpkg_H */
diff --git a/src/H5TSpublic.h b/src/H5TSdevelop.h
index 41213f9..9e8f718 100644
--- a/src/H5TSpublic.h
+++ b/src/H5TSdevelop.h
@@ -2,7 +2,7 @@
* Copyright by The HDF Group. *
* All rights reserved. *
* *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
* the COPYING file, which can be found at the root of the source code *
* distribution tree, or in https://www.hdfgroup.org/licenses. *
@@ -11,14 +11,12 @@
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
- * This file contains public declarations for the H5TS (threadsafety) module.
+ * This file contains public declarations for the H5TS (threadsafety) developer
+ * support routines.
*/
-#ifndef H5TSpublic_H
-#define H5TSpublic_H
-
-/* Public headers needed by this file */
-#include "H5public.h" /* Generic Functions */
+#ifndef H5TSdevelop_H
+#define H5TSdevelop_H
/*****************/
/* Public Macros */
@@ -49,4 +47,4 @@ H5_DLL herr_t H5TSmutex_get_attempt_count(unsigned int *count);
}
#endif
-#endif /* H5TSpublic_H */
+#endif /* H5TSdevelop_H */
diff --git a/src/H5TSprivate.h b/src/H5TSprivate.h
index 6f9f1c0..3150f59 100644
--- a/src/H5TSprivate.h
+++ b/src/H5TSprivate.h
@@ -25,8 +25,8 @@
#define H5TSprivate_H_
#ifdef H5_HAVE_THREADSAFE
-/* Public headers needed by this file */
-#include "H5TSpublic.h" /* Public API prototypes */
+/* Include package's public headers */
+#include "H5TSdevelop.h"
#ifdef H5_HAVE_WIN_THREADS
diff --git a/src/H5Tdevelop.h b/src/H5Tdevelop.h
new file mode 100644
index 0000000..e642d7c
--- /dev/null
+++ b/src/H5Tdevelop.h
@@ -0,0 +1,227 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * This file contains public declarations for the H5T (datatype) developer
+ * support routines.
+ */
+
+#ifndef _H5Tdevelop_H
+#define _H5Tdevelop_H
+
+/* Include package's public header */
+#include "H5Tpublic.h"
+
+/*****************/
+/* Public Macros */
+/*****************/
+
+/*******************/
+/* Public Typedefs */
+/*******************/
+
+/**
+ * Commands sent to conversion functions
+ */
+typedef enum H5T_cmd_t {
+ H5T_CONV_INIT = 0, /**< query and/or initialize private data */
+ H5T_CONV_CONV = 1, /**< convert data from source to dest datatype */
+ H5T_CONV_FREE = 2 /**< function is being removed from path */
+} H5T_cmd_t;
+
+/**
+ * How is the `bkg' buffer used by the conversion function?
+ */
+typedef enum H5T_bkg_t {
+ H5T_BKG_NO = 0, /**< background buffer is not needed, send NULL */
+ H5T_BKG_TEMP = 1, /**< bkg buffer used as temp storage only */
+ H5T_BKG_YES = 2 /**< init bkg buf with data before conversion */
+} H5T_bkg_t;
+
+/**
+ * Type conversion client data
+ */
+//! <!-- [H5T_cdata_t_snip] -->
+typedef struct H5T_cdata_t {
+ H5T_cmd_t command; /**< what should the conversion function do? */
+ H5T_bkg_t need_bkg; /**< is the background buffer needed? */
+ hbool_t recalc; /**< recalculate private data */
+ void * priv; /**< private data */
+} H5T_cdata_t;
+//! <!-- [H5T_cdata_t_snip] -->
+
+/**
+ * Conversion function persistence
+ */
+typedef enum H5T_pers_t {
+ H5T_PERS_DONTCARE = -1, /**< wild card */
+ H5T_PERS_HARD = 0, /**< hard conversion function */
+ H5T_PERS_SOFT = 1 /**< soft conversion function */
+} H5T_pers_t;
+
+/**
+ * All datatype conversion functions are...
+ */
+//! <!-- [H5T_conv_t_snip] -->
+typedef herr_t (*H5T_conv_t)(hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata, size_t nelmts, size_t buf_stride,
+ size_t bkg_stride, void *buf, void *bkg, hid_t dset_xfer_plist);
+//! <!-- [H5T_conv_t_snip] -->
+
+/********************/
+/* Public Variables */
+/********************/
+
+/*********************/
+/* Public Prototypes */
+/*********************/
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * \ingroup CONV
+ *
+ * \brief Registers a datatype conversion function
+ *
+ * \param[in] pers Conversion function type
+ * \param[in] name Name displayed in diagnostic output
+ * \type_id{src_id} of source datatype
+ * \type_id{dst_id} of destination datatype
+ * \param[in] func Function to convert between source and destination datatypes
+ *
+ * \return \herr_t
+ *
+ * \details H5Tregister() registers a hard or soft conversion function for a
+ * datatype conversion path. The parameter \p pers indicates whether a
+ * conversion function is hard (#H5T_PERS_HARD) or soft
+ * (#H5T_PERS_SOFT). User-defined functions employing compiler casting
+ * are designated as \Emph{hard}; other user-defined conversion
+ * functions registered with the HDF5 library (with H5Tregister() )
+ * are designated as \Emph{soft}. The HDF5 library also has its own
+ * hard and soft conversion functions.
+ *
+ * A conversion path can have only one hard function. When type is
+ * #H5T_PERS_HARD, \p func replaces any previous hard function.
+ *
+ * When type is #H5T_PERS_SOFT, H5Tregister() adds the function to the
+ * end of the master soft list and replaces the soft function in all
+ * applicable existing conversion paths. Soft functions are used when
+ * determining which conversion function is appropriate for this path.
+ *
+ * The \p name is used only for debugging and should be a short
+ * identifier for the function.
+ *
+ * The path is specified by the source and destination datatypes \p
+ * src_id and \p dst_id. For soft conversion functions, only the class
+ * of these types is important.
+ *
+ * The type of the conversion function pointer is declared as:
+ * \snippet this H5T_conv_t_snip
+ *
+ * The \ref H5T_cdata_t \c struct is declared as:
+ * \snippet this H5T_cdata_t_snip
+ *
+ * \since 1.6.3 The following change occurred in the \ref H5T_conv_t function:
+ * the \c nelmts parameter type changed to size_t.
+ *
+ */
+H5_DLL herr_t H5Tregister(H5T_pers_t pers, const char *name, hid_t src_id, hid_t dst_id, H5T_conv_t func);
+/**
+ * \ingroup CONV
+ *
+ * \brief Removes a conversion function
+ *
+ * \param[in] pers Conversion function type
+ * \param[in] name Name displayed in diagnostic output
+ * \type_id{src_id} of source datatype
+ * \type_id{dst_id} of destination datatype
+ * \param[in] func Function to convert between source and destination datatypes
+ *
+ * \return \herr_t
+ *
+ * \details H5Tunregister() removes a conversion function matching criteria
+ * such as soft or hard conversion, source and destination types, and
+ * the conversion function.
+ *
+ * If a user is trying to remove a conversion function he registered,
+ * all parameters can be used. If he is trying to remove a library’s
+ * default conversion function, there is no guarantee the \p name and
+ * \p func parameters will match the user’s chosen values. Passing in
+ * some values may cause this function to fail. A good practice is to
+ * pass in NULL as their values.
+ *
+ * All parameters are optional. The missing parameters will be used to
+ * generalize the search criteria.
+ *
+ * The conversion function pointer type declaration is described in
+ * H5Tregister().
+ *
+ * \version 1.6.3 The following change occurred in the \ref H5T_conv_t function:
+ * the \c nelmts parameter type changed to size_t.
+ *
+ */
+H5_DLL herr_t H5Tunregister(H5T_pers_t pers, const char *name, hid_t src_id, hid_t dst_id, H5T_conv_t func);
+/**
+ * \ingroup CONV
+ *
+ * \brief Finds a conversion function
+ *
+ * \type_id{src_id} of source datatype
+ * \type_id{dst_id} of destination datatype
+ * \param[out] pcdata Pointer to type conversion data
+ *
+ * \return Returns a pointer to a suitable conversion function if successful.
+ * Otherwise returns NULL.
+ *
+ * \details H5Tfind() finds a conversion function that can handle a conversion
+ * from type \p src_id to type \p dst_id. The \p pcdata argument is a
+ * pointer to a pointer to type conversion data which was created and
+ * initialized by the soft type conversion function of this path when
+ * the conversion function was installed on the path.
+ *
+ */
+H5_DLL H5T_conv_t H5Tfind(hid_t src_id, hid_t dst_id, H5T_cdata_t **pcdata);
+/**
+ * \ingroup CONV
+ *
+ * \brief Check whether the library’s default conversion is hard conversion
+ *
+ * \type_id{src_id} of source datatype
+ * \type_id{dst_id} of destination datatype
+ *
+ * \return \htri_t
+ *
+ * \details H5Tcompiler_conv() determines whether the library’s conversion
+ * function from type \p src_id to type \p dst_id is a compiler (hard)
+ * conversion or not. A compiler conversion uses compiler’s casting; a
+ * library (soft) conversion uses the library’s own conversion
+ * function.
+ *
+ * \since 1.8.0
+ *
+ */
+H5_DLL htri_t H5Tcompiler_conv(hid_t src_id, hid_t dst_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+/* Symbols defined for compatibility with previous versions of the HDF5 API.
+ *
+ * Use of these symbols is deprecated.
+ */
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+
+#endif /* _H5Tdevelop_H */
diff --git a/src/H5Tprivate.h b/src/H5Tprivate.h
index 2507afc..2a45e17 100644
--- a/src/H5Tprivate.h
+++ b/src/H5Tprivate.h
@@ -20,8 +20,9 @@
/* Early typedefs to avoid circular dependencies */
typedef struct H5T_t H5T_t;
-/* Get package's public header */
+/* Include package's public headers */
#include "H5Tpublic.h"
+#include "H5Tdevelop.h"
/* Other public headers needed by this file */
#include "H5MMpublic.h" /* Memory management */
diff --git a/src/H5Tpublic.h b/src/H5Tpublic.h
index 70a119d..bb5b0ef 100644
--- a/src/H5Tpublic.h
+++ b/src/H5Tpublic.h
@@ -153,52 +153,13 @@ typedef enum H5T_pad_t {
//! <!-- [H5T_pad_t_snip] -->
/**
- * Commands sent to conversion functions
- */
-typedef enum H5T_cmd_t {
- H5T_CONV_INIT = 0, /**< query and/or initialize private data */
- H5T_CONV_CONV = 1, /**< convert data from source to dest datatype */
- H5T_CONV_FREE = 2 /**< function is being removed from path */
-} H5T_cmd_t;
-
-/**
- * How is the `bkg' buffer used by the conversion function?
- */
-typedef enum H5T_bkg_t {
- H5T_BKG_NO = 0, /**< background buffer is not needed, send NULL */
- H5T_BKG_TEMP = 1, /**< bkg buffer used as temp storage only */
- H5T_BKG_YES = 2 /**< init bkg buf with data before conversion */
-} H5T_bkg_t;
-
-/**
- * Type conversion client data
- */
-//! <!-- [H5T_cdata_t_snip] -->
-typedef struct H5T_cdata_t {
- H5T_cmd_t command; /**< what should the conversion function do? */
- H5T_bkg_t need_bkg; /**< is the background buffer needed? */
- hbool_t recalc; /**< recalculate private data */
- void * priv; /**< private data */
-} H5T_cdata_t;
-//! <!-- [H5T_cdata_t_snip] -->
-
-/**
- * Conversion function persistence
- */
-typedef enum H5T_pers_t {
- H5T_PERS_DONTCARE = -1, /**< wild card */
- H5T_PERS_HARD = 0, /**< hard conversion function */
- H5T_PERS_SOFT = 1 /**< soft conversion function */
-} H5T_pers_t;
-
-/**
* The order to retrieve atomic native datatype
*/
//! <!-- [H5T_direction_t_snip] -->
typedef enum H5T_direction_t {
- H5T_DIR_DEFAULT = 0, /**< default direction is inscendent */
- H5T_DIR_ASCEND = 1, /**< in inscendent order */
- H5T_DIR_DESCEND = 2 /**< in descendent order */
+ H5T_DIR_DEFAULT = 0, /**< default direction is ascending */
+ H5T_DIR_ASCEND = 1, /**< in ascending order */
+ H5T_DIR_DESCEND = 2 /**< in descending order */
} H5T_direction_t;
//! <!-- [H5T_direction_t_snip] -->
@@ -258,14 +219,6 @@ typedef struct {
extern "C" {
#endif
-/**
- * All datatype conversion functions are...
- */
-//! <!-- [H5T_conv_t_snip] -->
-typedef herr_t (*H5T_conv_t)(hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata, size_t nelmts, size_t buf_stride,
- size_t bkg_stride, void *buf, void *bkg, hid_t dset_xfer_plist);
-//! <!-- [H5T_conv_t_snip] -->
-
//! <!-- [H5T_conv_except_func_t_snip] -->
/**
* \brief Exception handler.
@@ -279,7 +232,7 @@ typedef herr_t (*H5T_conv_t)(hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata, siz
* \returns Valid callback function return values are #H5T_CONV_ABORT,
* #H5T_CONV_UNHANDLED and #H5T_CONV_HANDLED.
*
- * \details If an exception like overflow happenes during conversion, this
+ * \details If an exception like overflow happens during conversion, this
* function is called if it's registered through H5Pset_type_conv_cb().
*
*/
@@ -2710,130 +2663,6 @@ H5_DLL herr_t H5Tset_cset(hid_t type_id, H5T_cset_t cset);
*/
H5_DLL herr_t H5Tset_strpad(hid_t type_id, H5T_str_t strpad);
-/* Type conversion database */
-/**
- * \ingroup CONV
- *
- * \brief Registers a datatype conversion function
- *
- * \param[in] pers Conversion function type
- * \param[in] name Name displayed in diagnostic output
- * \type_id{src_id} of source datatype
- * \type_id{dst_id} of destination datatype
- * \param[in] func Function to convert between source and destination datatypes
- *
- * \return \herr_t
- *
- * \details H5Tregister() registers a hard or soft conversion function for a
- * datatype conversion path. The parameter \p pers indicates whether a
- * conversion function is hard (#H5T_PERS_HARD) or soft
- * (#H5T_PERS_SOFT). User-defined functions employing compiler casting
- * are designated as \Emph{hard}; other user-defined conversion
- * functions registered with the HDF5 library (with H5Tregister() )
- * are designated as \Emph{soft}. The HDF5 library also has its own
- * hard and soft conversion functions.
- *
- * A conversion path can have only one hard function. When type is
- * #H5T_PERS_HARD, \p func replaces any previous hard function.
- *
- * When type is #H5T_PERS_SOFT, H5Tregister() adds the function to the
- * end of the master soft list and replaces the soft function in all
- * applicable existing conversion paths. Soft functions are used when
- * determining which conversion function is appropriate for this path.
- *
- * The \p name is used only for debugging and should be a short
- * identifier for the function.
- *
- * The path is specified by the source and destination datatypes \p
- * src_id and \p dst_id. For soft conversion functions, only the class
- * of these types is important.
- *
- * The type of the conversion function pointer is declared as:
- * \snippet this H5T_conv_t_snip
- *
- * The \ref H5T_cdata_t \c struct is declared as:
- * \snippet this H5T_cdata_t_snip
- *
- * \since 1.6.3 The following change occurred in the \ref H5T_conv_t function:
- * the \c nelmts parameter type changed to size_t.
- *
- */
-H5_DLL herr_t H5Tregister(H5T_pers_t pers, const char *name, hid_t src_id, hid_t dst_id, H5T_conv_t func);
-/**
- * \ingroup CONV
- *
- * \brief Removes a conversion function
- *
- * \param[in] pers Conversion function type
- * \param[in] name Name displayed in diagnostic output
- * \type_id{src_id} of source datatype
- * \type_id{dst_id} of destination datatype
- * \param[in] func Function to convert between source and destination datatypes
- *
- * \return \herr_t
- *
- * \details H5Tunregister() removes a conversion function matching criteria
- * such as soft or hard conversion, source and destination types, and
- * the conversion function.
- *
- * If a user is trying to remove a conversion function he registered,
- * all parameters can be used. If he is trying to remove a library’s
- * default conversion function, there is no guarantee the \p name and
- * \p func parameters will match the user’s chosen values. Passing in
- * some values may cause this function to fail. A good practice is to
- * pass in NULL as their values.
- *
- * All parameters are optional. The missing parameters will be used to
- * generalize the search criteria.
- *
- * The conversion function pointer type declaration is described in
- * H5Tregister().
- *
- * \version 1.6.3 The following change occurred in the \ref H5T_conv_t function:
- * the \c nelmts parameter type changed to size_t.
- *
- */
-H5_DLL herr_t H5Tunregister(H5T_pers_t pers, const char *name, hid_t src_id, hid_t dst_id, H5T_conv_t func);
-/**
- * \ingroup CONV
- *
- * \brief Finds a conversion function
- *
- * \type_id{src_id} of source datatype
- * \type_id{dst_id} of destination datatype
- * \param[out] pcdata Pointer to type conversion data
- *
- * \return Returns a pointer to a suitable conversion function if successful.
- * Otherwise returns NULL.
- *
- * \details H5Tfind() finds a conversion function that can handle a conversion
- * from type \p src_id to type \p dst_id. The \p pcdata argument is a
- * pointer to a pointer to type conversion data which was created and
- * initialized by the soft type conversion function of this path when
- * the conversion function was installed on the path.
- *
- */
-H5_DLL H5T_conv_t H5Tfind(hid_t src_id, hid_t dst_id, H5T_cdata_t **pcdata);
-/**
- * \ingroup CONV
- *
- * \brief Check whether the library’s default conversion is hard conversion
- *
- * \type_id{src_id} of source datatype
- * \type_id{dst_id} of destination datatype
- *
- * \return \htri_t
- *
- * \details H5Tcompiler_conv() determines whether the library’s conversion
- * function from type \p src_id to type \p dst_id is a compiler (hard)
- * conversion or not. A compiler conversion uses compiler’s casting; a
- * library (soft) conversion uses the library’s own conversion
- * function.
- *
- * \since 1.8.0
- *
- */
-H5_DLL htri_t H5Tcompiler_conv(hid_t src_id, hid_t dst_id);
/**
* --------------------------------------------------------------------------
* \ingroup CONV
diff --git a/src/H5VLcallback.c b/src/H5VLcallback.c
index 02e0ee0..80134a7 100644
--- a/src/H5VLcallback.c
+++ b/src/H5VLcallback.c
@@ -29,7 +29,7 @@
#include "H5private.h" /* Generic Functions */
#include "H5Eprivate.h" /* Error handling */
-#include "H5Fprivate.h" /* File access */
+#include "H5Fprivate.h" /* File access */
#include "H5Iprivate.h" /* IDs */
#include "H5MMprivate.h" /* Memory management */
#include "H5Pprivate.h" /* Property lists */
diff --git a/src/H5VLnative.h b/src/H5VLnative.h
index 360317d..425d833 100644
--- a/src/H5VLnative.h
+++ b/src/H5VLnative.h
@@ -39,6 +39,9 @@
#ifndef H5_NO_DEPRECATED_SYMBOLS
#define H5VL_NATIVE_ATTR_ITERATE_OLD 0 /* H5Aiterate (deprecated routine) */
#endif /* H5_NO_DEPRECATED_SYMBOLS */
+/* NOTE: If values over 1023 are added, the H5VL_RESERVED_NATIVE_OPTIONAL macro
+ * must be updated.
+ */
/* Values for native VOL connector dataset optional VOL operations */
/* NOTE: If new values are added here, the H5VL__native_introspect_opt_query
@@ -54,6 +57,9 @@
#define H5VL_NATIVE_DATASET_CHUNK_WRITE 7 /* H5Dchunk_write */
#define H5VL_NATIVE_DATASET_GET_VLEN_BUF_SIZE 8 /* H5Dvlen_get_buf_size */
#define H5VL_NATIVE_DATASET_GET_OFFSET 9 /* H5Dget_offset */
+/* NOTE: If values over 1023 are added, the H5VL_RESERVED_NATIVE_OPTIONAL macro
+ * must be updated.
+ */
/* Values for native VOL connector file optional VOL operations */
/* NOTE: If new values are added here, the H5VL__native_introspect_opt_query
@@ -85,12 +91,17 @@
#define H5VL_NATIVE_FILE_SET_LIBVER_BOUNDS 23 /* H5Fset_latest_format/libver_bounds */
#define H5VL_NATIVE_FILE_GET_MIN_DSET_OHDR_FLAG 24 /* H5Fget_dset_no_attrs_hint */
#define H5VL_NATIVE_FILE_SET_MIN_DSET_OHDR_FLAG 25 /* H5Fset_dset_no_attrs_hint */
-#define H5VL_NATIVE_FILE_GET_MPI_ATOMICITY 26 /* H5Fget_mpi_atomicity */
-#define H5VL_NATIVE_FILE_SET_MPI_ATOMICITY 27 /* H5Fset_mpi_atomicity */
-#define H5VL_NATIVE_FILE_POST_OPEN 28 /* Adjust file after open, with wrapping context */
-#define H5VL_NATIVE_FILE_VFD_SWMR_DISABLE_EOT 29
-#define H5VL_NATIVE_FILE_VFD_SWMR_ENABLE_EOT 30
-#define H5VL_NATIVE_FILE_VFD_SWMR_END_TICK 31
+#ifdef H5_HAVE_PARALLEL
+#define H5VL_NATIVE_FILE_GET_MPI_ATOMICITY 26 /* H5Fget_mpi_atomicity */
+#define H5VL_NATIVE_FILE_SET_MPI_ATOMICITY 27 /* H5Fset_mpi_atomicity */
+#endif
+#define H5VL_NATIVE_FILE_POST_OPEN 28 /* Adjust file after open, with wrapping context */
+#define H5VL_NATIVE_FILE_VFD_SWMR_DISABLE_EOT 29
+#define H5VL_NATIVE_FILE_VFD_SWMR_ENABLE_EOT 30
+#define H5VL_NATIVE_FILE_VFD_SWMR_END_TICK 31
+/* NOTE: If values over 1023 are added, the H5VL_RESERVED_NATIVE_OPTIONAL macro
+ * must be updated.
+ */
/* Values for native VOL connector group optional VOL operations */
/* NOTE: If new values are added here, the H5VL__native_introspect_opt_query
@@ -100,6 +111,9 @@
#define H5VL_NATIVE_GROUP_ITERATE_OLD 0 /* HG5Giterate (deprecated routine) */
#define H5VL_NATIVE_GROUP_GET_OBJINFO 1 /* HG5Gget_objinfo (deprecated routine) */
#endif /* H5_NO_DEPRECATED_SYMBOLS */
+/* NOTE: If values over 1023 are added, the H5VL_RESERVED_NATIVE_OPTIONAL macro
+ * must be updated.
+ */
/* Values for native VOL connector object optional VOL operations */
/* NOTE: If new values are added here, the H5VL__native_introspect_opt_query
@@ -111,6 +125,9 @@
#define H5VL_NATIVE_OBJECT_ENABLE_MDC_FLUSHES 3 /* H5Oenable_mdc_flushes */
#define H5VL_NATIVE_OBJECT_ARE_MDC_FLUSHES_DISABLED 4 /* H5Oare_mdc_flushes_disabled */
#define H5VL_NATIVE_OBJECT_GET_NATIVE_INFO 5 /* H5Oget_native_info(_by_idx, _by_name) */
+/* NOTE: If values over 1023 are added, the H5VL_RESERVED_NATIVE_OPTIONAL macro
+ * must be updated.
+ */
/*******************/
/* Public Typedefs */
diff --git a/src/H5VLnative_attr.c b/src/H5VLnative_attr.c
index 7beb98f..f5e5f29 100644
--- a/src/H5VLnative_attr.c
+++ b/src/H5VLnative_attr.c
@@ -15,8 +15,15 @@
*
*/
+/****************/
+/* Module Setup */
+/****************/
+
#define H5A_FRIEND /* Suppress error about including H5Apkg */
+/***********/
+/* Headers */
+/***********/
#include "H5private.h" /* Generic Functions */
#include "H5Apkg.h" /* Attributes */
#include "H5Eprivate.h" /* Error handling */
@@ -30,6 +37,30 @@
#include "H5VLnative_private.h" /* Native VOL connector */
+/****************/
+/* Local Macros */
+/****************/
+
+/******************/
+/* Local Typedefs */
+/******************/
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+/*******************/
+/* Local Variables */
+/*******************/
+
/*-------------------------------------------------------------------------
* Function: H5VL__native_attr_create
*
diff --git a/src/H5VLnative_dataset.c b/src/H5VLnative_dataset.c
index 21491e7..978ecb3 100644
--- a/src/H5VLnative_dataset.c
+++ b/src/H5VLnative_dataset.c
@@ -15,8 +15,15 @@
*
*/
+/****************/
+/* Module Setup */
+/****************/
+
#define H5D_FRIEND /* Suppress error about including H5Dpkg */
+/***********/
+/* Headers */
+/***********/
#include "H5private.h" /* Generic Functions */
#include "H5CXprivate.h" /* API Contexts */
#include "H5Dpkg.h" /* Datasets */
@@ -30,6 +37,30 @@
#include "H5VLnative_private.h" /* Native VOL connector */
+/****************/
+/* Local Macros */
+/****************/
+
+/******************/
+/* Local Typedefs */
+/******************/
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+/*******************/
+/* Local Variables */
+/*******************/
+
/*-------------------------------------------------------------------------
* Function: H5VL__native_dataset_create
*
diff --git a/src/H5VLnative_datatype.c b/src/H5VLnative_datatype.c
index 9551f50..84b13c3 100644
--- a/src/H5VLnative_datatype.c
+++ b/src/H5VLnative_datatype.c
@@ -15,8 +15,15 @@
*
*/
+/****************/
+/* Module Setup */
+/****************/
+
#define H5T_FRIEND /* Suppress error about including H5Tpkg */
+/***********/
+/* Headers */
+/***********/
#include "H5private.h" /* Generic Functions */
#include "H5Eprivate.h" /* Error handling */
#include "H5Gprivate.h" /* Groups */
@@ -28,6 +35,30 @@
#include "H5VLnative_private.h" /* Native VOL connector */
+/****************/
+/* Local Macros */
+/****************/
+
+/******************/
+/* Local Typedefs */
+/******************/
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+/*******************/
+/* Local Variables */
+/*******************/
+
/*-------------------------------------------------------------------------
* Function: H5VL__native_datatype_commit
*
diff --git a/src/H5VLnative_file.c b/src/H5VLnative_file.c
index 4f7bb90..3bcaaba 100644
--- a/src/H5VLnative_file.c
+++ b/src/H5VLnative_file.c
@@ -15,8 +15,15 @@
*
*/
+/****************/
+/* Module Setup */
+/****************/
+
#define H5F_FRIEND /* Suppress error about including H5Fpkg */
+/***********/
+/* Headers */
+/***********/
#include "H5private.h" /* Generic Functions */
#include "H5ACprivate.h" /* Metadata cache */
#include "H5Cprivate.h" /* Cache */
@@ -31,6 +38,30 @@
#include "H5VLnative_private.h" /* Native VOL connector */
+/****************/
+/* Local Macros */
+/****************/
+
+/******************/
+/* Local Typedefs */
+/******************/
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+/*******************/
+/* Local Variables */
+/*******************/
+
/*-------------------------------------------------------------------------
* Function: H5VL__native_file_create
*
@@ -655,11 +686,11 @@ H5VL__native_file_optional(void *obj, H5VL_file_optional_t optional_type, hid_t
/* H5Freset_page_buffering_stats */
case H5VL_NATIVE_FILE_RESET_PAGE_BUFFERING_STATS: {
/* Sanity check */
- if (NULL == f->shared->pb_ptr)
+ if (NULL == f->shared->page_buf)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "page buffering not enabled on file")
/* Reset the statistics */
- if (H5PB_reset_stats(f->shared->pb_ptr) < 0)
+ if (H5PB_reset_stats(f->shared->page_buf) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "can't reset stats for page buffering")
break;
@@ -674,11 +705,11 @@ H5VL__native_file_optional(void *obj, H5VL_file_optional_t optional_type, hid_t
unsigned *bypasses = HDva_arg(arguments, unsigned *);
/* Sanity check */
- if (NULL == f->shared->pb_ptr)
+ if (NULL == f->shared->page_buf)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "page buffering not enabled on file")
/* Get the statistics */
- if (H5PB_get_stats(f->shared->pb_ptr, accesses, hits, misses, evictions, bypasses) < 0)
+ if (H5PB_get_stats(f->shared->page_buf, accesses, hits, misses, evictions, bypasses) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "can't retrieve stats for page buffering")
break;
diff --git a/src/H5VLnative_group.c b/src/H5VLnative_group.c
index e3fa702..53f8459 100644
--- a/src/H5VLnative_group.c
+++ b/src/H5VLnative_group.c
@@ -15,8 +15,15 @@
*
*/
+/****************/
+/* Module Setup */
+/****************/
+
#define H5G_FRIEND /* Suppress error about including H5Gpkg */
+/***********/
+/* Headers */
+/***********/
#include "H5private.h" /* Generic Functions */
#include "H5Eprivate.h" /* Error handling */
#include "H5Gpkg.h" /* Groups */
@@ -27,6 +34,30 @@
#include "H5VLnative_private.h" /* Native VOL connector */
+/****************/
+/* Local Macros */
+/****************/
+
+/******************/
+/* Local Typedefs */
+/******************/
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+/*******************/
+/* Local Variables */
+/*******************/
+
/*-------------------------------------------------------------------------
* Function: H5VL__native_group_create
*
diff --git a/src/H5VLnative_introspect.c b/src/H5VLnative_introspect.c
index 6af33ba..45efcf8 100644
--- a/src/H5VLnative_introspect.c
+++ b/src/H5VLnative_introspect.c
@@ -15,12 +15,43 @@
*
*/
+/****************/
+/* Module Setup */
+/****************/
+
+/***********/
+/* Headers */
+/***********/
#include "H5private.h" /* Generic Functions */
#include "H5Eprivate.h" /* Error handling */
#include "H5VLprivate.h" /* Virtual Object Layer */
#include "H5VLnative_private.h" /* Native VOL connector */
+/****************/
+/* Local Macros */
+/****************/
+
+/******************/
+/* Local Typedefs */
+/******************/
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+/*******************/
+/* Local Variables */
+/*******************/
+
/* Note: H5VL__native_introspect_get_conn_cls is in src/H5VLnative.c so that
* it can return the address of the staticly declared class struct.
*/
@@ -150,8 +181,10 @@ H5VL__native_introspect_opt_query(void H5_ATTR_UNUSED *obj, H5VL_subclass_t subc
case H5VL_NATIVE_FILE_SET_LIBVER_BOUNDS:
case H5VL_NATIVE_FILE_GET_MIN_DSET_OHDR_FLAG:
case H5VL_NATIVE_FILE_SET_MIN_DSET_OHDR_FLAG:
+#ifdef H5_HAVE_PARALLEL
case H5VL_NATIVE_FILE_GET_MPI_ATOMICITY:
case H5VL_NATIVE_FILE_SET_MPI_ATOMICITY:
+#endif /* H5_HAVE_PARALLEL */
case H5VL_NATIVE_FILE_POST_OPEN:
break;
diff --git a/src/H5VLnative_link.c b/src/H5VLnative_link.c
index 72f6cde..eaba18f 100644
--- a/src/H5VLnative_link.c
+++ b/src/H5VLnative_link.c
@@ -15,8 +15,15 @@
*
*/
+/****************/
+/* Module Setup */
+/****************/
+
#define H5L_FRIEND /* Suppress error about including H5Lpkg */
+/***********/
+/* Headers */
+/***********/
#include "H5private.h" /* Generic Functions */
#include "H5Eprivate.h" /* Error handling */
#include "H5Gprivate.h" /* Groups */
@@ -27,6 +34,30 @@
#include "H5VLnative_private.h" /* Native VOL connector */
+/****************/
+/* Local Macros */
+/****************/
+
+/******************/
+/* Local Typedefs */
+/******************/
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+/*******************/
+/* Local Variables */
+/*******************/
+
/*-------------------------------------------------------------------------
* Function: H5VL__native_link_create
*
diff --git a/src/H5VLnative_object.c b/src/H5VLnative_object.c
index 449f389..95d4fff 100644
--- a/src/H5VLnative_object.c
+++ b/src/H5VLnative_object.c
@@ -15,9 +15,16 @@
*
*/
-#define H5O_FRIEND /* Suppress error about including H5Opkg */
+/****************/
+/* Module Setup */
+/****************/
+
#define H5F_FRIEND /* Suppress error about including H5Fpkg */
+#define H5O_FRIEND /* Suppress error about including H5Opkg */
+/***********/
+/* Headers */
+/***********/
#include "H5private.h" /* Generic Functions */
#include "H5Eprivate.h" /* Error handling */
#include "H5Fpkg.h" /* Files (pkg needed for id_exists) */
@@ -29,6 +36,30 @@
#include "H5VLnative_private.h" /* Native VOL connector */
+/****************/
+/* Local Macros */
+/****************/
+
+/******************/
+/* Local Typedefs */
+/******************/
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+/*******************/
+/* Local Variables */
+/*******************/
+
/*-------------------------------------------------------------------------
* Function: H5VL__native_object_open
*
diff --git a/src/H5VLnative_token.c b/src/H5VLnative_token.c
index 65591c7..bed0164 100644
--- a/src/H5VLnative_token.c
+++ b/src/H5VLnative_token.c
@@ -14,6 +14,10 @@
* Purpose: Object token callbacks for the native VOL connector
*/
+/****************/
+/* Module Setup */
+/****************/
+
/***********/
/* Headers */
/***********/
diff --git a/src/H5Zdevelop.h b/src/H5Zdevelop.h
new file mode 100644
index 0000000..328e221
--- /dev/null
+++ b/src/H5Zdevelop.h
@@ -0,0 +1,421 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * This file contains public declarations for the H5Z (data filter) developer
+ * support routines.
+ */
+
+#ifndef _H5Zdevelop_H
+#define _H5Zdevelop_H
+
+/* Include package's public header */
+#include "H5Zpublic.h"
+
+/*****************/
+/* Public Macros */
+/*****************/
+
+/**
+ * Current version of the H5Z_class_t struct
+ */
+#define H5Z_CLASS_T_VERS (1)
+
+/*******************/
+/* Public Typedefs */
+/*******************/
+
+/**
+ * Structure for filter callback property
+ */
+typedef struct H5Z_cb_t {
+ H5Z_filter_func_t func;
+ void * op_data;
+} H5Z_cb_t;
+
+/**
+ * \brief This callback determines if a filter can be applied to the dataset
+ * with the characteristics provided
+ *
+ * \dcpl_id
+ * \type_id
+ * \space_id
+ *
+ * \return \htri_t
+ *
+ * \details Before a dataset gets created, the \ref H5Z_can_apply_func_t
+ * callbacks for any filters used in the dataset creation property list
+ * are called with the dataset's dataset creation property list, the
+ * dataset's datatype and a dataspace describing a chunk (for chunked
+ * dataset storage).
+ *
+ * The \ref H5Z_can_apply_func_t callback must determine if the
+ * combination of the dataset creation property list setting, the
+ * datatype and the dataspace represent a valid combination to apply
+ * this filter to. For example, some cases of invalid combinations may
+ * involve the filter not operating correctly on certain datatypes (or
+ * certain datatype sizes), or certain sizes of the chunk dataspace.
+ *
+ * The \ref H5Z_can_apply_func_t callback can be the NULL pointer, in
+ * which case, the library will assume that it can apply to any
+ * combination of dataset creation property list values, datatypes and
+ * dataspaces.
+ *
+ *                The \ref H5Z_can_apply_func_t callback returns positive for a
+ *                valid combination, zero for an invalid combination and negative
+ *                for an error.
+ */
+//! <!-- [H5Z_can_apply_func_t_snip] -->
+typedef htri_t (*H5Z_can_apply_func_t)(hid_t dcpl_id, hid_t type_id, hid_t space_id);
+//! <!-- [H5Z_can_apply_func_t_snip] -->
+
+/**
+ * \brief The "set local" callback function, setting the parameters that are
+ *        specific to a particular dataset for a filter
+ *
+ * \dcpl_id
+ * \type_id
+ * \space_id
+ *
+ * \return \herr_t
+ *
+ * \details After the \ref H5Z_can_apply_func_t callbacks are checked for new
+ * datasets, the \ref H5Z_set_local_func_t callbacks for any filters
+ * used in the dataset creation property list are called. These
+ * callbacks receive the dataset's private copy of the dataset creation
+ * property list passed in to H5Dcreate() (i.e. not the actual property
+ * list passed in to H5Dcreate()) and the datatype ID passed in to
+ * H5Dcreate() (which is not copied and should not be modified) and a
+ * dataspace describing the chunk (for chunked dataset storage) (which
+ * should also not be modified).
+ *
+ * The \ref H5Z_set_local_func_t callback must set any parameters that
+ * are specific to this dataset, based on the combination of the
+ * dataset creation property list values, the datatype and the
+ * dataspace. For example, some filters perform different actions based
+ * on different datatypes (or datatype sizes) or different number of
+ * dimensions or dataspace sizes.
+ *
+ * The \ref H5Z_set_local_func_t callback can be the NULL pointer, in
+ * which case, the library will assume that there are no
+ * dataset-specific settings for this filter.
+ *
+ * The \ref H5Z_set_local_func_t callback must return non-negative on
+ * success and negative for an error.
+ */
+//! <!-- [H5Z_set_local_func_t_snip] -->
+typedef herr_t (*H5Z_set_local_func_t)(hid_t dcpl_id, hid_t type_id, hid_t space_id);
+//! <!-- [H5Z_set_local_func_t_snip] -->
+
+/**
+ * \brief The filter operation callback function, defining a filter's operation
+ * on data
+ *
+ * \param[in] flags Bit vector specifying certain general properties of the filter
+ * \param[in] cd_nelmts Number of elements in \p cd_values
+ * \param[in] cd_values Auxiliary data for the filter
+ * \param[in] nbytes The number of valid bytes in \p buf to be filtered
+ * \param[in,out] buf_size The size of \p buf
+ * \param[in,out] buf The filter buffer
+ *
+ * \return Returns the number of valid bytes of data contained in \p buf. In the
+ * case of failure, the return value is 0 (zero) and all pointer
+ * arguments are left unchanged.
+ *
+ * \details A filter gets definition flags and invocation flags (defined
+ * above), the client data array and size defined when the filter was
+ * added to the pipeline, the size in bytes of the data on which to
+ * operate, and pointers to a buffer and its allocated size.
+ *
+ * The filter should store the result in the supplied buffer if
+ * possible, otherwise it can allocate a new buffer, freeing the
+ * original. The allocated size of the new buffer should be returned
+ * through the \p buf_size pointer and the new buffer through the \p
+ * buf pointer.
+ *
+ * The return value from the filter is the number of bytes in the
+ * output buffer. If an error occurs then the function should return
+ * zero and leave all pointer arguments unchanged.
+ */
+//! <!-- [H5Z_func_t_snip] -->
+typedef size_t (*H5Z_func_t)(unsigned int flags, size_t cd_nelmts, const unsigned int cd_values[],
+ size_t nbytes, size_t *buf_size, void **buf);
+//! <!-- [H5Z_func_t_snip] -->
+
+/**
+ * The filter table maps filter identification numbers to structs that
+ * contain pointers to the filter function and timing statistics.
+ */
+//! <!-- [H5Z_class2_t_snip] -->
+typedef struct H5Z_class2_t {
+ int version; /**< Version number of the H5Z_class_t struct */
+ H5Z_filter_t id; /**< Filter ID number */
+ unsigned encoder_present; /**< Does this filter have an encoder? */
+ unsigned decoder_present; /**< Does this filter have a decoder? */
+ const char * name; /**< Comment for debugging */
+ H5Z_can_apply_func_t can_apply; /**< The "can apply" callback for a filter */
+ H5Z_set_local_func_t set_local; /**< The "set local" callback for a filter */
+ H5Z_func_t filter; /**< The actual filter function */
+} H5Z_class2_t;
+//! <!-- [H5Z_class2_t_snip] -->
+
+/********************/
+/* Public Variables */
+/********************/
+
+/*********************/
+/* Public Prototypes */
+/*********************/
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * \ingroup H5Z
+ *
+ * \brief Registers a new filter with the HDF5 library
+ *
+ * \param[in] cls A pointer to a buffer for the struct containing the
+ * filter-definition
+ *
+ * \return \herr_t
+ *
+ * \details H5Zregister() registers a new filter with the HDF5 library.
+ *
+ * \details Making a new filter available to an application is a two-step
+ * process. The first step is to write the three filter callback
+ * functions described below: \c can_apply, \c set_local, and \c
+ * filter. This call to H5Zregister(), registering the filter with the
+ * library, is the second step. The can_apply and set_local fields can
+ * be set to NULL if they are not required for the filter being
+ * registered.
+ *
+ * H5Zregister() accepts a single parameter, a pointer to a buffer for
+ * the \p cls data structure. That data structure must conform to one
+ * of the following definitions:
+ * \snippet this H5Z_class1_t_snip
+ * or
+ * \snippet this H5Z_class2_t_snip
+ *
+ * \c version is a library-defined value reporting the version number
+ * of the #H5Z_class_t struct. This currently must be set to
+ * #H5Z_CLASS_T_VERS.
+ *
+ * \c id is the identifier for the new filter. This is a user-defined
+ * value between #H5Z_FILTER_RESERVED and #H5Z_FILTER_MAX. These
+ * values are defined in the HDF5 source file H5Zpublic.h, but the
+ * symbols #H5Z_FILTER_RESERVED and #H5Z_FILTER_MAX should always be
+ * used instead of the literal values.
+ *
+ * \c encoder_present is a library-defined value indicating whether
+ * the filter’s encoding capability is available to the application.
+ *
+ *          \c decoder_present is a library-defined value indicating whether
+ *          the filter’s decoding capability is available to the application.
+ *
+ * \c name is a descriptive comment used for debugging, may contain a
+ * descriptive name for the filter, and may be the null pointer.
+ *
+ * \c can_apply, described in detail below, is a user-defined callback
+ * function which determines whether the combination of the dataset
+ * creation property list values, the datatype, and the dataspace
+ * represent a valid combination to apply this filter to.
+ *
+ * \c set_local, described in detail below, is a user-defined callback
+ * function which sets any parameters that are specific to this
+ * dataset, based on the combination of the dataset creation property
+ * list values, the datatype, and the dataspace.
+ *
+ * \c filter, described in detail below, is a user-defined callback
+ * function which performs the action of the filter.
+ *
+ * The statistics associated with a filter are not reset by this
+ * function; they accumulate over the life of the library.
+ *
+ * #H5Z_class_t is a macro which maps to either H5Z_class1_t or
+ * H5Z_class2_t, depending on the needs of the application. To affect
+ * only this macro, H5Z_class_t_vers may be defined to either 1 or 2.
+ * Otherwise, it will behave in the same manner as other API
+ * compatibility macros. See API Compatibility Macros in HDF5 for more
+ * information. H5Z_class1_t matches the #H5Z_class_t structure that is
+ * used in the 1.6.x versions of the HDF5 library.
+ *
+ * H5Zregister() will automatically detect which structure type has
+ * been passed in, regardless of the mapping of the #H5Z_class_t macro.
+ * However, the application must make sure that the fields are filled
+ * in according to the correct structure definition if the macro is
+ * used to declare the structure.
+ *
+ * \Bold{The callback functions:}\n Before H5Zregister() can link a
+ * filter into an application, three callback functions must be
+ * defined as described in the HDF5 library header file H5Zpublic.h.
+ *
+ * When a filter is applied to the fractal heap for a group (e.g.,
+ * when compressing group metadata) and if the can apply and set local
+ * callback functions have been defined for that filter, HDF5 passes
+ * the value -1 for all parameters for those callback functions. This
+ * is done to ensure that the filter will not be applied to groups if
+ * it relies on these parameters, as they are not applicable to group
+ * fractal heaps; to operate on group fractal heaps, a filter must be
+ * capable of operating on an opaque block of binary data.
+ *
+ * The \Emph{can apply} callback function must return a positive value
+ * for a valid combination, zero for an invalid combination, and a
+ * negative value for an error.
+ * \snippet this H5Z_can_apply_func_t_snip
+ *
+ * Before a dataset is created, the \Emph{can apply} callbacks for any
+ * filters used in the dataset creation property list are called with
+ * the dataset's dataset creation property list, \c dcpl_id, the
+ * dataset's datatype, \p type_id, and a dataspace describing a chunk,
+ * \p space_id, (for chunked dataset storage).
+ *
+ * This callback must determine whether the combination of the dataset
+ * creation property list settings, the datatype, and the dataspace
+ * represent a valid combination to which to apply this filter. For
+ * example, an invalid combination may involve the filter not
+ * operating correctly on certain datatypes, on certain datatype
+ * sizes, or on certain sizes of the chunk dataspace. If this filter
+ * is enabled through H5Pset_filter() as optional and the can apply
+ * function returns 0, the library will skip the filter in the filter
+ * pipeline.
+ *
+ * This callback can be the NULL pointer, in which case the library
+ * will assume that the filter can be applied to a dataset with any
+ * combination of dataset creation property list values, datatypes,
+ * and dataspaces.
+ *
+ * The \Emph{set local} callback function is defined as follows:
+ * \snippet this H5Z_set_local_func_t_snip
+ *
+ * After the can apply callbacks are checked for a new dataset, the
+ * \Emph{set local} callback functions for any filters used in the
+ * dataset creation property list are called. These callbacks receive
+ * \c dcpl_id, the dataset's private copy of the dataset creation
+ * property list passed in to H5Dcreate() (i.e. not the actual
+ * property list passed in to H5Dcreate()); \c type_id, the datatype
+ * identifier passed in to H5Dcreate(), which is not copied and should
+ * not be modified; and \c space_id, a dataspace describing the chunk
+ * (for chunked dataset storage), which should also not be modified.
+ *
+ * The set local callback must set any filter parameters that are
+ * specific to this dataset, based on the combination of the dataset
+ * creation property list values, the datatype, and the dataspace. For
+ * example, some filters perform different actions based on different
+ * datatypes, datatype sizes, numbers of dimensions, or dataspace
+ * sizes.
+ *
+ * The \Emph{set local} callback may be the NULL pointer, in which
+ * case, the library will assume that there are no dataset-specific
+ * settings for this filter.
+ *
+ * The \Emph{set local} callback function must return a non-negative
+ * value on success and a negative value for an error.
+ *
+ * The \Emph{filter operation} callback function, defining the
+ * filter's operation on the data, is defined as follows:
+ * \snippet this H5Z_func_t_snip
+ *
+ * The parameters \c flags, \c cd_nelmts, and \c cd_values are the
+ * same as for the function H5Pset_filter(). The one exception is that
+ * an additional flag, #H5Z_FLAG_REVERSE, is set when the filter is
+ * called as part of the input pipeline.
+ *
+ * The parameter \c buf points to the input buffer which has a size of
+ * \c buf_size bytes, \c nbytes of which are valid data.
+ *
+ * The filter should perform the transformation in place if possible.
+ * If the transformation cannot be done in place, then the filter
+ * should allocate a new buffer with malloc() and assign it to \c buf,
+ * assigning the allocated size of that buffer to \c buf_size. The old
+ * buffer should be freed by calling free().
+ *
+ * If successful, the \Emph{filter operation} callback function
+ * returns the number of valid bytes of data contained in \c buf. In
+ * the case of failure, the return value is 0 (zero) and all pointer
+ * arguments are left unchanged.
+ *
+ * \version 1.8.6 Return type for the \Emph{can apply} callback function,
+ * \ref H5Z_can_apply_func_t, changed to \ref htri_t.
+ * \version 1.8.5 Semantics of the \Emph{can apply} and \Emph{set local}
+ * callback functions changed to accommodate the use of filters
+ * with group fractal heaps.
+ * \version 1.8.3 #H5Z_class_t renamed to H5Z_class2_t, H5Z_class1_t structure
+ * introduced for backwards compatibility with release 1.6.x,
+ * and #H5Z_class_t macro introduced in this release. Function
+ * modified to accept either structure type.
+ * \version 1.8.0 The fields \c version, \c encoder_present, and
+ * \c decoder_present were added to the #H5Z_class_t \c struct
+ * in this release.
+ * \version 1.6.0 This function was substantially revised in Release 1.6.0 with
+ * a new #H5Z_class_t struct and new set local and can apply
+ * callback functions.
+ *
+ */
+H5_DLL herr_t H5Zregister(const void *cls);
+/**
+ * \ingroup H5Z
+ *
+ * \brief Unregisters a filter.
+ *
+ * \param[in] id Identifier of the filter to be unregistered.
+ * \return \herr_t
+ *
+ * \details H5Zunregister() unregisters the filter specified in \p id.
+ *
+ * \details This function first iterates through all opened datasets and
+ * groups. If an open object that uses this filter is found, the
+ * function will fail with a message indicating that an object using
+ * the filter is still open. All open files are then flushed to make
+ * sure that all cached data that may use this filter are written out.
+ *
+ * If the application is a parallel program, all processes that
+ * participate in collective data write should call this function to
+ * ensure that all data is flushed.
+ *
+ * After a call to H5Zunregister(), the filter specified in filter
+ * will no longer be available to the application.
+ *
+ * \version 1.8.12 Function modified to check for open objects using the
+ * filter.
+ * \since 1.6.0
+ */
+H5_DLL herr_t H5Zunregister(H5Z_filter_t id);
+
+#ifdef __cplusplus
+}
+#endif
+
+/* Symbols defined for compatibility with previous versions of the HDF5 API.
+ *
+ * Use of these symbols is deprecated.
+ */
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+
+/**
+ * The filter table maps filter identification numbers to structs that
+ * contain pointers to the filter function and timing statistics.
+ */
+//! <!-- [H5Z_class1_t_snip] -->
+typedef struct H5Z_class1_t {
+ H5Z_filter_t id; /**< Filter ID number */
+ const char * name; /**< Comment for debugging */
+ H5Z_can_apply_func_t can_apply; /**< The "can apply" callback for a filter */
+ H5Z_set_local_func_t set_local; /**< The "set local" callback for a filter */
+ H5Z_func_t filter; /**< The actual filter function */
+} H5Z_class1_t;
+//! <!-- [H5Z_class1_t_snip] -->
+
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+
+#endif /* _H5Zdevelop_H */
diff --git a/src/H5Zprivate.h b/src/H5Zprivate.h
index 51690b4..13eb26f 100644
--- a/src/H5Zprivate.h
+++ b/src/H5Zprivate.h
@@ -21,8 +21,9 @@
/* Early typedefs to avoid circular dependencies */
typedef struct H5Z_filter_info_t H5Z_filter_info_t;
-/* Include package's public header */
+/* Include package's public headers */
#include "H5Zpublic.h"
+#include "H5Zdevelop.h"
/* Private headers needed by this file */
#include "H5Tprivate.h" /* Datatypes */
diff --git a/src/H5Zpublic.h b/src/H5Zpublic.h
index 90277cf..5d04c9a 100644
--- a/src/H5Zpublic.h
+++ b/src/H5Zpublic.h
@@ -207,11 +207,6 @@ typedef enum H5Z_SO_scale_type_t {
} H5Z_SO_scale_type_t;
/**
- * Current version of the H5Z_class_t struct
- */
-#define H5Z_CLASS_T_VERS (1)
-
-/**
* \ingroup FLETCHER32
* Values to decide if EDC is enabled for reading data
*/
@@ -243,354 +238,11 @@ typedef enum H5Z_cb_return_t {
typedef H5Z_cb_return_t (*H5Z_filter_func_t)(H5Z_filter_t filter, void *buf, size_t buf_size, void *op_data);
//! <!-- [H5Z_filter_func_t_snip] -->
-/**
- * Structure for filter callback property
- */
-typedef struct H5Z_cb_t {
- H5Z_filter_func_t func;
- void * op_data;
-} H5Z_cb_t;
-
#ifdef __cplusplus
extern "C" {
#endif
/**
- * \brief This callback determines if a filter can be applied to the dataset
- * with the characteristics provided
- *
- * \dcpl_id
- * \type_id
- * \space_id
- *
- * \return \htri_t
- *
- * \details Before a dataset gets created, the \ref H5Z_can_apply_func_t
- * callbacks for any filters used in the dataset creation property list
- * are called with the dataset's dataset creation property list, the
- * dataset's datatype and a dataspace describing a chunk (for chunked
- * dataset storage).
- *
- * The \ref H5Z_can_apply_func_t callback must determine if the
- * combination of the dataset creation property list setting, the
- * datatype and the dataspace represent a valid combination to apply
- * this filter to. For example, some cases of invalid combinations may
- * involve the filter not operating correctly on certain datatypes (or
- * certain datatype sizes), or certain sizes of the chunk dataspace.
- *
- * The \ref H5Z_can_apply_func_t callback can be the NULL pointer, in
- * which case, the library will assume that it can apply to any
- * combination of dataset creation property list values, datatypes and
- * dataspaces.
- *
- * The \ref H5Z_can_apply_func_t callback returns positive a valid
- * combination, zero for an invalid combination and negative for an
- * error.
- */
-//! <!-- [H5Z_can_apply_func_t_snip] -->
-typedef htri_t (*H5Z_can_apply_func_t)(hid_t dcpl_id, hid_t type_id, hid_t space_id);
-//! <!-- [H5Z_can_apply_func_t_snip] -->
-/**
- * \brief The filter operation callback function, defining a filter's operation
- * on data
- *
- * \dcpl_id
- * \type_id
- * \space_id
- *
- * \return \herr_t
- *
- * \details After the \ref H5Z_can_apply_func_t callbacks are checked for new
- * datasets, the \ref H5Z_set_local_func_t callbacks for any filters
- * used in the dataset creation property list are called. These
- * callbacks receive the dataset's private copy of the dataset creation
- * property list passed in to H5Dcreate() (i.e. not the actual property
- * list passed in to H5Dcreate()) and the datatype ID passed in to
- * H5Dcreate() (which is not copied and should not be modified) and a
- * dataspace describing the chunk (for chunked dataset storage) (which
- * should also not be modified).
- *
- * The \ref H5Z_set_local_func_t callback must set any parameters that
- * are specific to this dataset, based on the combination of the
- * dataset creation property list values, the datatype and the
- * dataspace. For example, some filters perform different actions based
- * on different datatypes (or datatype sizes) or different number of
- * dimensions or dataspace sizes.
- *
- * The \ref H5Z_set_local_func_t callback can be the NULL pointer, in
- * which case, the library will assume that there are no
- * dataset-specific settings for this filter.
- *
- * The \ref H5Z_set_local_func_t callback must return non-negative on
- * success and negative for an error.
- */
-//! <!-- [H5Z_set_local_func_t_snip] -->
-typedef herr_t (*H5Z_set_local_func_t)(hid_t dcpl_id, hid_t type_id, hid_t space_id);
-//! <!-- [H5Z_set_local_func_t_snip] -->
-
-/**
- * \brief The filter operation callback function, defining a filter's operation
- * on data
- *
- * \param[in] flags Bit vector specifying certain general properties of the filter
- * \param[in] cd_nelmts Number of elements in \p cd_values
- * \param[in] cd_values Auxiliary data for the filter
- * \param[in] nbytes The number of valid bytes in \p buf to be filtered
- * \param[in,out] buf_size The size of \p buf
- * \param[in,out] buf The filter buffer
- *
- * \return Returns the number of valid bytes of data contained in \p buf. In the
- * case of failure, the return value is 0 (zero) and all pointer
- * arguments are left unchanged.
- *
- * \details A filter gets definition flags and invocation flags (defined
- * above), the client data array and size defined when the filter was
- * added to the pipeline, the size in bytes of the data on which to
- * operate, and pointers to a buffer and its allocated size.
- *
- * The filter should store the result in the supplied buffer if
- * possible, otherwise it can allocate a new buffer, freeing the
- * original. The allocated size of the new buffer should be returned
- * through the \p buf_size pointer and the new buffer through the \p
- * buf pointer.
- *
- * The return value from the filter is the number of bytes in the
- * output buffer. If an error occurs then the function should return
- * zero and leave all pointer arguments unchanged.
- */
-//! <!-- [H5Z_func_t_snip] -->
-typedef size_t (*H5Z_func_t)(unsigned int flags, size_t cd_nelmts, const unsigned int cd_values[],
- size_t nbytes, size_t *buf_size, void **buf);
-//! <!-- [H5Z_func_t_snip] -->
-/**
- * The filter table maps filter identification numbers to structs that
- * contain a pointers to the filter function and timing statistics.
- */
-//! <!-- [H5Z_class2_t_snip] -->
-typedef struct H5Z_class2_t {
- int version; /**< Version number of the H5Z_class_t struct */
- H5Z_filter_t id; /**< Filter ID number */
- unsigned encoder_present; /**< Does this filter have an encoder? */
- unsigned decoder_present; /**< Does this filter have a decoder? */
- const char * name; /**< Comment for debugging */
- H5Z_can_apply_func_t can_apply; /**< The "can apply" callback for a filter */
- H5Z_set_local_func_t set_local; /**< The "set local" callback for a filter */
- H5Z_func_t filter; /**< The actual filter function */
-} H5Z_class2_t;
-//! <!-- [H5Z_class2_t_snip] -->
-
-/**
- * \ingroup H5Z
- *
- * \brief Registers a new filter with the HDF5 library
- *
- * \param[in] cls A pointer to a buffer for the struct containing the
- * filter-definition
- *
- * \return \herr_t
- *
- * \details H5Zregister() registers a new filter with the HDF5 library.
- *
- * \details Making a new filter available to an application is a two-step
- * process. The first step is to write the three filter callback
- * functions described below: \c can_apply, \c set_local, and \c
- * filter. This call to H5Zregister(), registering the filter with the
- * library, is the second step. The can_apply and set_local fields can
- * be set to NULL if they are not required for the filter being
- * registered.
- *
- * H5Zregister() accepts a single parameter, a pointer to a buffer for
- * the \p cls data structure. That data structure must conform to one
- * of the following definitions:
- * \snippet this H5Z_class1_t_snip
- * or
- * \snippet this H5Z_class2_t_snip
- *
- * \c version is a library-defined value reporting the version number
- * of the #H5Z_class_t struct. This currently must be set to
- * #H5Z_CLASS_T_VERS.
- *
- * \c id is the identifier for the new filter. This is a user-defined
- * value between #H5Z_FILTER_RESERVED and #H5Z_FILTER_MAX. These
- * values are defined in the HDF5 source file H5Zpublic.h, but the
- * symbols #H5Z_FILTER_RESERVED and #H5Z_FILTER_MAX should always be
- * used instead of the literal values.
- *
- * \c encoder_present is a library-defined value indicating whether
- * the filter’s encoding capability is available to the application.
- *
- * \c decoder_present is a library-defined value indicating whether
- * the filter’s encoding capability is available to the application.
- *
- * \c name is a descriptive comment used for debugging, may contain a
- * descriptive name for the filter, and may be the null pointer.
- *
- * \c can_apply, described in detail below, is a user-defined callback
- * function which determines whether the combination of the dataset
- * creation property list values, the datatype, and the dataspace
- * represent a valid combination to apply this filter to.
- *
- * \c set_local, described in detail below, is a user-defined callback
- * function which sets any parameters that are specific to this
- * dataset, based on the combination of the dataset creation property
- * list values, the datatype, and the dataspace.
- *
- * \c filter, described in detail below, is a user-defined callback
- * function which performs the action of the filter.
- *
- * The statistics associated with a filter are not reset by this
- * function; they accumulate over the life of the library.
- *
- * #H5Z_class_t is a macro which maps to either H5Z_class1_t or
- * H5Z_class2_t, depending on the needs of the application. To affect
- * only this macro, H5Z_class_t_vers may be defined to either 1 or 2.
- * Otherwise, it will behave in the same manner as other API
- * compatibility macros. See API Compatibility Macros in HDF5 for more
- * information. H5Z_class1_t matches the #H5Z_class_t structure that is
- * used in the 1.6.x versions of the HDF5 library.
- *
- * H5Zregister() will automatically detect which structure type has
- * been passed in, regardless of the mapping of the #H5Z_class_t macro.
- * However, the application must make sure that the fields are filled
- * in according to the correct structure definition if the macro is
- * used to declare the structure.
- *
- * \Bold{The callback functions:}\n Before H5Zregister() can link a
- * filter into an application, three callback functions must be
- * defined as described in the HDF5 library header file H5Zpublic.h.
- *
- * When a filter is applied to the fractal heap for a group (e.g.,
- * when compressing group metadata) and if the can apply and set local
- * callback functions have been defined for that filter, HDF5 passes
- * the value -1 for all parameters for those callback functions. This
- * is done to ensure that the filter will not be applied to groups if
- * it relies on these parameters, as they are not applicable to group
- * fractal heaps; to operate on group fractal heaps, a filter must be
- * capable of operating on an opaque block of binary data.
- *
- * The \Emph{can apply} callback function must return a positive value
- * for a valid combination, zero for an invalid combination, and a
- * negative value for an error.
- * \snippet this H5Z_can_apply_func_t_snip
- *
- * Before a dataset is created, the \Emph{can apply} callbacks for any
- * filters used in the dataset creation property list are called with
- * the dataset's dataset creation property list, \c dcpl_id, the
- * dataset's datatype, \p type_id, and a dataspace describing a chunk,
- * \p space_id, (for chunked dataset storage).
- *
- * This callback must determine whether the combination of the dataset
- * creation property list settings, the datatype, and the dataspace
- * represent a valid combination to which to apply this filter. For
- * example, an invalid combination may involve the filter not
- * operating correctly on certain datatypes, on certain datatype
- * sizes, or on certain sizes of the chunk dataspace. If this filter
- * is enabled through H5Pset_filter() as optional and the can apply
- * function returns 0, the library will skip the filter in the filter
- * pipeline.
- *
- * This callback can be the NULL pointer, in which case the library
- * will assume that the filter can be applied to a dataset with any
- * combination of dataset creation property list values, datatypes,
- * and dataspaces.
- *
- * The \Emph{set local} callback function is defined as follows:
- * \snippet this H5Z_set_local_func_t_snip
- *
- * After the can apply callbacks are checked for a new dataset, the
- * \Emph{set local} callback functions for any filters used in the
- * dataset creation property list are called. These callbacks receive
- * \c dcpl_id, the dataset's private copy of the dataset creation
- * property list passed in to H5Dcreate() (i.e. not the actual
- * property list passed in to H5Dcreate()); \c type_id, the datatype
- * identifier passed in to H5Dcreate(), which is not copied and should
- * not be modified; and \c space_id, a dataspace describing the chunk
- * (for chunked dataset storage), which should also not be modified.
- *
- * The set local callback must set any filter parameters that are
- * specific to this dataset, based on the combination of the dataset
- * creation property list values, the datatype, and the dataspace. For
- * example, some filters perform different actions based on different
- * datatypes, datatype sizes, numbers of dimensions, or dataspace
- * sizes.
- *
- * The \Emph{set local} callback may be the NULL pointer, in which
- * case, the library will assume that there are no dataset-specific
- * settings for this filter.
- *
- * The \Emph{set local} callback function must return a non-negative
- * value on success and a negative value for an error.
- *
- * The \Emph{filter operation} callback function, defining the
- * filter's operation on the data, is defined as follows:
- * \snippet this H5Z_func_t_snip
- *
- * The parameters \c flags, \c cd_nelmts, and \c cd_values are the
- * same as for the function H5Pset_filter(). The one exception is that
- * an additional flag, #H5Z_FLAG_REVERSE, is set when the filter is
- * called as part of the input pipeline.
- *
- * The parameter \c buf points to the input buffer which has a size of
- * \c buf_size bytes, \c nbytes of which are valid data.
- *
- * The filter should perform the transformation in place if possible.
- * If the transformation cannot be done in place, then the filter
- * should allocate a new buffer with malloc() and assign it to \c buf,
- * assigning the allocated size of that buffer to \c buf_size. The old
- * buffer should be freed by calling free().
- *
- * If successful, the \Emph{filter operation} callback function
- * returns the number of valid bytes of data contained in \c buf. In
- * the case of failure, the return value is 0 (zero) and all pointer
- * arguments are left unchanged.
- *
- * \version 1.8.6 Return type for the \Emph{can apply} callback function,
- * \ref H5Z_can_apply_func_t, changed to \ref htri_t.
- * \version 1.8.5 Semantics of the \Emph{can apply} and \Emph{set local}
- * callback functions changed to accommodate the use of filters
- * with group fractal heaps.
- * \version 1.8.3 #H5Z_class_t renamed to H5Z_class2_t, H5Z_class1_t structure
- * introduced for backwards compatibility with release 1.6.x,
- * and #H5Z_class_t macro introduced in this release. Function
- * modified to accept either structure type.
- * \version 1.8.0 The fields \c version, \c encoder_present, and
- * \c decoder_present were added to the #H5Z_class_t \c struct
- * in this release.
- * \version 1.6.0 This function was substantially revised in Release 1.6.0 with
- * a new #H5Z_class_t struct and new set local and can apply
- * callback functions.
- *
- */
-H5_DLL herr_t H5Zregister(const void *cls);
-/**
- * \ingroup H5Z
- *
- * \brief Unregisters a filter.
- *
- * \param[in] id Identifier of the filter to be unregistered.
- * \return \herr_t
- *
- * \details H5Zunregister() unregisters the filter specified in \p id.
- *
- * \details This function first iterates through all opened datasets and
- * groups. If an open object that uses this filter is found, the
- * function will fail with a message indicating that an object using
- * the filter is still open. All open files are then flushed to make
- * sure that all cached data that may use this filter are written out.
- *
- * If the application is a parallel program, all processes that
- * participate in collective data write should call this function to
- * ensure that all data is flushed.
- *
- * After a call to H5Zunregister(), the filter specified in filter
- * will no longer be available to the application.
- *
- * \version 1.8.12 Function modified to check for open objects using the
- * filter.
- * \since 1.6.0
- */
-H5_DLL herr_t H5Zunregister(H5Z_filter_t id);
-/**
* \ingroup H5Z
*
* \brief Determines whether a filter is available
@@ -662,29 +314,8 @@ H5_DLL htri_t H5Zfilter_avail(H5Z_filter_t id);
*/
H5_DLL herr_t H5Zget_filter_info(H5Z_filter_t filter, unsigned int *filter_config_flags);
-/* Symbols defined for compatibility with previous versions of the HDF5 API.
- *
- * Use of these symbols is deprecated.
- */
-#ifndef H5_NO_DEPRECATED_SYMBOLS
-
-/**
- * The filter table maps filter identification numbers to structs that
- * contain a pointers to the filter function and timing statistics.
- */
-//! <!-- [H5Z_class1_t_snip] -->
-typedef struct H5Z_class1_t {
- H5Z_filter_t id; /**< Filter ID number */
- const char * name; /**< Comment for debugging */
- H5Z_can_apply_func_t can_apply; /**< The "can apply" callback for a filter */
- H5Z_set_local_func_t set_local; /**< The "set local" callback for a filter */
- H5Z_func_t filter; /**< The actual filter function */
-} H5Z_class1_t;
-//! <!-- [H5Z_class1_t_snip] -->
-
-#endif /* H5_NO_DEPRECATED_SYMBOLS */
-
#ifdef __cplusplus
}
#endif
-#endif
+
+#endif /* _H5Zpublic_H */
diff --git a/src/H5err.txt b/src/H5err.txt
index 64452ec..d2f1093 100644
--- a/src/H5err.txt
+++ b/src/H5err.txt
@@ -105,7 +105,7 @@ SECTION, PIPELINE, I/O pipeline errors
SECTION, SYSTEM, System level errors
SECTION, PLUGIN, Plugin errors
SECTION, MAP, Map related errors
-SECTION, ASYNC, Asynchronous I/O errors
+SECTION, ASYNC, Asynchronous operation errors
SECTION, NONE, No error
# Minor errors
@@ -139,6 +139,7 @@ MINOR, FILEACC, H5E_NOTHDF5, Not an HDF5 file
MINOR, FILEACC, H5E_BADFILE, Bad file ID accessed
MINOR, FILEACC, H5E_TRUNCATED, File has been truncated
MINOR, FILEACC, H5E_MOUNT, File mount error
+MINOR, FILEACC, H5E_UNMOUNT, File unmount error
MINOR, FILEACC, H5E_CANTDELETEFILE, Unable to delete file
MINOR, FILEACC, H5E_CANTLOCKFILE, Unable to lock file
MINOR, FILEACC, H5E_CANTUNLOCKFILE, Unable to unlock file
@@ -290,6 +291,7 @@ MINOR, MAP, H5E_CANTPUT, Can't put value
# Asynchronous operation errors
MINOR, ASYNC, H5E_CANTWAIT, Can't wait on operation
+MINOR, ASYNC, H5E_CANTCANCEL, Can't cancel operation
# No error, for backward compatibility */
MINOR, NONE, H5E_NONE_MINOR, No error
diff --git a/src/H5private.h b/src/H5private.h
index 07272eb..cc19e8e 100644
--- a/src/H5private.h
+++ b/src/H5private.h
@@ -565,6 +565,29 @@ typedef long int32_t;
#define LOCK_UN 0x08
#endif /* H5_HAVE_FLOCK */
+/* Macros for enabling/disabling particular GCC warnings
+ *
+ * These are duplicated in H5FDmulti.c (we don't want to put them in the
+ * public header and the multi VFD can't use private headers). If you make
+ * changes here, be sure to update those as well.
+ *
+ * (see the following web-sites for more info:
+ * http://www.dbp-consulting.com/tutorials/SuppressingGCCWarnings.html
+ * http://gcc.gnu.org/onlinedocs/gcc/Diagnostic-Pragmas.html#Diagnostic-Pragmas
+ */
+/* These pragmas are only implemented usefully in gcc 4.6+ */
+#if ((__GNUC__ * 100) + __GNUC_MINOR__) >= 406
+#define H5_GCC_DIAG_JOINSTR(x, y) x y
+#define H5_GCC_DIAG_DO_PRAGMA(x) _Pragma(#x)
+#define H5_GCC_DIAG_PRAGMA(x) H5_GCC_DIAG_DO_PRAGMA(GCC diagnostic x)
+
+#define H5_GCC_DIAG_OFF(x) H5_GCC_DIAG_PRAGMA(push) H5_GCC_DIAG_PRAGMA(ignored H5_GCC_DIAG_JOINSTR("-W", x))
+#define H5_GCC_DIAG_ON(x) H5_GCC_DIAG_PRAGMA(pop)
+#else
+#define H5_GCC_DIAG_OFF(x)
+#define H5_GCC_DIAG_ON(x)
+#endif
+
/* Typedefs and functions for timing certain parts of the library. */
/* A set of elapsed/user/system times emitted as a time point by the
@@ -1616,6 +1639,9 @@ H5_DLL int64_t HDstrtoll(const char *s, const char **rest, int base);
#ifndef HDunlink
#define HDunlink(S) unlink(S)
#endif /* HDunlink */
+#ifndef HDunsetenv
+#define HDunsetenv(S) unsetenv(S)
+#endif /* HDsetenv */
#ifndef HDutime
#define HDutime(S, T) utime(S, T)
#endif /* HDutime */
diff --git a/src/H5public.h b/src/H5public.h
index b548889..cef15d9 100644
--- a/src/H5public.h
+++ b/src/H5public.h
@@ -42,7 +42,11 @@
#include <limits.h> /* For H5T_NATIVE_CHAR defn in H5Tpublic.h */
#include <stdarg.h> /* For variadic functions in H5VLpublic.h */
-#include <stdint.h> /* For C9x types */
+#include <stdint.h> /* For C9x types */
+
+#ifdef __cplusplus
+#define __STDC_FORMAT_MACROS
+#endif
#include <inttypes.h> /* C99/POSIX.1 header for uint64_t, PRIu64 */
#ifdef H5_HAVE_STDDEF_H
@@ -59,31 +63,6 @@
#endif
#endif
-/* Include the Windows API adapter header early */
-#include "H5api_adpt.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Macros for enabling/disabling particular GCC warnings */
-/* (see the following web-sites for more info:
- * http://www.dbp-consulting.com/tutorials/SuppressingGCCWarnings.html
- * http://gcc.gnu.org/onlinedocs/gcc/Diagnostic-Pragmas.html#Diagnostic-Pragmas
- */
-/* These pragmas are only implemented usefully in gcc 4.6+ */
-#if ((__GNUC__ * 100) + __GNUC_MINOR__) >= 406
-#define H5_GCC_DIAG_JOINSTR(x, y) x y
-#define H5_GCC_DIAG_DO_PRAGMA(x) _Pragma(#x)
-#define H5_GCC_DIAG_PRAGMA(x) H5_GCC_DIAG_DO_PRAGMA(GCC diagnostic x)
-
-#define H5_GCC_DIAG_OFF(x) H5_GCC_DIAG_PRAGMA(push) H5_GCC_DIAG_PRAGMA(ignored H5_GCC_DIAG_JOINSTR("-W", x))
-#define H5_GCC_DIAG_ON(x) H5_GCC_DIAG_PRAGMA(pop)
-#else
-#define H5_GCC_DIAG_OFF(x)
-#define H5_GCC_DIAG_ON(x)
-#endif
-
/* Macro to hide a symbol from further preprocessor substitutions */
#define H5_NO_EXPAND(x) (x)
@@ -368,6 +347,13 @@ typedef struct H5_alloc_stats_t {
*/
typedef void (*H5_atclose_func_t)(void *ctx);
+/* API adapter header (defines H5_DLL, etc.) */
+#include "H5api_adpt.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/* Functions in H5.c */
/**
* \ingroup H5
diff --git a/src/H5system.c b/src/H5system.c
index 52bb930..879c003 100644
--- a/src/H5system.c
+++ b/src/H5system.c
@@ -460,6 +460,9 @@ Wgettimeofday(struct timeval *tv, struct timezone *tz)
* Interestingly, getenv *is* available in the Windows
* POSIX layer, just not setenv.
*
+ * Note: Passing an empty string ("") for the value will remove
+ * the variable from the environment (like unsetenv(3))
+ *
* Return: Success: 0
* Failure: non-zero error code
*
@@ -471,14 +474,14 @@ Wgettimeofday(struct timeval *tv, struct timezone *tz)
int
Wsetenv(const char *name, const char *value, int overwrite)
{
- size_t bufsize;
- errno_t err;
-
/* If we're not overwriting, check if the environment variable exists.
* If it does (i.e.: the required buffer size to store the variable's
* value is non-zero), then return an error code.
*/
if (!overwrite) {
+ size_t bufsize;
+ errno_t err;
+
err = getenv_s(&bufsize, NULL, 0, name);
if (err || bufsize)
return (int)err;
@@ -961,18 +964,32 @@ done:
*
* Purpose: Sleep for a given # of nanoseconds
*
+ * Note that commodity hardware is probably going to have a
+ * resolution of milliseconds, not nanoseconds.
+ *
* Return: void
*--------------------------------------------------------------------------
*/
void
H5_nanosleep(uint64_t nanosec)
{
-#ifndef H5_HAVE_WIN32_API
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+#ifdef H5_HAVE_WIN32_API
+ DWORD dwMilliseconds = (DWORD)HDceil(nanosec / 1.0e6);
+ DWORD ignore;
+
+ /* Windows can't sleep at a ns resolution. Best we can do is ~1 ms. We
+ * don't care about the return value since the second parameter
+ * (bAlertable) is FALSE, so it will always be zero.
+ */
+ ignore = SleepEx(dwMilliseconds, FALSE);
+
+#else
+
const uint64_t nanosec_per_sec = 1000 * 1000 * 1000;
struct timespec sleeptime; /* Struct to hold time to sleep */
- FUNC_ENTER_NOAPI_NOINIT_NOERR
-
/* Set up time to sleep
*
* Assuming ILP32 or LP64 or wider architecture, (long)operand
@@ -994,22 +1011,9 @@ H5_nanosleep(uint64_t nanosec)
if (errno != EINTR)
break;
}
+#endif
FUNC_LEAVE_NOAPI_VOID
-#else
- DWORD dwMilliseconds = (DWORD)HDceil(nanosec / 1.0e6);
- DWORD ignore;
-
- FUNC_ENTER_NOAPI_NOINIT_NOERR
-
- /* Windows can't sleep at a ns resolution. Best we can do is ~1 ms. We
- * don't care about the return value since the second parameter
- * (bAlertable) is FALSE, so it will always be zero.
- */
- ignore = SleepEx(dwMilliseconds, FALSE);
-
- FUNC_LEAVE_NOAPI_VOID
-#endif /* H5_HAVE_WIN32_API */
} /* end H5_nanosleep() */
#ifdef H5_HAVE_WIN32_API
diff --git a/src/H5trace.c b/src/H5trace.c
index ecb2705..e33bc12 100644
--- a/src/H5trace.c
+++ b/src/H5trace.c
@@ -3608,6 +3608,7 @@ H5_trace_args(H5RS_str_t *rs, const char *type, va_list ap)
H5RS_acat(rs, "H5VL_NATIVE_FILE_SET_MIN_DSET_OHDR_FLAG");
break;
+#ifdef H5_HAVE_PARALLEL
case H5VL_NATIVE_FILE_GET_MPI_ATOMICITY:
H5RS_acat(rs, "H5VL_NATIVE_FILE_GET_MPI_ATOMICITY");
break;
@@ -3615,6 +3616,7 @@ H5_trace_args(H5RS_str_t *rs, const char *type, va_list ap)
case H5VL_NATIVE_FILE_SET_MPI_ATOMICITY:
H5RS_acat(rs, "H5VL_NATIVE_FILE_SET_MPI_ATOMICITY");
break;
+#endif /* H5_HAVE_PARALLEL */
case H5VL_NATIVE_FILE_POST_OPEN:
H5RS_acat(rs, "H5VL_NATIVE_FILE_POST_OPEN");
diff --git a/src/H5win32defs.h b/src/H5win32defs.h
index 0617643..26bca67 100644
--- a/src/H5win32defs.h
+++ b/src/H5win32defs.h
@@ -100,6 +100,7 @@ H5_DLL int H5_get_win32_times(H5_timevals_t *tvs);
#define HDgettimeofday(V, Z) Wgettimeofday(V, Z)
#define HDsetenv(N, V, O) Wsetenv(N, V, O)
+#define HDunsetenv(N, V, O) Wsetenv(N, "", 1)
#define HDflock(F, L) Wflock(F, L)
#define HDgetlogin() Wgetlogin()
diff --git a/src/Makefile.am b/src/Makefile.am
index f171c60..37ff850 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -74,7 +74,7 @@ libhdf5_la_SOURCES= H5.c H5checksum.c H5dbg.c H5lib_settings.c H5system.c \
H5HF.c H5HFbtree2.c H5HFcache.c H5HFdbg.c H5HFdblock.c H5HFdtable.c \
H5HFhdr.c H5HFhuge.c H5HFiblock.c H5HFiter.c H5HFman.c H5HFsection.c \
H5HFspace.c H5HFstat.c H5HFtest.c H5HFtiny.c \
- H5HG.c H5HGcache.c H5HGdbg.c H5HGquery.c H5HGtrap.c \
+ H5HG.c H5HGcache.c H5HGdbg.c H5HGquery.c \
H5HL.c H5HLcache.c H5HLdbg.c H5HLint.c H5HLprfx.c H5HLdblk.c \
H5HP.c \
H5I.c H5Idbg.c H5Iint.c H5Itest.c \
@@ -112,8 +112,7 @@ libhdf5_la_SOURCES= H5.c H5checksum.c H5dbg.c H5lib_settings.c H5system.c \
H5VLnative_attr.c H5VLnative_blob.c H5VLnative_dataset.c \
H5VLnative_datatype.c H5VLnative_file.c H5VLnative_group.c \
H5VLnative_link.c H5VLnative_introspect.c H5VLnative_object.c \
- H5VLnative_token.c \
- H5VLpassthru.c \
+ H5VLnative_token.c H5VLpassthru.c \
H5VM.c H5WB.c H5Z.c \
H5Zdeflate.c H5Zfletcher32.c H5Znbit.c H5Zshuffle.c H5Zscaleoffset.c \
H5Zszip.c H5Ztrans.c
@@ -144,7 +143,7 @@ if ROS3_VFD_CONDITIONAL
endif
# Public headers
-include_HEADERS = hdf5.h H5api_adpt.h H5overflow.h H5pubconf.h H5public.h H5queue.h H5version.h \
+include_HEADERS = hdf5.h H5api_adpt.h H5overflow.h H5pubconf.h H5public.h H5version.h \
H5Apublic.h H5ACpublic.h \
H5Cpublic.h H5Dpublic.h \
H5Epubgen.h H5Epublic.h H5ESpublic.h H5Fpublic.h \
@@ -155,11 +154,15 @@ include_HEADERS = hdf5.h H5api_adpt.h H5overflow.h H5pubconf.h H5public.h H5queu
H5Gpublic.h H5Ipublic.h H5Lpublic.h \
H5Mpublic.h H5MMpublic.h H5Opublic.h H5Ppublic.h \
H5PLextern.h H5PLpublic.h \
- H5Rpublic.h H5Spublic.h H5Tpublic.h H5TSpublic.h \
+ H5Rpublic.h H5Spublic.h H5Tpublic.h \
H5VLconnector.h H5VLconnector_passthru.h \
H5VLnative.h H5VLpassthru.h H5VLpublic.h \
H5Zpublic.h
+# Public component author headers
+include_HEADERS += H5FDdevelop.h H5Idevelop.h H5Ldevelop.h \
+ H5Tdevelop.h H5TSdevelop.h H5Zdevelop.h
+
# install libhdf5.settings in lib directory
settingsdir=$(libdir)
settings_DATA=libhdf5.settings
diff --git a/src/hdf5.h b/src/hdf5.h
index ab799e2..b4ece16 100644
--- a/src/hdf5.h
+++ b/src/hdf5.h
@@ -38,10 +38,21 @@
#include "H5Rpublic.h" /* References */
#include "H5Spublic.h" /* Dataspaces */
#include "H5Tpublic.h" /* Datatypes */
-#include "H5TSpublic.h" /* Thread-safety */
#include "H5VLpublic.h" /* Virtual Object Layer */
#include "H5Zpublic.h" /* Data filters */
+/* Plugin/component developer headers */
+#include "H5FDdevelop.h" /* File drivers */
+#include "H5Idevelop.h" /* ID management */
+#include "H5Ldevelop.h" /* Links */
+#include "H5Tdevelop.h" /* Datatypes */
+#include "H5TSdevelop.h" /* Threadsafety */
+#include "H5Zdevelop.h" /* Data filters */
+
+/* Virtual object layer (VOL) connector developer support */
+#include "H5VLconnector.h" /* VOL connector author routines */
+#include "H5VLconnector_passthru.h" /* Pass-through VOL connector author routines */
+
/* Predefined file drivers */
#include "H5FDcore.h" /* Files stored entirely in memory */
#include "H5FDdirect.h" /* Linux direct I/O */